Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 15 Jan 2017 19:37:43 +0000 (11:37 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 15 Jan 2017 19:37:43 +0000 (11:37 -0800)
Pull perf fixes from Ingo Molnar:
 "Misc race fixes uncovered by fuzzing efforts, a Sparse fix, two PMU
  driver fixes, plus miscellaneous tooling fixes"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86: Reject non sampling events with precise_ip
  perf/x86/intel: Account interrupts for PEBS errors
  perf/core: Fix concurrent sys_perf_event_open() vs. 'move_group' race
  perf/core: Fix sys_perf_event_open() vs. hotplug
  perf/x86/intel: Use ULL constant to prevent undefined shift behaviour
  perf/x86/intel/uncore: Fix hardcoded socket 0 assumption in the Haswell init code
  perf/x86: Set pmu->module in Intel PMU modules
  perf probe: Fix to probe on gcc generated symbols for offline kernel
  perf probe: Fix --funcs to show correct symbols for offline module
  perf symbols: Robustify reading of build-id from sysfs
  perf tools: Install tools/lib/traceevent plugins with install-bin
  tools lib traceevent: Fix prev/next_prio for deadline tasks
  perf record: Fix --switch-output documentation and comment
  perf record: Make __record_options static
  tools lib subcmd: Add OPT_STRING_OPTARG_SET option
  perf probe: Fix to get correct modname from elf header
  samples/bpf trace_output_user: Remove duplicate sys/ioctl.h include
  samples/bpf sock_example: Avoid getting ethhdr from two includes
  perf sched timehist: Show total scheduling time

507 files changed:
.mailmap
Documentation/admin-guide/kernel-parameters.txt
Documentation/block/queue-sysfs.txt
Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt
Documentation/devicetree/bindings/power/supply/tps65217_charger.txt
Documentation/driver-api/infrastructure.rst
Documentation/networking/mpls-sysctl.txt
Documentation/vfio-mediated-device.txt
Documentation/vm/page_frags [new file with mode: 0644]
MAINTAINERS
Makefile
arch/arm/Kconfig
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am335x-bone-common.dtsi
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/am571x-idk.dts
arch/arm/boot/dts/am572x-idk.dts
arch/arm/boot/dts/am57xx-idk-common.dtsi
arch/arm/boot/dts/dm814x.dtsi
arch/arm/boot/dts/dm816x.dtsi
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/dra72-evm-tps65917.dtsi
arch/arm/boot/dts/imx31.dtsi
arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/imx6sl.dtsi
arch/arm/boot/dts/imx6sx.dtsi
arch/arm/boot/dts/omap2.dtsi
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/omap3.dtsi
arch/arm/boot/dts/omap4.dtsi
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/qcom-apq8064.dtsi
arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
arch/arm/mach-davinci/clock.c
arch/arm/mach-davinci/clock.h
arch/arm/mach-davinci/da850.c
arch/arm/mach-davinci/usb-da8xx.c
arch/arm/mach-exynos/platsmp.c
arch/arm/mach-imx/mach-imx1.c
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/board-generic.c
arch/arm/mach-omap2/gpio.c [deleted file]
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod_common_data.h
arch/arm/mach-omap2/prm_common.c
arch/arm/mach-omap2/timer.c
arch/arm/mach-s3c24xx/common.c
arch/arm64/boot/dts/amlogic/meson-gx.dtsi
arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl-nexbox-a95x.dts
arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
arch/arm64/boot/dts/qcom/msm8996.dtsi
arch/arm64/boot/dts/renesas/r8a7795-h3ulcb.dts
arch/arm64/configs/defconfig
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/current.h
arch/arm64/mm/dma-mapping.c
arch/arm64/mm/fault.c
arch/arm64/mm/hugetlbpage.c
arch/arm64/mm/init.c
arch/mips/kvm/entry.c
arch/mips/kvm/mips.c
arch/openrisc/kernel/vmlinux.lds.S
arch/parisc/include/asm/thread_info.h
arch/parisc/kernel/time.c
arch/parisc/mm/fault.c
arch/s390/include/asm/asm-prototypes.h [new file with mode: 0644]
arch/s390/kernel/vtime.c
arch/x86/crypto/aesni-intel_glue.c
arch/x86/kernel/pci-swiotlb.c
arch/x86/kvm/emulate.c
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/net/bpf_jit_comp.c
arch/x86/platform/efi/efi.c
arch/x86/platform/efi/quirks.c
arch/x86/xen/pci-swiotlb-xen.c
arch/x86/xen/setup.c
block/blk-lib.c
block/blk-wbt.c
block/blk-zoned.c
block/partition-generic.c
drivers/acpi/acpi_watchdog.c
drivers/acpi/glue.c
drivers/acpi/internal.h
drivers/acpi/scan.c
drivers/acpi/sysfs.c
drivers/base/power/domain.c
drivers/block/nbd.c
drivers/block/virtio_blk.c
drivers/block/zram/zram_drv.c
drivers/clk/clk-stm32f4.c
drivers/clk/renesas/clk-mstp.c
drivers/cpufreq/cpufreq-dt-platdev.c
drivers/cpufreq/intel_pstate.c
drivers/devfreq/devfreq.c
drivers/devfreq/exynos-bus.c
drivers/dma/dw/Kconfig
drivers/dma/ioat/hw.h
drivers/dma/ioat/init.c
drivers/dma/omap-dma.c
drivers/dma/pl330.c
drivers/dma/sh/rcar-dmac.c
drivers/dma/stm32-dma.c
drivers/dma/ti-dma-crossbar.c
drivers/firmware/arm_scpi.c
drivers/firmware/efi/fake_mem.c
drivers/firmware/efi/libstub/efistub.h
drivers/firmware/efi/libstub/fdt.c
drivers/firmware/efi/memmap.c
drivers/firmware/psci_checker.c
drivers/gpio/gpio-mxs.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/si_dpm.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/i915/gvt/cfg_space.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gtt.h
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/opregion.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_request.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/meson/meson_plane.c
drivers/gpu/drm/meson/meson_venc.c
drivers/gpu/drm/meson/meson_venc_cvbs.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_ringbuffer.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/tilcdc/tilcdc_crtc.c
drivers/hid/hid-asus.c
drivers/hid/hid-core.c
drivers/hid/hid-cypress.c
drivers/hid/hid-ids.h
drivers/hid/hid-sensor-hub.c
drivers/hid/hid-sony.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/usbhid/hid-quirks.c
drivers/hwmon/lm90.c
drivers/iio/accel/st_accel_core.c
drivers/iio/adc/Kconfig
drivers/iio/common/st_sensors/st_sensors_buffer.c
drivers/iio/common/st_sensors/st_sensors_core.c
drivers/iio/counter/104-quad-8.c
drivers/iio/imu/bmi160/bmi160_core.c
drivers/iio/light/max44000.c
drivers/infiniband/hw/mlx4/main.c
drivers/input/joydev.c
drivers/input/joystick/xpad.c
drivers/input/misc/adxl34x-i2c.c
drivers/input/mouse/alps.h
drivers/input/mouse/synaptics_i2c.c
drivers/input/rmi4/Kconfig
drivers/input/serio/i8042-x86ia64io.h
drivers/input/touchscreen/elants_i2c.c
drivers/iommu/amd_iommu.c
drivers/iommu/dmar.c
drivers/iommu/intel-iommu.c
drivers/md/md.h
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5-cache.c
drivers/md/raid5.c
drivers/misc/mei/bus.c
drivers/misc/mei/client.c
drivers/net/appletalk/ipddp.c
drivers/net/dsa/bcm_sf2.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cadence/macb_pci.c
drivers/net/ethernet/cavium/Kconfig
drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/mellanox/mlx4/en_clock.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/icm.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/ethtool.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/siena.c
drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/phy/Kconfig
drivers/net/phy/dp83867.c
drivers/net/phy/marvell.c
drivers/net/phy/phy.c
drivers/net/usb/asix_devices.c
drivers/net/usb/r8152.c
drivers/net/vrf.c
drivers/net/wan/slic_ds26522.c
drivers/net/wireless/intersil/orinoco/mic.c
drivers/net/wireless/intersil/orinoco/mic.h
drivers/net/wireless/intersil/orinoco/orinoco.h
drivers/net/wireless/realtek/rtlwifi/usb.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/host/scsi.c
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/fcloop.c
drivers/nvmem/core.c
drivers/nvmem/imx-ocotp.c
drivers/nvmem/qfprom.c
drivers/pinctrl/meson/pinctrl-meson.c
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/samsung/pinctrl-exynos.c
drivers/pinctrl/samsung/pinctrl-exynos.h
drivers/platform/x86/Kconfig
drivers/platform/x86/fujitsu-laptop.c
drivers/remoteproc/remoteproc_core.c
drivers/rpmsg/rpmsg_core.c
drivers/scsi/bfa/bfad.c
drivers/scsi/bfa/bfad_drv.h
drivers/scsi/fnic/fnic.h
drivers/scsi/fnic/fnic_scsi.c
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/qedi/Kconfig
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/scsi/snic/snic_main.c
drivers/staging/octeon/ethernet.c
drivers/target/target_core_transport.c
drivers/target/target_core_xcopy.c
drivers/target/target_core_xcopy.h
drivers/usb/core/config.c
drivers/usb/core/hub.c
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/params.c
drivers/usb/dwc3/core.h
drivers/usb/dwc3/dwc3-omap.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/ep0.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_hid.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/gadget/udc/core.c
drivers/usb/gadget/udc/dummy_hcd.c
drivers/usb/host/ohci-at91.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-mtk.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/blackfin.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_core.h
drivers/usb/musb/musb_dsps.c
drivers/usb/musb/musb_host.c
drivers/usb/musb/musbhsdma.h
drivers/usb/serial/cyberjack.c
drivers/usb/serial/f81534.c
drivers/usb/serial/garmin_gps.c
drivers/usb/serial/io_edgeport.c
drivers/usb/serial/io_ti.c
drivers/usb/serial/iuu_phoenix.c
drivers/usb/serial/keyspan_pda.c
drivers/usb/serial/kobil_sct.c
drivers/usb/serial/mos7720.c
drivers/usb/serial/mos7840.c
drivers/usb/serial/omninet.c
drivers/usb/serial/oti6858.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/quatech2.c
drivers/usb/serial/spcp8x5.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/storage/unusual_devs.h
drivers/vfio/mdev/mdev_core.c
drivers/vfio/mdev/mdev_private.h
drivers/vfio/mdev/mdev_sysfs.c
drivers/vfio/mdev/vfio_mdev.c
drivers/vfio/pci/vfio_pci.c
drivers/vfio/pci/vfio_pci_rdwr.c
drivers/vfio/vfio_iommu_type1.c
drivers/video/fbdev/cobalt_lcdfb.c
drivers/xen/arm-device.c
drivers/xen/events/events_fifo.c
drivers/xen/evtchn.c
drivers/xen/swiotlb-xen.c
drivers/xen/xenbus/xenbus_comms.h
drivers/xen/xenbus/xenbus_dev_frontend.c
fs/aio.c
fs/binfmt_elf.c
fs/block_dev.c
fs/btrfs/async-thread.c
fs/btrfs/extent-tree.c
fs/btrfs/inode.c
fs/btrfs/tree-log.c
fs/btrfs/uuid-tree.c
fs/buffer.c
fs/ceph/addr.c
fs/ceph/mds_client.c
fs/coredump.c
fs/crypto/keyinfo.c
fs/crypto/policy.c
fs/dax.c
fs/direct-io.c
fs/f2fs/segment.c
fs/f2fs/super.c
fs/notify/mark.c
fs/ocfs2/dlmglue.c
fs/ocfs2/stackglue.c
fs/ocfs2/stackglue.h
fs/posix_acl.c
fs/xfs/libxfs/xfs_ag_resv.c
fs/xfs/libxfs/xfs_alloc.c
fs/xfs/libxfs/xfs_alloc.h
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap_btree.c
fs/xfs/libxfs/xfs_refcount_btree.c
fs/xfs/libxfs/xfs_refcount_btree.h
fs/xfs/libxfs/xfs_rmap_btree.c
fs/xfs/libxfs/xfs_rmap_btree.h
fs/xfs/xfs_aops.c
fs/xfs/xfs_fsops.c
fs/xfs/xfs_icache.c
fs/xfs/xfs_log.c
fs/xfs/xfs_refcount_item.c
fs/xfs/xfs_sysfs.c
include/asm-generic/asm-prototypes.h
include/dt-bindings/mfd/tps65217.h [deleted file]
include/linux/blkdev.h
include/linux/coredump.h
include/linux/efi.h
include/linux/fsnotify_backend.h
include/linux/genhd.h
include/linux/gfp.h
include/linux/iio/common/st_sensors.h
include/linux/jump_label_ratelimit.h
include/linux/mdev.h
include/linux/memcontrol.h
include/linux/mlx4/device.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm.h
include/linux/mm_inline.h
include/linux/netdevice.h
include/linux/radix-tree.h
include/linux/remoteproc.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/slab.h
include/linux/swap.h
include/linux/swiotlb.h
include/linux/timerfd.h
include/sound/hdmi-codec.h
include/sound/soc.h
include/target/target_core_base.h
include/trace/events/btrfs.h
include/trace/events/mmflags.h
include/trace/events/swiotlb.h
include/uapi/linux/Kbuild
include/uapi/linux/timerfd.h [new file with mode: 0644]
include/uapi/linux/usb/functionfs.h
init/Kconfig
ipc/sem.c
kernel/audit_tree.c
kernel/bpf/arraymap.c
kernel/bpf/hashtab.c
kernel/capability.c
kernel/jump_label.c
kernel/memremap.c
kernel/signal.c
lib/Kconfig.debug
lib/iov_iter.c
lib/radix-tree.c
lib/swiotlb.c
mm/filemap.c
mm/huge_memory.c
mm/hugetlb.c
mm/khugepaged.c
mm/memcontrol.c
mm/memory.c
mm/page_alloc.c
mm/slab.c
mm/swapfile.c
mm/vmscan.c
mm/workingset.c
net/Kconfig
net/atm/lec.c
net/bridge/br_netfilter_hooks.c
net/core/dev.c
net/core/drop_monitor.c
net/core/flow_dissector.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/dsa/dsa2.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/igmp.c
net/ipv4/ip_sockglue.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_metrics.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6_vti.c
net/ipv6/route.c
net/iucv/af_iucv.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/mac80211/tx.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_payload.c
net/netfilter/nft_queue.c
net/netfilter/nft_quota.c
net/netlabel/netlabel_kapi.c
net/qrtr/qrtr.c
net/sched/cls_flower.c
net/sctp/outqueue.c
net/socket.c
net/wireless/nl80211.c
samples/Kconfig
samples/Makefile
samples/vfio-mdev/Makefile
samples/vfio-mdev/mtty.c
scripts/gcc-plugins/gcc-common.h
scripts/gcc-plugins/latent_entropy_plugin.c
sound/firewire/fireworks/fireworks_stream.c
sound/firewire/tascam/tascam-stream.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/nau8825.c
sound/soc/codecs/nau8825.h
sound/soc/codecs/rt5645.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/wm_adsp.c
sound/soc/dwc/designware_i2s.c
sound/soc/fsl/fsl_ssi.c
sound/soc/intel/boards/bytcr_rt5640.c
sound/soc/intel/skylake/skl-pcm.c
sound/soc/intel/skylake/skl-sst.c
sound/soc/sh/rcar/core.c
sound/soc/soc-core.c
sound/soc/soc-pcm.c
sound/soc/soc-topology.c
sound/usb/endpoint.c
sound/usb/endpoint.h
sound/usb/pcm.c
sound/usb/quirks.c
tools/perf/builtin-kmem.c
tools/testing/selftests/Makefile
tools/testing/selftests/bpf/test_kmod.sh
tools/testing/selftests/net/run_netsocktests
tools/testing/selftests/x86/protection_keys.c
usr/Makefile
virt/lib/irqbypass.c

index 02d261407683dcfa483cf15247b2cf31cff0432a..67dc22ffc9a80cb4fd6abeeba3d2ec7f7ae2ba19 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -137,6 +137,7 @@ Ricardo Ribalda Delgado <ricardo.ribalda@gmail.com>
 Rudolf Marek <R.Marek@sh.cvut.cz>
 Rui Saraiva <rmps@joel.ist.utl.pt>
 Sachin P Sant <ssant@in.ibm.com>
+Sarangdhar Joshi <spjoshi@codeaurora.org>
 Sam Ravnborg <sam@mars.ravnborg.org>
 Santosh Shilimkar <ssantosh@kernel.org>
 Santosh Shilimkar <santosh.shilimkar@oracle.org>
@@ -150,10 +151,13 @@ Shuah Khan <shuah@kernel.org> <shuah.kh@samsung.com>
 Simon Kelley <simon@thekelleys.org.uk>
 Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
 Stephen Hemminger <shemminger@osdl.org>
+Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
+Subhash Jadavani <subhashj@codeaurora.org>
 Sudeep Holla <sudeep.holla@arm.com> Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
 Sumit Semwal <sumit.semwal@ti.com>
 Tejun Heo <htejun@gmail.com>
 Thomas Graf <tgraf@suug.ch>
+Thomas Pedersen <twp@codeaurora.org>
 Tony Luck <tony.luck@intel.com>
 Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
 Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
index 21e2d88637050b7a33f141e558fff43d3f23d0c9..be7c0d9506b12072219f0396ceb9072eeea03df8 100644 (file)
                        use by PCI
                        Format: <irq>,<irq>...
 
+       acpi_mask_gpe=  [HW,ACPI]
+                       Due to the existence of _Lxx/_Exx, some GPEs triggered
+                       by unsupported hardware/firmware features can result in
+                        GPE floodings that cannot be automatically disabled by
+                        the GPE dispatcher.
+                       This facility can be used to prevent such uncontrolled
+                       GPE floodings.
+                       Format: <int>
+                       Support masking of GPEs numbered from 0x00 to 0x7f.
+
        acpi_no_auto_serialize  [HW,ACPI]
                        Disable auto-serialization of AML methods
                        AML control methods that contain the opcodes to create
                        it if 0 is given (See Documentation/cgroup-v1/memory.txt)
 
        swiotlb=        [ARM,IA-64,PPC,MIPS,X86]
-                       Format: { <int> | force }
+                       Format: { <int> | force | noforce }
                        <int> -- Number of I/O TLB slabs
                        force -- force using of bounce buffers even if they
                                 wouldn't be automatically used by the kernel
+                       noforce -- Never use bounce buffers (for debugging)
 
        switches=       [HW,M68k]
 
index 51642159aedbbc405d1bb90fa89402c2143f8310..c0a3bb5a6e4eb291d077f10633001c439563ccc2 100644 (file)
@@ -54,9 +54,9 @@ This is the hardware sector size of the device, in bytes.
 
 io_poll (RW)
 ------------
-When read, this file shows the total number of block IO polls and how
-many returned success.  Writing '0' to this file will disable polling
-for this device.  Writing any non-zero value will enable this feature.
+When read, this file shows whether polling is enabled (1) or disabled
+(0).  Writing '0' to this file will disable polling for this device.
+Writing any non-zero value will enable this feature.
 
 io_poll_delay (RW)
 ------------------
index 3e5b9793341f4e10679f792c8e19e9fbb55027e1..8682ab6d4a50f86d0d352f6b5b4e8a337c5511c4 100644 (file)
@@ -8,8 +8,9 @@ This driver provides a simple power button event via an Interrupt.
 Required properties:
 - compatible: should be "ti,tps65217-pwrbutton" or "ti,tps65218-pwrbutton"
 
-Required properties for TPS65218:
+Required properties:
 - interrupts: should be one of the following
+   - <2>: For controllers compatible with tps65217
    - <3 IRQ_TYPE_EDGE_BOTH>: For controllers compatible with tps65218
 
 Examples:
@@ -17,6 +18,7 @@ Examples:
 &tps {
        tps65217-pwrbutton {
                compatible = "ti,tps65217-pwrbutton";
+               interrupts = <2>;
        };
 };
 
index 98d131acee95dbff1631e63a5c01db39c2b3def1..a11072c5a8660d362958995a6fd27123c296b2fa 100644 (file)
@@ -2,11 +2,16 @@ TPS65217 Charger
 
 Required Properties:
 -compatible: "ti,tps65217-charger"
+-interrupts: TPS65217 interrupt numbers for the AC and USB charger input change.
+             Should be <0> for the USB charger and <1> for the AC adapter.
+-interrupt-names: Should be "USB" and "AC"
 
 This node is a subnode of the tps65217 PMIC.
 
 Example:
 
        tps65217-charger {
-               compatible = "ti,tps65090-charger";
+               compatible = "ti,tps65217-charger";
+               interrupts = <0>, <1>;
+               interrupt-names = "USB", "AC";
        };
index 0bb0b5fc951236f28c79d85a17f32e19710155da..6d9ff316b608db48b46de6413d9503e23b51536a 100644 (file)
@@ -55,21 +55,6 @@ Device Drivers DMA Management
 .. kernel-doc:: drivers/base/dma-mapping.c
    :export:
 
-Device Drivers Power Management
--------------------------------
-
-.. kernel-doc:: drivers/base/power/main.c
-   :export:
-
-Device Drivers ACPI Support
----------------------------
-
-.. kernel-doc:: drivers/acpi/scan.c
-   :export:
-
-.. kernel-doc:: drivers/acpi/scan.c
-   :internal:
-
 Device drivers PnP support
 --------------------------
 
index 9ed15f86c17c86ffa69fa3527e932b62f4b9ee20..15d8d16934fd13727bb35e9c5078484127bbfa30 100644 (file)
@@ -5,8 +5,8 @@ platform_labels - INTEGER
        possible to configure forwarding for label values equal to or
        greater than the number of platform labels.
 
-       A dense utliziation of the entries in the platform label table
-       is possible and expected aas the platform labels are locally
+       A dense utilization of the entries in the platform label table
+       is possible and expected as the platform labels are locally
        allocated.
 
        If the number of platform label table entries is set to 0 no
index b38afec35edc2b1ea3f49cce804b0d67667b0c97..d226c7a5ba8bee46aeded6bfd181b840259fd4bb 100644 (file)
@@ -127,22 +127,22 @@ the VFIO when devices are unbound from the driver.
 Physical Device Driver Interface
 --------------------------------
 
-The physical device driver interface provides the parent_ops[3] structure to
-define the APIs to manage work in the mediated core driver that is related to
-the physical device.
+The physical device driver interface provides the mdev_parent_ops[3] structure
+to define the APIs to manage work in the mediated core driver that is related
+to the physical device.
 
-The structures in the parent_ops structure are as follows:
+The structures in the mdev_parent_ops structure are as follows:
 
 * dev_attr_groups: attributes of the parent device
 * mdev_attr_groups: attributes of the mediated device
 * supported_config: attributes to define supported configurations
 
-The functions in the parent_ops structure are as follows:
+The functions in the mdev_parent_ops structure are as follows:
 
 * create: allocate basic resources in a driver for a mediated device
 * remove: free resources in a driver when a mediated device is destroyed
 
-The callbacks in the parent_ops structure are as follows:
+The callbacks in the mdev_parent_ops structure are as follows:
 
 * open: open callback of mediated device
 * close: close callback of mediated device
@@ -151,14 +151,14 @@ The callbacks in the parent_ops structure are as follows:
 * write: write emulation callback
 * mmap: mmap emulation callback
 
-A driver should use the parent_ops structure in the function call to register
-itself with the mdev core driver:
+A driver should use the mdev_parent_ops structure in the function call to
+register itself with the mdev core driver:
 
 extern int  mdev_register_device(struct device *dev,
-                                 const struct parent_ops *ops);
+                                 const struct mdev_parent_ops *ops);
 
-However, the parent_ops structure is not required in the function call that a
-driver should use to unregister itself with the mdev core driver:
+However, the mdev_parent_ops structure is not required in the function call
+that a driver should use to unregister itself with the mdev core driver:
 
 extern void mdev_unregister_device(struct device *dev);
 
@@ -223,6 +223,9 @@ Directories and files under the sysfs for Each Physical Device
 
        sprintf(buf, "%s-%s", dev_driver_string(parent->dev), group->name);
 
+  (or using mdev_parent_dev(mdev) to arrive at the parent device outside
+   of the core mdev code)
+
 * device_api
 
   This attribute should show which device API is being created, for example,
@@ -394,5 +397,5 @@ References
 
 [1] See Documentation/vfio.txt for more information on VFIO.
 [2] struct mdev_driver in include/linux/mdev.h
-[3] struct parent_ops in include/linux/mdev.h
+[3] struct mdev_parent_ops in include/linux/mdev.h
 [4] struct vfio_iommu_driver_ops in include/linux/vfio.h
diff --git a/Documentation/vm/page_frags b/Documentation/vm/page_frags
new file mode 100644 (file)
index 0000000..a671456
--- /dev/null
@@ -0,0 +1,42 @@
+Page fragments
+--------------
+
+A page fragment is an arbitrary-length arbitrary-offset area of memory
+which resides within a 0 or higher order compound page.  Multiple
+fragments within that page are individually refcounted, in the page's
+reference counter.
+
+The page_frag functions, page_frag_alloc and page_frag_free, provide a
+simple allocation framework for page fragments.  This is used by the
+network stack and network device drivers to provide a backing region of
+memory for use as either an sk_buff->head, or to be used in the "frags"
+portion of skb_shared_info.
+
+In order to make use of the page fragment APIs a backing page fragment
+cache is needed.  This provides a central point for the fragment allocation
+and tracks allows multiple calls to make use of a cached page.  The
+advantage to doing this is that multiple calls to get_page can be avoided
+which can be expensive at allocation time.  However due to the nature of
+this caching it is required that any calls to the cache be protected by
+either a per-cpu limitation, or a per-cpu limitation and forcing interrupts
+to be disabled when executing the fragment allocation.
+
+The network stack uses two separate caches per CPU to handle fragment
+allocation.  The netdev_alloc_cache is used by callers making use of the
+__netdev_alloc_frag and __netdev_alloc_skb calls.  The napi_alloc_cache is
+used by callers of the __napi_alloc_frag and __napi_alloc_skb calls.  The
+main difference between these two calls is the context in which they may be
+called.  The "netdev" prefixed functions are usable in any context as these
+functions will disable interrupts, while the "napi" prefixed functions are
+only usable within the softirq context.
+
+Many network device drivers use a similar methodology for allocating page
+fragments, but the page fragments are cached at the ring or descriptor
+level.  In order to enable these cases it is necessary to provide a generic
+way of tearing down a page cache.  For this reason __page_frag_cache_drain
+was implemented.  It allows for freeing multiple references from a single
+page via a single call.  The advantage to doing this is that it allows for
+cleaning up the multiple references that were added to a page in order to
+avoid calling get_page per allocation.
+
+Alexander Duyck, Nov 29, 2016.
index cfff2c9e3d9470550fd47dcd7b2638c77121c607..c36976d3bd1a72f42afa53b0c1c548e8f2456d19 100644 (file)
@@ -81,7 +81,6 @@ Descriptions of section entries:
        Q: Patchwork web based patch tracking system site
        T: SCM tree type and location.
           Type is one of: git, hg, quilt, stgit, topgit
-       B: Bug tracking system location.
        S: Status, one of the following:
           Supported:   Someone is actually paid to look after this.
           Maintained:  Someone actually looks after it.
@@ -2194,14 +2193,6 @@ L:       alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Supported
 F:     sound/soc/atmel
 
-ATMEL DMA DRIVER
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:     Supported
-F:     drivers/dma/at_hdmac.c
-F:     drivers/dma/at_hdmac_regs.h
-F:     include/linux/platform_data/dma-atmel.h
-
 ATMEL XDMA DRIVER
 M:     Ludovic Desroches <ludovic.desroches@atmel.com>
 L:     linux-arm-kernel@lists.infradead.org
@@ -3800,6 +3791,7 @@ F:        include/linux/devcoredump.h
 DEVICE FREQUENCY (DEVFREQ)
 M:     MyungJoo Ham <myungjoo.ham@samsung.com>
 M:     Kyungmin Park <kyungmin.park@samsung.com>
+R:     Chanwoo Choi <cw00.choi@samsung.com>
 L:     linux-pm@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mzx/devfreq.git
 S:     Maintained
@@ -4116,7 +4108,7 @@ F:        drivers/gpu/drm/cirrus/
 RADEON and AMDGPU DRM DRIVERS
 M:     Alex Deucher <alexander.deucher@amd.com>
 M:     Christian König <christian.koenig@amd.com>
-L:     dri-devel@lists.freedesktop.org
+L:     amd-gfx@lists.freedesktop.org
 T:     git git://people.freedesktop.org/~agd5f/linux
 S:     Supported
 F:     drivers/gpu/drm/radeon/
@@ -5080,9 +5072,11 @@ F:       drivers/net/wan/dlci.c
 F:     drivers/net/wan/sdla.c
 
 FRAMEBUFFER LAYER
+M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-fbdev@vger.kernel.org
+T:     git git://github.com/bzolnier/linux.git
 Q:     http://patchwork.kernel.org/project/linux-fbdev/list/
-S:     Orphan
+S:     Maintained
 F:     Documentation/fb/
 F:     drivers/video/
 F:     include/video/
@@ -5504,6 +5498,7 @@ M:        Alex Elder <elder@kernel.org>
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 S:     Maintained
 F:     drivers/staging/greybus/
+L:     greybus-dev@lists.linaro.org
 
 GREYBUS AUDIO PROTOCOLS DRIVERS
 M:     Vaibhav Agarwal <vaibhav.sr@gmail.com>
@@ -5961,6 +5956,7 @@ F:        drivers/media/platform/sti/hva
 Hyper-V CORE AND DRIVERS
 M:     "K. Y. Srinivasan" <kys@microsoft.com>
 M:     Haiyang Zhang <haiyangz@microsoft.com>
+M:     Stephen Hemminger <sthemmin@microsoft.com>
 L:     devel@linuxdriverproject.org
 S:     Maintained
 F:     arch/x86/include/asm/mshyperv.h
@@ -8174,6 +8170,15 @@ S:       Maintained
 F:     drivers/tty/serial/atmel_serial.c
 F:     include/linux/atmel_serial.h
 
+MICROCHIP / ATMEL DMA DRIVER
+M:     Ludovic Desroches <ludovic.desroches@microchip.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:     dmaengine@vger.kernel.org
+S:     Supported
+F:     drivers/dma/at_hdmac.c
+F:     drivers/dma/at_hdmac_regs.h
+F:     include/linux/platform_data/dma-atmel.h
+
 MICROCHIP / ATMEL ISC DRIVER
 M:     Songjun Wu <songjun.wu@microchip.com>
 L:     linux-media@vger.kernel.org
@@ -8852,17 +8857,22 @@ F:      drivers/video/fbdev/nvidia/
 NVM EXPRESS DRIVER
 M:     Keith Busch <keith.busch@intel.com>
 M:     Jens Axboe <axboe@fb.com>
+M:     Christoph Hellwig <hch@lst.de>
+M:     Sagi Grimberg <sagi@grimberg.me>
 L:     linux-nvme@lists.infradead.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
-W:     https://kernel.googlesource.com/pub/scm/linux/kernel/git/axboe/linux-block/
+T:     git://git.infradead.org/nvme.git
+W:     http://git.infradead.org/nvme.git
 S:     Supported
 F:     drivers/nvme/host/
 F:     include/linux/nvme.h
+F:     include/uapi/linux/nvme_ioctl.h
 
 NVM EXPRESS TARGET DRIVER
 M:     Christoph Hellwig <hch@lst.de>
 M:     Sagi Grimberg <sagi@grimberg.me>
 L:     linux-nvme@lists.infradead.org
+T:     git://git.infradead.org/nvme.git
+W:     http://git.infradead.org/nvme.git
 S:     Supported
 F:     drivers/nvme/target/
 
@@ -9842,7 +9852,7 @@ M:        Mark Rutland <mark.rutland@arm.com>
 M:     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 L:     linux-arm-kernel@lists.infradead.org
 S:     Maintained
-F:     drivers/firmware/psci.c
+F:     drivers/firmware/psci*.c
 F:     include/linux/psci.h
 F:     include/uapi/linux/psci.h
 
@@ -13527,11 +13537,11 @@ F:    arch/x86/xen/*swiotlb*
 F:     drivers/xen/*swiotlb*
 
 XFS FILESYSTEM
-M:     Dave Chinner <david@fromorbit.com>
+M:     Darrick J. Wong <darrick.wong@oracle.com>
 M:     linux-xfs@vger.kernel.org
 L:     linux-xfs@vger.kernel.org
 W:     http://xfs.org/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dgc/linux-xfs.git
+T:     git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
 S:     Supported
 F:     Documentation/filesystems/xfs.txt
 F:     fs/xfs/
index 5470d599384a5ba676a60490e19baf81a1068b65..5f1a84735ff61a18fddd4bbad964f8ac89ecc8e4 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Roaring Lionus
 
 # *DOCUMENTATION*
index 5fab553fd03ade24fdc1a69a83e5092b791b8a01..186c4c214e0a756b4468b597d5680093408c28c3 100644 (file)
@@ -1502,8 +1502,7 @@ source kernel/Kconfig.preempt
 
 config HZ_FIXED
        int
-       default 200 if ARCH_EBSA110 || ARCH_S3C24XX || \
-               ARCH_S5PV210 || ARCH_EXYNOS4
+       default 200 if ARCH_EBSA110
        default 128 if SOC_AT91RM9200
        default 0
 
index cccdbcb557b6d29b2e45a0c0b9fb792a469cf1fe..7327250f0bb66e716dacd07d35019c858f38cf68 100644 (file)
@@ -501,6 +501,7 @@ dtb-$(CONFIG_ARCH_OMAP3) += \
        am3517-evm.dtb \
        am3517_mt_ventoux.dtb \
        logicpd-torpedo-37xx-devkit.dtb \
+       logicpd-som-lv-37xx-devkit.dtb \
        omap3430-sdp.dtb \
        omap3-beagle.dtb \
        omap3-beagle-xm.dtb \
index dc561d505bbe2ce10054c7bc19452b0b4b76fc4f..3e32dd18fd25f5720ca5449ce494c0a3282d4fc4 100644 (file)
@@ -6,8 +6,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <dt-bindings/mfd/tps65217.h>
-
 / {
        cpus {
                cpu@0 {
        ti,pmic-shutdown-controller;
 
        charger {
-               interrupts = <TPS65217_IRQ_AC>, <TPS65217_IRQ_USB>;
-               interrupts-names = "AC", "USB";
+               interrupts = <0>, <1>;
+               interrupt-names = "USB", "AC";
                status = "okay";
        };
 
        pwrbutton {
-               interrupts = <TPS65217_IRQ_PB>;
+               interrupts = <2>;
                status = "okay";
        };
 
index 64c8aa9057a3366b746078a51e662f76a35e8f4b..18d72a245e889ac28561b34417680702fb0e10e1 100644 (file)
@@ -16,6 +16,7 @@
        interrupt-parent = <&intc>;
        #address-cells = <1>;
        #size-cells = <1>;
+       chosen { };
 
        aliases {
                i2c0 = &i2c0;
index ac55f93fc91e997deef1815adeb9d9dee9e2c555..2df9e6050c2f382fea2d09f6a95e634210d3a88f 100644 (file)
@@ -16,6 +16,7 @@
        interrupt-parent = <&wakeupgen>;
        #address-cells = <1>;
        #size-cells = <1>;
+       chosen { };
 
        memory@0 {
                device_type = "memory";
index d6e43e5184c1829483a3b5b0fb1cc99268a0d611..ad68d1eb3bc3d2a4feec75053323f4ed90d394e4 100644 (file)
                        linux,default-trigger = "mmc0";
                };
        };
-
-       extcon_usb2: extcon_usb2 {
-            compatible = "linux,extcon-usb-gpio";
-            id-gpio = <&gpio5 7 GPIO_ACTIVE_HIGH>;
-       };
 };
 
 &mmc1 {
@@ -79,3 +74,8 @@
 &omap_dwc3_2 {
        extcon = <&extcon_usb2>;
 };
+
+&extcon_usb2 {
+       id-gpio = <&gpio5 7 GPIO_ACTIVE_HIGH>;
+       vbus-gpio = <&gpio7 22 GPIO_ACTIVE_HIGH>;
+};
index 27d9149cedba78c81c7025a2404d3f80b14524f6..8350b4b34b085231663c9235b570e487827a5000 100644 (file)
                reg = <0x0 0x80000000 0x0 0x80000000>;
        };
 
-       extcon_usb2: extcon_usb2 {
-               compatible = "linux,extcon-usb-gpio";
-               id-gpio = <&gpio3 16 GPIO_ACTIVE_HIGH>;
-       };
-
        status-leds {
                compatible = "gpio-leds";
                cpu0-led {
        extcon = <&extcon_usb2>;
 };
 
+&extcon_usb2 {
+       id-gpio = <&gpio3 16 GPIO_ACTIVE_HIGH>;
+       vbus-gpio = <&gpio3 26 GPIO_ACTIVE_HIGH>;
+};
+
 &mmc1 {
        status = "okay";
        vmmc-supply = <&v3_3d>;
@@ -87,3 +87,7 @@
 &sn65hvs882 {
        load-gpios = <&gpio3 19 GPIO_ACTIVE_LOW>;
 };
+
+&pcie1 {
+       gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
+};
index 555ae21f2b9adcf2ad811b5541a3f865997bb098..814a720d5c3db5a9d1a5c94719fdf89207c435e2 100644 (file)
                        gpio-controller;
                        #gpio-cells = <2>;
                };
+
+               extcon_usb2: tps659038_usb {
+                       compatible = "ti,palmas-usb-vid";
+                       ti,enable-vbus-detection;
+                       ti,enable-id-detection;
+                       /* ID & VBUS GPIOs provided in board dts */
+               };
        };
 };
 
 };
 
 &usb2 {
-       dr_mode = "otg";
+       dr_mode = "peripheral";
 };
 
 &mmc2 {
index 1facc5f12cef700acf90e0b2f2e8594096a9bf9a..81b8cecb58206d8c98d9d986a1995e67e17fe7ed 100644 (file)
@@ -12,6 +12,7 @@
        interrupt-parent = <&intc>;
        #address-cells = <1>;
        #size-cells = <1>;
+       chosen { };
 
        aliases {
                i2c0 = &i2c1;
index 61dd2f6b02bcfe47d7d6a5ddd5734de0e5687e89..6db652ae9bd558021ac99a6a8bcb954a5c94b806 100644 (file)
@@ -12,6 +12,7 @@
        interrupt-parent = <&intc>;
        #address-cells = <1>;
        #size-cells = <1>;
+       chosen { };
 
        aliases {
                i2c0 = &i2c1;
index addb7530cfbe0dcece696c2e84fbb5ea8896ceb2..1faf24acd521d3cc4ed22d24c8fe0ff469e9a30a 100644 (file)
@@ -18,6 +18,7 @@
 
        compatible = "ti,dra7xx";
        interrupt-parent = <&crossbar_mpu>;
+       chosen { };
 
        aliases {
                i2c0 = &i2c1;
index ee6dac44edf1ada06b65a48d213ec9d8d966119b..e6df676886c0c35052921669039ae31fd446ccbf 100644 (file)
                ti,palmas-long-press-seconds = <6>;
        };
 };
+
+&usb2_phy1 {
+       phy-supply = <&ldo4_reg>;
+};
+
+&usb2_phy2 {
+       phy-supply = <&ldo4_reg>;
+};
+
+&dss {
+       vdda_video-supply = <&ldo5_reg>;
+};
+
+&mmc1 {
+       vmmc_aux-supply = <&ldo1_reg>;
+};
index 685916e3d8a1eaefb083d2a7a81583c18a360168..85cd8be22f7155edae2d56ac5a99984427aa6131 100644 (file)
                };
        };
 
-       avic: avic-interrupt-controller@60000000 {
+       avic: interrupt-controller@68000000 {
                compatible = "fsl,imx31-avic", "fsl,avic";
                interrupt-controller;
                #interrupt-cells = <1>;
-               reg = <0x60000000 0x100000>;
+               reg = <0x68000000 0x100000>;
        };
 
        soc {
index e476d01959ea3f337eb966559c241b3ef82f7fba..26d0604847282879f2286ac9c7242a84b8ac271b 100644 (file)
                                MX6QDL_PAD_SD2_DAT1__SD2_DATA1          0x17071
                                MX6QDL_PAD_SD2_DAT2__SD2_DATA2          0x17071
                                MX6QDL_PAD_SD2_DAT3__SD2_DATA3          0x17071
-                               MX6QDL_PAD_NANDF_CS2__GPIO6_IO15        0x000b0
                        >;
                };
 
index 53e6e63cbb0235d634f108f12ad1fd75001817dd..89b834f3fa17f6b576e31fd61e8bfad205f73923 100644 (file)
                                interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clks IMX6QDL_CLK_EIM_SLOW>;
                                fsl,weim-cs-gpr = <&gpr>;
+                               status = "disabled";
                        };
 
                        ocotp: ocotp@021bc000 {
index 4fd6de29f07db21ba1c4552b8d4e37c59858699f..19cbd879c448984717a83e1d819efdec822c4957 100644 (file)
                                reg = <0x021b8000 0x4000>;
                                interrupts = <0 14 IRQ_TYPE_LEVEL_HIGH>;
                                fsl,weim-cs-gpr = <&gpr>;
+                               status = "disabled";
                        };
 
                        ocotp: ocotp@021bc000 {
index 076a30f9bcae26d8e62b119ee92fd71d1944e922..10f33301619777a9d676eb49bae893540d920973 100644 (file)
                                interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clks IMX6SX_CLK_EIM_SLOW>;
                                fsl,weim-cs-gpr = <&gpr>;
+                               status = "disabled";
                        };
 
                        ocotp: ocotp@021bc000 {
index 4f793a025a721b03f3a5e1f074c4c531033cf62f..f1d6de8b3c193eee0d88b0465f33de4e122ba266 100644 (file)
@@ -17,6 +17,7 @@
        interrupt-parent = <&intc>;
        #address-cells = <1>;
        #size-cells = <1>;
+       chosen { };
 
        aliases {
                serial0 = &uart1;
index 87ca50b53002b9cf244b9dca24a0f2fbdca3dabc..4d448f145ed1c2941bda6d76e91d7573c0a37568 100644 (file)
        vmmc_aux-supply = <&vsim>;
        bus-width = <8>;
        non-removable;
+       no-sdio;
+       no-sd;
 };
 
 &mmc3 {
index ecf5eb584c75058598b9c90bc3f3568bf3a7ce7e..a3ff4933dbc173936bbec5a222aba6642e70564d 100644 (file)
@@ -17,6 +17,7 @@
        interrupt-parent = <&intc>;
        #address-cells = <1>;
        #size-cells = <1>;
+       chosen { };
 
        aliases {
                i2c0 = &i2c1;
index 8087456b5fbec60c9379e5937b299ce820a7cb03..578c53f08309090069454261e09bb0f1617e4a98 100644 (file)
@@ -15,6 +15,7 @@
        interrupt-parent = <&wakeupgen>;
        #address-cells = <1>;
        #size-cells = <1>;
+       chosen { };
 
        aliases {
                i2c0 = &i2c1;
index 968c67a49dbd158b3b3ca24ad2bd85b6f45687d6..7cd92babc41a688cf8bb474972d053ea7b8b33d6 100644 (file)
@@ -17,6 +17,7 @@
 
        compatible = "ti,omap5";
        interrupt-parent = <&wakeupgen>;
+       chosen { };
 
        aliases {
                i2c0 = &i2c1;
index 268bd470c865e6022d907ca495b6913acfd7c1ef..407a4610f4a7e055a488defe3fc52c05f1b3fa63 100644 (file)
@@ -4,6 +4,7 @@
 #include <dt-bindings/clock/qcom,gcc-msm8960.h>
 #include <dt-bindings/reset/qcom,gcc-msm8960.h>
 #include <dt-bindings/clock/qcom,mmcc-msm8960.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
 #include <dt-bindings/soc/qcom,gsbi.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
        firmware {
                scm {
                        compatible = "qcom,scm-apq8064";
+
+                       clocks = <&rpmcc RPM_DAYTONA_FABRIC_CLK>;
+                       clock-names = "core";
                };
        };
 
index 102838fcc5880ca0f4fbb90a23098191004d9386..15f4fd3f469561caa7fba30fed8c6a10c18c6668 100644 (file)
@@ -81,7 +81,7 @@
                #address-cells = <0>;
                interrupt-controller;
                reg = <0 0x2c001000 0 0x1000>,
-                     <0 0x2c002000 0 0x1000>,
+                     <0 0x2c002000 0 0x2000>,
                      <0 0x2c004000 0 0x2000>,
                      <0 0x2c006000 0 0x2000>;
                interrupts = <1 9 0xf04>;
index 45d08cc37b0134c71b11bb580fb574386b46c47b..bd107c5a02267f33a02e31e0e5fd116672684d73 100644 (file)
                #address-cells = <0>;
                interrupt-controller;
                reg = <0 0x2c001000 0 0x1000>,
-                     <0 0x2c002000 0 0x1000>,
+                     <0 0x2c002000 0 0x2000>,
                      <0 0x2c004000 0 0x2000>,
                      <0 0x2c006000 0 0x2000>;
                interrupts = <1 9 0xf04>;
index 7ea617e47fe41123193bc10a9cb331d9687e015a..958b4c42d32040105fd545ee2e62ceaadb0b682c 100644 (file)
                                        switch0phy1: switch1phy0@1 {
                                                reg = <1>;
                                                interrupt-parent = <&switch0>;
-                                               interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;                                   };
+                                               interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
+                                       };
                                        switch0phy2: switch1phy0@2 {
                                                reg = <2>;
                                                interrupt-parent = <&switch0>;
index df42c93a93d69e88db6e8e621271a59e36025c5f..f5dce9b4e617df833cd210bac2fe65ac4a93b788 100644 (file)
@@ -31,10 +31,10 @@ static LIST_HEAD(clocks);
 static DEFINE_MUTEX(clocks_mutex);
 static DEFINE_SPINLOCK(clockfw_lock);
 
-static void __clk_enable(struct clk *clk)
+void davinci_clk_enable(struct clk *clk)
 {
        if (clk->parent)
-               __clk_enable(clk->parent);
+               davinci_clk_enable(clk->parent);
        if (clk->usecount++ == 0) {
                if (clk->flags & CLK_PSC)
                        davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
@@ -44,7 +44,7 @@ static void __clk_enable(struct clk *clk)
        }
 }
 
-static void __clk_disable(struct clk *clk)
+void davinci_clk_disable(struct clk *clk)
 {
        if (WARN_ON(clk->usecount == 0))
                return;
@@ -56,7 +56,7 @@ static void __clk_disable(struct clk *clk)
                        clk->clk_disable(clk);
        }
        if (clk->parent)
-               __clk_disable(clk->parent);
+               davinci_clk_disable(clk->parent);
 }
 
 int davinci_clk_reset(struct clk *clk, bool reset)
@@ -103,7 +103,7 @@ int clk_enable(struct clk *clk)
                return -EINVAL;
 
        spin_lock_irqsave(&clockfw_lock, flags);
-       __clk_enable(clk);
+       davinci_clk_enable(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);
 
        return 0;
@@ -118,7 +118,7 @@ void clk_disable(struct clk *clk)
                return;
 
        spin_lock_irqsave(&clockfw_lock, flags);
-       __clk_disable(clk);
+       davinci_clk_disable(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);
 }
 EXPORT_SYMBOL(clk_disable);
index e2a5437a1aee3c017f4f353410e0f0e314a96274..fa2b83752e030a494ae3f02a09db118e463488cd 100644 (file)
@@ -132,6 +132,8 @@ int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate);
 int davinci_set_refclk_rate(unsigned long rate);
 int davinci_simple_set_rate(struct clk *clk, unsigned long rate);
 int davinci_clk_reset(struct clk *clk, bool reset);
+void davinci_clk_enable(struct clk *clk);
+void davinci_clk_disable(struct clk *clk);
 
 extern struct platform_device davinci_wdt_device;
 extern void davinci_watchdog_reset(struct platform_device *);
index e770c97ea45c28bfc77232bb1873f976dcfc271c..1d873d15b545c26a97eefdf15d92d437ff2e1701 100644 (file)
@@ -319,6 +319,16 @@ static struct clk emac_clk = {
        .gpsc           = 1,
 };
 
+/*
+ * In order to avoid adding the emac_clk to the clock lookup table twice (and
+ * screwing up the linked list in the process) create a separate clock for
+ * mdio inheriting the rate from emac_clk.
+ */
+static struct clk mdio_clk = {
+       .name           = "mdio",
+       .parent         = &emac_clk,
+};
+
 static struct clk mcasp_clk = {
        .name           = "mcasp",
        .parent         = &async3_clk,
@@ -367,6 +377,16 @@ static struct clk aemif_clk = {
        .flags          = ALWAYS_ENABLED,
 };
 
+/*
+ * In order to avoid adding the aemif_clk to the clock lookup table twice (and
+ * screwing up the linked list in the process) create a separate clock for
+ * nand inheriting the rate from aemif_clk.
+ */
+static struct clk aemif_nand_clk = {
+       .name           = "nand",
+       .parent         = &aemif_clk,
+};
+
 static struct clk usb11_clk = {
        .name           = "usb11",
        .parent         = &pll0_sysclk4,
@@ -529,7 +549,7 @@ static struct clk_lookup da850_clks[] = {
        CLK(NULL,               "arm",          &arm_clk),
        CLK(NULL,               "rmii",         &rmii_clk),
        CLK("davinci_emac.1",   NULL,           &emac_clk),
-       CLK("davinci_mdio.0",   "fck",          &emac_clk),
+       CLK("davinci_mdio.0",   "fck",          &mdio_clk),
        CLK("davinci-mcasp.0",  NULL,           &mcasp_clk),
        CLK("davinci-mcbsp.0",  NULL,           &mcbsp0_clk),
        CLK("davinci-mcbsp.1",  NULL,           &mcbsp1_clk),
@@ -537,7 +557,15 @@ static struct clk_lookup da850_clks[] = {
        CLK("da830-mmc.0",      NULL,           &mmcsd0_clk),
        CLK("da830-mmc.1",      NULL,           &mmcsd1_clk),
        CLK("ti-aemif",         NULL,           &aemif_clk),
-       CLK(NULL,               "aemif",        &aemif_clk),
+       /*
+        * The only user of this clock is davinci_nand and it get's it through
+        * con_id. The nand node itself is created from within the aemif
+        * driver to guarantee that it's probed after the aemif timing
+        * parameters are configured. of_dev_auxdata is not accessible from
+        * the aemif driver and can't be passed to of_platform_populate(). For
+        * that reason we're leaving the dev_id here as NULL.
+        */
+       CLK(NULL,               "aemif",        &aemif_nand_clk),
        CLK("ohci-da8xx",       "usb11",        &usb11_clk),
        CLK("musb-da8xx",       "usb20",        &usb20_clk),
        CLK("spi_davinci.0",    NULL,           &spi0_clk),
index c6feecf7ae242f36ce02ff1c2c43a8c4ad6807d1..9a6af0bd5dc340690bbe794b972b0aeb529faeff 100644 (file)
@@ -22,6 +22,8 @@
 #define DA8XX_USB0_BASE                0x01e00000
 #define DA8XX_USB1_BASE                0x01e25000
 
+static struct clk *usb20_clk;
+
 static struct platform_device da8xx_usb_phy = {
        .name           = "da8xx-usb-phy",
        .id             = -1,
@@ -158,26 +160,13 @@ int __init da8xx_register_usb_refclkin(int rate)
 
 static void usb20_phy_clk_enable(struct clk *clk)
 {
-       struct clk *usb20_clk;
-       int err;
        u32 val;
        u32 timeout = 500000; /* 500 msec */
 
        val = readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
 
-       usb20_clk = clk_get(&da8xx_usb20_dev.dev, "usb20");
-       if (IS_ERR(usb20_clk)) {
-               pr_err("could not get usb20 clk: %ld\n", PTR_ERR(usb20_clk));
-               return;
-       }
-
        /* The USB 2.O PLL requires that the USB 2.O PSC is enabled as well. */
-       err = clk_prepare_enable(usb20_clk);
-       if (err) {
-               pr_err("failed to enable usb20 clk: %d\n", err);
-               clk_put(usb20_clk);
-               return;
-       }
+       davinci_clk_enable(usb20_clk);
 
        /*
         * Turn on the USB 2.0 PHY, but just the PLL, and not OTG. The USB 1.1
@@ -197,8 +186,7 @@ static void usb20_phy_clk_enable(struct clk *clk)
 
        pr_err("Timeout waiting for USB 2.0 PHY clock good\n");
 done:
-       clk_disable_unprepare(usb20_clk);
-       clk_put(usb20_clk);
+       davinci_clk_disable(usb20_clk);
 }
 
 static void usb20_phy_clk_disable(struct clk *clk)
@@ -285,11 +273,19 @@ static struct clk_lookup usb20_phy_clk_lookup =
 int __init da8xx_register_usb20_phy_clk(bool use_usb_refclkin)
 {
        struct clk *parent;
-       int ret = 0;
+       int ret;
+
+       usb20_clk = clk_get(&da8xx_usb20_dev.dev, "usb20");
+       ret = PTR_ERR_OR_ZERO(usb20_clk);
+       if (ret)
+               return ret;
 
        parent = clk_get(NULL, use_usb_refclkin ? "usb_refclkin" : "pll0_aux");
-       if (IS_ERR(parent))
-               return PTR_ERR(parent);
+       ret = PTR_ERR_OR_ZERO(parent);
+       if (ret) {
+               clk_put(usb20_clk);
+               return ret;
+       }
 
        usb20_phy_clk.parent = parent;
        ret = clk_register(&usb20_phy_clk);
index 98ffe1e62ad5d6debe7c087743d728d5730c26d3..a5d68411a037994cfcf7f3c2b62c0afb5d91617f 100644 (file)
@@ -385,36 +385,6 @@ fail:
        return pen_release != -1 ? ret : 0;
 }
 
-/*
- * Initialise the CPU possible map early - this describes the CPUs
- * which may be present or become present in the system.
- */
-
-static void __init exynos_smp_init_cpus(void)
-{
-       void __iomem *scu_base = scu_base_addr();
-       unsigned int i, ncores;
-
-       if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
-               ncores = scu_base ? scu_get_core_count(scu_base) : 1;
-       else
-               /*
-                * CPU Nodes are passed thru DT and set_cpu_possible
-                * is set by "arm_dt_init_cpu_maps".
-                */
-               return;
-
-       /* sanity check */
-       if (ncores > nr_cpu_ids) {
-               pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
-                       ncores, nr_cpu_ids);
-               ncores = nr_cpu_ids;
-       }
-
-       for (i = 0; i < ncores; i++)
-               set_cpu_possible(i, true);
-}
-
 static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
 {
        int i;
@@ -479,7 +449,6 @@ static void exynos_cpu_die(unsigned int cpu)
 #endif /* CONFIG_HOTPLUG_CPU */
 
 const struct smp_operations exynos_smp_ops __initconst = {
-       .smp_init_cpus          = exynos_smp_init_cpus,
        .smp_prepare_cpus       = exynos_smp_prepare_cpus,
        .smp_secondary_init     = exynos_secondary_init,
        .smp_boot_secondary     = exynos_boot_secondary,
index de5ab8d88549de87ea88fc92c2bf4eda7a022e38..3a8406e45b65dceedf0abdb02f59b4b30bf394a8 100644 (file)
@@ -37,7 +37,6 @@ static const char * const imx1_dt_board_compat[] __initconst = {
 };
 
 DT_MACHINE_START(IMX1_DT, "Freescale i.MX1 (Device Tree Support)")
-       .map_io         = debug_ll_io_init,
        .init_early     = imx1_init_early,
        .init_irq       = imx1_init_irq,
        .dt_compat      = imx1_dt_board_compat,
index 469894082fea00c9605ea4dc370d69b18f5b40d3..093458b62c8dadbcc3c7cc1c3b66d84e59af3d8d 100644 (file)
@@ -7,7 +7,7 @@ ccflags-y := -I$(srctree)/$(src)/include \
 
 # Common support
 obj-y := id.o io.o control.o devices.o fb.o timer.o pm.o \
-        common.o gpio.o dma.o wd_timer.o display.o i2c.o hdq1w.o omap_hwmod.o \
+        common.o dma.o wd_timer.o display.o i2c.o hdq1w.o omap_hwmod.o \
         omap_device.o omap-headsmp.o sram.o drm.o
 
 hwmod-common                           = omap_hwmod.o omap_hwmod_reset.o \
index 36d9943205ca4bb7bff163656f7760129c620e3c..dc9e34e670a26f280bdfe7947fa8709ee750465f 100644 (file)
@@ -304,7 +304,7 @@ DT_MACHINE_START(AM43_DT, "Generic AM43 (Flattened Device Tree)")
        .init_late      = am43xx_init_late,
        .init_irq       = omap_gic_of_init,
        .init_machine   = omap_generic_init,
-       .init_time      = omap4_local_timer_init,
+       .init_time      = omap3_gptimer_timer_init,
        .dt_compat      = am43_boards_compat,
        .restart        = omap44xx_restart,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/gpio.c b/arch/arm/mach-omap2/gpio.c
deleted file mode 100644 (file)
index 7a57714..0000000
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * OMAP2+ specific gpio initialization
- *
- * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
- *
- * Author:
- *     Charulatha V <charu@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/gpio.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/of.h>
-#include <linux/platform_data/gpio-omap.h>
-
-#include "soc.h"
-#include "omap_hwmod.h"
-#include "omap_device.h"
-#include "omap-pm.h"
-
-#include "powerdomain.h"
-
-static int __init omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused)
-{
-       struct platform_device *pdev;
-       struct omap_gpio_platform_data *pdata;
-       struct omap_gpio_dev_attr *dev_attr;
-       char *name = "omap_gpio";
-       int id;
-       struct powerdomain *pwrdm;
-
-       /*
-        * extract the device id from name field available in the
-        * hwmod database and use the same for constructing ids for
-        * gpio devices.
-        * CAUTION: Make sure the name in the hwmod database does
-        * not change. If changed, make corresponding change here
-        * or make use of static variable mechanism to handle this.
-        */
-       sscanf(oh->name, "gpio%d", &id);
-
-       pdata = kzalloc(sizeof(struct omap_gpio_platform_data), GFP_KERNEL);
-       if (!pdata) {
-               pr_err("gpio%d: Memory allocation failed\n", id);
-               return -ENOMEM;
-       }
-
-       dev_attr = (struct omap_gpio_dev_attr *)oh->dev_attr;
-       pdata->bank_width = dev_attr->bank_width;
-       pdata->dbck_flag = dev_attr->dbck_flag;
-       pdata->get_context_loss_count = omap_pm_get_dev_context_loss_count;
-       pdata->regs = kzalloc(sizeof(struct omap_gpio_reg_offs), GFP_KERNEL);
-       if (!pdata->regs) {
-               pr_err("gpio%d: Memory allocation failed\n", id);
-               kfree(pdata);
-               return -ENOMEM;
-       }
-
-       switch (oh->class->rev) {
-       case 0:
-               if (id == 1)
-                       /* non-wakeup GPIO pins for OMAP2 Bank1 */
-                       pdata->non_wakeup_gpios = 0xe203ffc0;
-               else if (id == 2)
-                       /* non-wakeup GPIO pins for OMAP2 Bank2 */
-                       pdata->non_wakeup_gpios = 0x08700040;
-               /* fall through */
-
-       case 1:
-               pdata->regs->revision = OMAP24XX_GPIO_REVISION;
-               pdata->regs->direction = OMAP24XX_GPIO_OE;
-               pdata->regs->datain = OMAP24XX_GPIO_DATAIN;
-               pdata->regs->dataout = OMAP24XX_GPIO_DATAOUT;
-               pdata->regs->set_dataout = OMAP24XX_GPIO_SETDATAOUT;
-               pdata->regs->clr_dataout = OMAP24XX_GPIO_CLEARDATAOUT;
-               pdata->regs->irqstatus = OMAP24XX_GPIO_IRQSTATUS1;
-               pdata->regs->irqstatus2 = OMAP24XX_GPIO_IRQSTATUS2;
-               pdata->regs->irqenable = OMAP24XX_GPIO_IRQENABLE1;
-               pdata->regs->irqenable2 = OMAP24XX_GPIO_IRQENABLE2;
-               pdata->regs->set_irqenable = OMAP24XX_GPIO_SETIRQENABLE1;
-               pdata->regs->clr_irqenable = OMAP24XX_GPIO_CLEARIRQENABLE1;
-               pdata->regs->debounce = OMAP24XX_GPIO_DEBOUNCE_VAL;
-               pdata->regs->debounce_en = OMAP24XX_GPIO_DEBOUNCE_EN;
-               pdata->regs->ctrl = OMAP24XX_GPIO_CTRL;
-               pdata->regs->wkup_en = OMAP24XX_GPIO_WAKE_EN;
-               pdata->regs->leveldetect0 = OMAP24XX_GPIO_LEVELDETECT0;
-               pdata->regs->leveldetect1 = OMAP24XX_GPIO_LEVELDETECT1;
-               pdata->regs->risingdetect = OMAP24XX_GPIO_RISINGDETECT;
-               pdata->regs->fallingdetect = OMAP24XX_GPIO_FALLINGDETECT;
-               break;
-       case 2:
-               pdata->regs->revision = OMAP4_GPIO_REVISION;
-               pdata->regs->direction = OMAP4_GPIO_OE;
-               pdata->regs->datain = OMAP4_GPIO_DATAIN;
-               pdata->regs->dataout = OMAP4_GPIO_DATAOUT;
-               pdata->regs->set_dataout = OMAP4_GPIO_SETDATAOUT;
-               pdata->regs->clr_dataout = OMAP4_GPIO_CLEARDATAOUT;
-               pdata->regs->irqstatus_raw0 = OMAP4_GPIO_IRQSTATUSRAW0;
-               pdata->regs->irqstatus_raw1 = OMAP4_GPIO_IRQSTATUSRAW1;
-               pdata->regs->irqstatus = OMAP4_GPIO_IRQSTATUS0;
-               pdata->regs->irqstatus2 = OMAP4_GPIO_IRQSTATUS1;
-               pdata->regs->irqenable = OMAP4_GPIO_IRQSTATUSSET0;
-               pdata->regs->irqenable2 = OMAP4_GPIO_IRQSTATUSSET1;
-               pdata->regs->set_irqenable = OMAP4_GPIO_IRQSTATUSSET0;
-               pdata->regs->clr_irqenable = OMAP4_GPIO_IRQSTATUSCLR0;
-               pdata->regs->debounce = OMAP4_GPIO_DEBOUNCINGTIME;
-               pdata->regs->debounce_en = OMAP4_GPIO_DEBOUNCENABLE;
-               pdata->regs->ctrl = OMAP4_GPIO_CTRL;
-               pdata->regs->wkup_en = OMAP4_GPIO_IRQWAKEN0;
-               pdata->regs->leveldetect0 = OMAP4_GPIO_LEVELDETECT0;
-               pdata->regs->leveldetect1 = OMAP4_GPIO_LEVELDETECT1;
-               pdata->regs->risingdetect = OMAP4_GPIO_RISINGDETECT;
-               pdata->regs->fallingdetect = OMAP4_GPIO_FALLINGDETECT;
-               break;
-       default:
-               WARN(1, "Invalid gpio bank_type\n");
-               kfree(pdata->regs);
-               kfree(pdata);
-               return -EINVAL;
-       }
-
-       pwrdm = omap_hwmod_get_pwrdm(oh);
-       pdata->loses_context = pwrdm_can_ever_lose_context(pwrdm);
-
-       pdev = omap_device_build(name, id - 1, oh, pdata, sizeof(*pdata));
-       kfree(pdata);
-
-       if (IS_ERR(pdev)) {
-               WARN(1, "Can't build omap_device for %s:%s.\n",
-                                       name, oh->name);
-               return PTR_ERR(pdev);
-       }
-
-       return 0;
-}
-
-/*
- * gpio_init needs to be done before
- * machine_init functions access gpio APIs.
- * Hence gpio_init is a omap_postcore_initcall.
- */
-static int __init omap2_gpio_init(void)
-{
-       /* If dtb is there, the devices will be created dynamically */
-       if (of_have_populated_dt())
-               return -ENODEV;
-
-       return omap_hwmod_for_each_by_class("gpio", omap2_gpio_dev_init, NULL);
-}
-omap_postcore_initcall(omap2_gpio_init);
index 759e1d45ba25c3977af1d7048bec577062a51b66..e8b988714a09f842c01b5ea8a22d09c31ba20f1d 100644 (file)
@@ -741,14 +741,14 @@ static int _init_main_clk(struct omap_hwmod *oh)
        int ret = 0;
        char name[MOD_CLK_MAX_NAME_LEN];
        struct clk *clk;
+       static const char modck[] = "_mod_ck";
 
-       /* +7 magic comes from '_mod_ck' suffix */
-       if (strlen(oh->name) + 7 > MOD_CLK_MAX_NAME_LEN)
+       if (strlen(oh->name) >= MOD_CLK_MAX_NAME_LEN - strlen(modck))
                pr_warn("%s: warning: cropping name for %s\n", __func__,
                        oh->name);
 
-       strncpy(name, oh->name, MOD_CLK_MAX_NAME_LEN - 7);
-       strcat(name, "_mod_ck");
+       strlcpy(name, oh->name, MOD_CLK_MAX_NAME_LEN - strlen(modck));
+       strlcat(name, modck, MOD_CLK_MAX_NAME_LEN);
 
        clk = clk_get(NULL, name);
        if (!IS_ERR(clk)) {
index cdfbb44ceb0c4f3059bc6568e2e765304f7e374a..f22e9cb39f4ac16af15020a0990e32dd975a9bd3 100644 (file)
@@ -121,10 +121,6 @@ extern struct omap_hwmod_irq_info omap2_uart3_mpu_irqs[];
 extern struct omap_hwmod_irq_info omap2_dispc_irqs[];
 extern struct omap_hwmod_irq_info omap2_i2c1_mpu_irqs[];
 extern struct omap_hwmod_irq_info omap2_i2c2_mpu_irqs[];
-extern struct omap_hwmod_irq_info omap2_gpio1_irqs[];
-extern struct omap_hwmod_irq_info omap2_gpio2_irqs[];
-extern struct omap_hwmod_irq_info omap2_gpio3_irqs[];
-extern struct omap_hwmod_irq_info omap2_gpio4_irqs[];
 extern struct omap_hwmod_irq_info omap2_dma_system_irqs[];
 extern struct omap_hwmod_irq_info omap2_mcspi1_mpu_irqs[];
 extern struct omap_hwmod_irq_info omap2_mcspi2_mpu_irqs[];
index 5b2f5138d938ac626a1895b71a9a44cd0d757785..2b138b65129a5d609ff2ae02a55181c10154113b 100644 (file)
@@ -295,10 +295,8 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
                GFP_KERNEL);
 
        if (!prcm_irq_chips || !prcm_irq_setup->saved_mask ||
-           !prcm_irq_setup->priority_mask) {
-               pr_err("PRCM: kzalloc failed\n");
+           !prcm_irq_setup->priority_mask)
                goto err;
-       }
 
        memset(mask, 0, sizeof(mask));
 
index 56128da23c3a1531994e37522a05e625764cc6ee..07dd692c47372f8aa145d00cb5532b75774429c5 100644 (file)
@@ -510,18 +510,19 @@ void __init omap3_secure_sync32k_timer_init(void)
 }
 #endif /* CONFIG_ARCH_OMAP3 */
 
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM33XX)
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM33XX) || \
+       defined(CONFIG_SOC_AM43XX)
 void __init omap3_gptimer_timer_init(void)
 {
        __omap_sync32k_timer_init(2, "timer_sys_ck", NULL,
                        1, "timer_sys_ck", "ti,timer-alwon", true);
-
-       clocksource_probe();
+       if (of_have_populated_dt())
+               clocksource_probe();
 }
 #endif
 
 #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) ||         \
-       defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM43XX)
+       defined(CONFIG_SOC_DRA7XX)
 static void __init omap4_sync32k_timer_init(void)
 {
        __omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon",
index f6c3f151d0d48c2c62ca056a18fbbe16edc309d9..b59f4f4f256f2bd6785b086c40bbefb0bb68315c 100644 (file)
@@ -345,10 +345,40 @@ static struct s3c24xx_dma_channel s3c2410_dma_channels[DMACH_MAX] = {
        [DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 3), },
 };
 
+static const struct dma_slave_map s3c2410_dma_slave_map[] = {
+       { "s3c2410-sdi", "rx-tx", (void *)DMACH_SDI },
+       { "s3c2410-spi.0", "rx", (void *)DMACH_SPI0_RX },
+       { "s3c2410-spi.0", "tx", (void *)DMACH_SPI0_TX },
+       { "s3c2410-spi.1", "rx", (void *)DMACH_SPI1_RX },
+       { "s3c2410-spi.1", "tx", (void *)DMACH_SPI1_TX },
+       /*
+        * The DMA request source[1] (DMACH_UARTx_SRC2) are
+        * not used in the UART driver.
+        */
+       { "s3c2410-uart.0", "rx", (void *)DMACH_UART0 },
+       { "s3c2410-uart.0", "tx", (void *)DMACH_UART0 },
+       { "s3c2410-uart.1", "rx", (void *)DMACH_UART1 },
+       { "s3c2410-uart.1", "tx", (void *)DMACH_UART1 },
+       { "s3c2410-uart.2", "rx", (void *)DMACH_UART2 },
+       { "s3c2410-uart.2", "tx", (void *)DMACH_UART2 },
+       { "s3c24xx-iis", "rx", (void *)DMACH_I2S_IN },
+       { "s3c24xx-iis", "tx", (void *)DMACH_I2S_OUT },
+       { "s3c-hsudc", "rx0", (void *)DMACH_USB_EP1 },
+       { "s3c-hsudc", "tx0", (void *)DMACH_USB_EP1 },
+       { "s3c-hsudc", "rx1", (void *)DMACH_USB_EP2 },
+       { "s3c-hsudc", "tx1", (void *)DMACH_USB_EP2 },
+       { "s3c-hsudc", "rx2", (void *)DMACH_USB_EP3 },
+       { "s3c-hsudc", "tx2", (void *)DMACH_USB_EP3 },
+       { "s3c-hsudc", "rx3", (void *)DMACH_USB_EP4 },
+       { "s3c-hsudc", "tx3", (void *)DMACH_USB_EP4 }
+};
+
 static struct s3c24xx_dma_platdata s3c2410_dma_platdata = {
        .num_phy_channels = 4,
        .channels = s3c2410_dma_channels,
        .num_channels = DMACH_MAX,
+       .slave_map = s3c2410_dma_slave_map,
+       .slavecnt = ARRAY_SIZE(s3c2410_dma_slave_map),
 };
 
 struct platform_device s3c2410_device_dma = {
@@ -388,10 +418,36 @@ static struct s3c24xx_dma_channel s3c2412_dma_channels[DMACH_MAX] = {
        [DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, 16 },
 };
 
+static const struct dma_slave_map s3c2412_dma_slave_map[] = {
+       { "s3c2412-sdi", "rx-tx", (void *)DMACH_SDI },
+       { "s3c2412-spi.0", "rx", (void *)DMACH_SPI0_RX },
+       { "s3c2412-spi.0", "tx", (void *)DMACH_SPI0_TX },
+       { "s3c2412-spi.1", "rx", (void *)DMACH_SPI1_RX },
+       { "s3c2412-spi.1", "tx", (void *)DMACH_SPI1_TX },
+       { "s3c2440-uart.0", "rx", (void *)DMACH_UART0 },
+       { "s3c2440-uart.0", "tx", (void *)DMACH_UART0 },
+       { "s3c2440-uart.1", "rx", (void *)DMACH_UART1 },
+       { "s3c2440-uart.1", "tx", (void *)DMACH_UART1 },
+       { "s3c2440-uart.2", "rx", (void *)DMACH_UART2 },
+       { "s3c2440-uart.2", "tx", (void *)DMACH_UART2 },
+       { "s3c2412-iis", "rx", (void *)DMACH_I2S_IN },
+       { "s3c2412-iis", "tx", (void *)DMACH_I2S_OUT },
+       { "s3c-hsudc", "rx0", (void *)DMACH_USB_EP1 },
+       { "s3c-hsudc", "tx0", (void *)DMACH_USB_EP1 },
+       { "s3c-hsudc", "rx1", (void *)DMACH_USB_EP2 },
+       { "s3c-hsudc", "tx1", (void *)DMACH_USB_EP2 },
+       { "s3c-hsudc", "rx2", (void *)DMACH_USB_EP3 },
+       { "s3c-hsudc", "tx2", (void *)DMACH_USB_EP3 },
+       { "s3c-hsudc", "rx3", (void *)DMACH_USB_EP4 },
+       { "s3c-hsudc", "tx3", (void *)DMACH_USB_EP4 }
+};
+
 static struct s3c24xx_dma_platdata s3c2412_dma_platdata = {
        .num_phy_channels = 4,
        .channels = s3c2412_dma_channels,
        .num_channels = DMACH_MAX,
+       .slave_map = s3c2412_dma_slave_map,
+       .slavecnt = ARRAY_SIZE(s3c2412_dma_slave_map),
 };
 
 struct platform_device s3c2412_device_dma = {
@@ -534,10 +590,30 @@ static struct s3c24xx_dma_channel s3c2443_dma_channels[DMACH_MAX] = {
        [DMACH_MIC_IN] = { S3C24XX_DMA_APB, true, 29 },
 };
 
+static const struct dma_slave_map s3c2443_dma_slave_map[] = {
+       { "s3c2440-sdi", "rx-tx", (void *)DMACH_SDI },
+       { "s3c2443-spi.0", "rx", (void *)DMACH_SPI0_RX },
+       { "s3c2443-spi.0", "tx", (void *)DMACH_SPI0_TX },
+       { "s3c2443-spi.1", "rx", (void *)DMACH_SPI1_RX },
+       { "s3c2443-spi.1", "tx", (void *)DMACH_SPI1_TX },
+       { "s3c2440-uart.0", "rx", (void *)DMACH_UART0 },
+       { "s3c2440-uart.0", "tx", (void *)DMACH_UART0 },
+       { "s3c2440-uart.1", "rx", (void *)DMACH_UART1 },
+       { "s3c2440-uart.1", "tx", (void *)DMACH_UART1 },
+       { "s3c2440-uart.2", "rx", (void *)DMACH_UART2 },
+       { "s3c2440-uart.2", "tx", (void *)DMACH_UART2 },
+       { "s3c2440-uart.3", "rx", (void *)DMACH_UART3 },
+       { "s3c2440-uart.3", "tx", (void *)DMACH_UART3 },
+       { "s3c24xx-iis", "rx", (void *)DMACH_I2S_IN },
+       { "s3c24xx-iis", "tx", (void *)DMACH_I2S_OUT },
+};
+
 static struct s3c24xx_dma_platdata s3c2443_dma_platdata = {
        .num_phy_channels = 6,
        .channels = s3c2443_dma_channels,
        .num_channels = DMACH_MAX,
+       .slave_map = s3c2443_dma_slave_map,
+       .slavecnt = ARRAY_SIZE(s3c2443_dma_slave_map),
 };
 
 struct platform_device s3c2443_device_dma = {
index fc033c0d2a0f21b443ed0e28ad35d87598870e55..eada0b58ba1c7637d46fffaf7eadaf37380d41e8 100644 (file)
                                status = "disabled";
                        };
                };
+
+               vpu: vpu@d0100000 {
+                       compatible = "amlogic,meson-gx-vpu";
+                       reg = <0x0 0xd0100000 0x0 0x100000>,
+                             <0x0 0xc883c000 0x0 0x1000>,
+                             <0x0 0xc8838000 0x0 0x1000>;
+                       reg-names = "vpu", "hhi", "dmc";
+                       interrupts = <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       /* CVBS VDAC output port */
+                       cvbs_vdac_port: port@0 {
+                               reg = <0>;
+                       };
+               };
        };
 };
index 969682092e0fe040fb1b6de918de99d095fd2c91..4cbd626a9e887b01285c6052fb3590d46bcae176 100644 (file)
                clocks = <&wifi32k>;
                clock-names = "ext_clock";
        };
+
+       cvbs-connector {
+               compatible = "composite-video-connector";
+
+               port {
+                       cvbs_connector_in: endpoint {
+                               remote-endpoint = <&cvbs_vdac_out>;
+                       };
+               };
+       };
 };
 
 &uart_AO {
        clocks = <&clkc CLKID_FCLK_DIV4>;
        clock-names = "clkin0";
 };
+
+&cvbs_vdac_port {
+       cvbs_vdac_out: endpoint {
+               remote-endpoint = <&cvbs_connector_in>;
+       };
+};
index 203be28978d5ede48d7050356f14a2ad19423a7e..4a96e0f6f9265b858287c039714e652062134c9e 100644 (file)
                clocks = <&wifi32k>;
                clock-names = "ext_clock";
        };
+
+       cvbs-connector {
+               compatible = "composite-video-connector";
+
+               port {
+                       cvbs_connector_in: endpoint {
+                               remote-endpoint = <&cvbs_vdac_out>;
+                       };
+               };
+       };
 };
 
 /* This UART is brought out to the DB9 connector */
        clocks = <&clkc CLKID_FCLK_DIV4>;
        clock-names = "clkin0";
 };
+
+&cvbs_vdac_port {
+       cvbs_vdac_out: endpoint {
+               remote-endpoint = <&cvbs_connector_in>;
+       };
+};
index 51edd5b5c4604518bf8e329c11d123c2aa706af6..596240c38a9cdd7720077a8f97a5c0111366e550 100644 (file)
                 <&clkc CLKID_FCLK_DIV2>;
        clock-names = "core", "clkin0", "clkin1";
 };
+
+&vpu {
+       compatible = "amlogic,meson-gxbb-vpu", "amlogic,meson-gx-vpu";
+};
index e99101ae966438414c055f54c635c8b0878a528e..cea4a3eded9b805983e0af414cfd6d901ad5a703 100644 (file)
                clocks = <&wifi32k>;
                clock-names = "ext_clock";
        };
+
+       cvbs-connector {
+               compatible = "composite-video-connector";
+
+               port {
+                       cvbs_connector_in: endpoint {
+                               remote-endpoint = <&cvbs_vdac_out>;
+                       };
+               };
+       };
 };
 
 &uart_AO {
        clocks = <&clkc CLKID_FCLK_DIV4>;
        clock-names = "clkin0";
 };
+
+&cvbs_vdac_port {
+       cvbs_vdac_out: endpoint {
+               remote-endpoint = <&cvbs_connector_in>;
+       };
+};
index 9f89b99c4806776d1a009c9e02f02e3584e8c294..69216246275dfa05e852e513337eea1cb8d53e55 100644 (file)
@@ -43,7 +43,7 @@
 
 #include "meson-gx.dtsi"
 #include <dt-bindings/clock/gxbb-clkc.h>
-#include <dt-bindings/gpio/meson-gxbb-gpio.h>
+#include <dt-bindings/gpio/meson-gxl-gpio.h>
 
 / {
        compatible = "amlogic,meson-gxl";
                 <&clkc CLKID_FCLK_DIV2>;
        clock-names = "core", "clkin0", "clkin1";
 };
+
+&vpu {
+       compatible = "amlogic,meson-gxl-vpu", "amlogic,meson-gx-vpu";
+};
index f859d75db8bd936845663084ffbfd7309148a9f4..5a337d339df1d58b984ab575ba88b07a03a067c2 100644 (file)
                compatible = "mmc-pwrseq-emmc";
                reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>;
        };
+
+       cvbs-connector {
+               compatible = "composite-video-connector";
+
+               port {
+                       cvbs_connector_in: endpoint {
+                               remote-endpoint = <&cvbs_vdac_out>;
+                       };
+               };
+       };
 };
 
 /* This UART is brought out to the DB9 connector */
                max-speed = <1000>;
        };
 };
+
+&cvbs_vdac_port {
+       cvbs_vdac_out: endpoint {
+               remote-endpoint = <&cvbs_connector_in>;
+       };
+};
index c1974bbbddead8d3b955e105ec018701fca3ffd7..eb2f0c3e5e538e4bebf211512bccd4a8eaab02b5 100644 (file)
                };
        };
 };
+
+&vpu {
+       compatible = "amlogic,meson-gxm-vpu", "amlogic,meson-gx-vpu";
+};
index a852e28a40e17761b5b393b21d9798eb9a80cd8b..a83ed2c6bbf79afd64c24577ca2530e7da6f0b87 100644 (file)
@@ -81,7 +81,7 @@
                #address-cells = <0>;
                interrupt-controller;
                reg = <0x0 0x2c001000 0 0x1000>,
-                     <0x0 0x2c002000 0 0x1000>,
+                     <0x0 0x2c002000 0 0x2000>,
                      <0x0 0x2c004000 0 0x2000>,
                      <0x0 0x2c006000 0 0x2000>;
                interrupts = <1 9 0xf04>;
index 9d1d7ad9b075a314d5777260622d66487c82cf45..29ed6b61c737a7827eb8caae01ef335fdbd222b1 100644 (file)
                        reg = <0x0 0x86000000 0x0 0x200000>;
                        no-map;
                };
+
+               memory@85800000 {
+                       reg = <0x0 0x85800000 0x0 0x800000>;
+                       no-map;
+               };
+
+               memory@86200000 {
+                       reg = <0x0 0x86200000 0x0 0x2600000>;
+                       no-map;
+               };
        };
 
        cpus {
index 6ffb0517421a92f40d43ffa37682bfd825efca25..dbea2c3d8f0c229f05573f94b9318837a8c11300 100644 (file)
                power-source = <3300>;
        };
 
-       sdhi0_pins_uhs: sd0 {
+       sdhi0_pins_uhs: sd0_uhs {
                groups = "sdhi0_data4", "sdhi0_ctrl";
                function = "sdhi0";
                power-source = <1800>;
index 869dded0f09f810f8b05005eed0e3d1d86287b93..33b744d547392d6464d34d5ca91d1296454ad784 100644 (file)
@@ -331,6 +331,7 @@ CONFIG_DRM_VC4=m
 CONFIG_DRM_PANEL_SIMPLE=m
 CONFIG_DRM_I2C_ADV7511=m
 CONFIG_DRM_HISI_KIRIN=m
+CONFIG_DRM_MESON=m
 CONFIG_FB=y
 CONFIG_FB_ARMCLCD=y
 CONFIG_BACKLIGHT_GENERIC=m
index 446f6c46d4b17b352ef695665409c6863d470706..3a4301163e04a26d979bf506f8c4a537a2dd9715 100644 (file)
@@ -164,22 +164,25 @@ lr        .req    x30             // link register
 
 /*
  * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
- * <symbol> is within the range +/- 4 GB of the PC.
+ * <symbol> is within the range +/- 4 GB of the PC when running
+ * in core kernel context. In module context, a movz/movk sequence
+ * is used, since modules may be loaded far away from the kernel
+ * when KASLR is in effect.
  */
        /*
         * @dst: destination register (64 bit wide)
         * @sym: name of the symbol
-        * @tmp: optional scratch register to be used if <dst> == sp, which
-        *       is not allowed in an adrp instruction
         */
-       .macro  adr_l, dst, sym, tmp=
-       .ifb    \tmp
+       .macro  adr_l, dst, sym
+#ifndef MODULE
        adrp    \dst, \sym
        add     \dst, \dst, :lo12:\sym
-       .else
-       adrp    \tmp, \sym
-       add     \dst, \tmp, :lo12:\sym
-       .endif
+#else
+       movz    \dst, #:abs_g3:\sym
+       movk    \dst, #:abs_g2_nc:\sym
+       movk    \dst, #:abs_g1_nc:\sym
+       movk    \dst, #:abs_g0_nc:\sym
+#endif
        .endm
 
        /*
@@ -190,6 +193,7 @@ lr  .req    x30             // link register
         *       the address
         */
        .macro  ldr_l, dst, sym, tmp=
+#ifndef MODULE
        .ifb    \tmp
        adrp    \dst, \sym
        ldr     \dst, [\dst, :lo12:\sym]
@@ -197,6 +201,15 @@ lr .req    x30             // link register
        adrp    \tmp, \sym
        ldr     \dst, [\tmp, :lo12:\sym]
        .endif
+#else
+       .ifb    \tmp
+       adr_l   \dst, \sym
+       ldr     \dst, [\dst]
+       .else
+       adr_l   \tmp, \sym
+       ldr     \dst, [\tmp]
+       .endif
+#endif
        .endm
 
        /*
@@ -206,8 +219,13 @@ lr .req    x30             // link register
         *       while <src> needs to be preserved.
         */
        .macro  str_l, src, sym, tmp
+#ifndef MODULE
        adrp    \tmp, \sym
        str     \src, [\tmp, :lo12:\sym]
+#else
+       adr_l   \tmp, \sym
+       str     \src, [\tmp]
+#endif
        .endm
 
        /*
index f2bcbe2d98895ea35b95b8a1e518d808738284b4..86c404171305abd290a6a85d7a5edd69c55ecd02 100644 (file)
@@ -9,9 +9,17 @@
 
 struct task_struct;
 
+/*
+ * We don't use read_sysreg() as we want the compiler to cache the value where
+ * possible.
+ */
 static __always_inline struct task_struct *get_current(void)
 {
-       return (struct task_struct *)read_sysreg(sp_el0);
+       unsigned long sp_el0;
+
+       asm ("mrs %0, sp_el0" : "=r" (sp_el0));
+
+       return (struct task_struct *)sp_el0;
 }
 
 #define current get_current()
index 290a84f3351f706bd026e00364c2d433a4852aed..e04082700bb16c3598333de72b3c64659be63e5e 100644 (file)
@@ -524,7 +524,8 @@ EXPORT_SYMBOL(dummy_dma_ops);
 
 static int __init arm64_dma_init(void)
 {
-       if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
+       if (swiotlb_force == SWIOTLB_FORCE ||
+           max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
                swiotlb = 1;
 
        return atomic_pool_init();
index a78a5c401806c80ee63818c8d353f3ca44bf58d3..156169c6981ba09ef815308ae847d84ccc02e4f4 100644 (file)
@@ -88,21 +88,21 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
                        break;
 
                pud = pud_offset(pgd, addr);
-               printk(", *pud=%016llx", pud_val(*pud));
+               pr_cont(", *pud=%016llx", pud_val(*pud));
                if (pud_none(*pud) || pud_bad(*pud))
                        break;
 
                pmd = pmd_offset(pud, addr);
-               printk(", *pmd=%016llx", pmd_val(*pmd));
+               pr_cont(", *pmd=%016llx", pmd_val(*pmd));
                if (pmd_none(*pmd) || pmd_bad(*pmd))
                        break;
 
                pte = pte_offset_map(pmd, addr);
-               printk(", *pte=%016llx", pte_val(*pte));
+               pr_cont(", *pte=%016llx", pte_val(*pte));
                pte_unmap(pte);
        } while(0);
 
-       printk("\n");
+       pr_cont("\n");
 }
 
 #ifdef CONFIG_ARM64_HW_AFDBM
index 964b7549af5cc7f827c57fe17709e7575a471c56..e25584d723960e73fb8eec8d1a5f48fa57197582 100644 (file)
@@ -239,7 +239,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                ncontig = find_num_contig(vma->vm_mm, addr, cpte,
                                          *cpte, &pgsize);
                for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) {
-                       changed = ptep_set_access_flags(vma, addr, cpte,
+                       changed |= ptep_set_access_flags(vma, addr, cpte,
                                                        pfn_pte(pfn,
                                                                hugeprot),
                                                        dirty);
index 212c4d1e2f26df7f291270fb461abac912ce7161..716d1226ba6925babc28bc6c14dc175e739c7f57 100644 (file)
@@ -401,7 +401,8 @@ static void __init free_unused_memmap(void)
  */
 void __init mem_init(void)
 {
-       if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
+       if (swiotlb_force == SWIOTLB_FORCE ||
+           max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
                swiotlb_init(1);
 
        set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
index 6a02b3a3fa6513a4f517c31c88aa435ddc766f3a..e92fb190e2d628e878b209a14e8c8be551510654 100644 (file)
@@ -521,6 +521,9 @@ void *kvm_mips_build_exit(void *addr)
        uasm_i_and(&p, V0, V0, AT);
        uasm_i_lui(&p, AT, ST0_CU0 >> 16);
        uasm_i_or(&p, V0, V0, AT);
+#ifdef CONFIG_64BIT
+       uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX);
+#endif
        uasm_i_mtc0(&p, V0, C0_STATUS);
        uasm_i_ehb(&p);
 
@@ -643,7 +646,7 @@ static void *kvm_mips_build_ret_to_guest(void *addr)
 
        /* Setup status register for running guest in UM */
        uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
-       UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX));
+       UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
        uasm_i_and(&p, V1, V1, AT);
        uasm_i_mtc0(&p, V1, C0_STATUS);
        uasm_i_ehb(&p);
index 06a60b19acfb53c2788ba6b9c79de983bb6fe43e..29ec9ab3fd5570737caa7f5c63768177a5caffa7 100644 (file)
@@ -360,8 +360,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
        dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);
 
        /* Invalidate the icache for these ranges */
-       local_flush_icache_range((unsigned long)gebase,
-                               (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
+       flush_icache_range((unsigned long)gebase,
+                          (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
 
        /*
         * Allocate comm page for guest kernel, a TLB will be reserved for
index ef31fc24344e983c67b6f2b9c185a9275d456ed1..552544616b9d93ab6838a998ffb0b6ad9a7f1574 100644 (file)
@@ -44,6 +44,8 @@ SECTIONS
         /* Read-only sections, merged into text segment: */
         . = LOAD_BASE ;
 
+       _text = .;
+
        /* _s_kernel_ro must be page aligned */
        . = ALIGN(PAGE_SIZE);
        _s_kernel_ro = .;
index 7581330ea35be1e15498cf5cef9bbcbd3889aab9..88fe0aad4390b10830ce1bc1be62925d4b2d4bbc 100644 (file)
@@ -49,7 +49,6 @@ struct thread_info {
 #define TIF_POLLING_NRFLAG     3       /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_32BIT               4       /* 32 bit binary */
 #define TIF_MEMDIE             5       /* is terminating due to OOM killer */
-#define TIF_RESTORE_SIGMASK    6       /* restore saved signal mask */
 #define TIF_SYSCALL_AUDIT      7       /* syscall auditing active */
 #define TIF_NOTIFY_RESUME      8       /* callback before returning to user */
 #define TIF_SINGLESTEP         9       /* single stepping? */
index da0d9cb63403d4b3b4f9647cf8415dd11db30fd3..1e22f981cd81fb0cf840407210d499cab0319e0e 100644 (file)
@@ -235,9 +235,26 @@ void __init time_init(void)
 
        cr16_hz = 100 * PAGE0->mem_10msec;  /* Hz */
 
-       /* register at clocksource framework */
-       clocksource_register_hz(&clocksource_cr16, cr16_hz);
-
        /* register as sched_clock source */
        sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
 }
+
+static int __init init_cr16_clocksource(void)
+{
+       /*
+        * The cr16 interval timers are not syncronized across CPUs, so mark
+        * them unstable and lower rating on SMP systems.
+        */
+       if (num_online_cpus() > 1) {
+               clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
+               clocksource_cr16.rating = 0;
+       }
+
+       /* register at clocksource framework */
+       clocksource_register_hz(&clocksource_cr16,
+               100 * PAGE0->mem_10msec);
+
+       return 0;
+}
+
+device_initcall(init_cr16_clocksource);
index 8ff9253930af776b5ca7d408f4bd134cdb88c9b0..1a0b4f63f0e90fbb4e4cb58ea6da95d326bcba3c 100644 (file)
@@ -234,7 +234,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long code,
            tsk->comm, code, address);
        print_vma_addr(KERN_CONT " in ", regs->iaoq[0]);
 
-       pr_cont(" trap #%lu: %s%c", code, trap_name(code),
+       pr_cont("\ntrap #%lu: %s%c", code, trap_name(code),
                vma ? ',':'\n');
 
        if (vma)
diff --git a/arch/s390/include/asm/asm-prototypes.h b/arch/s390/include/asm/asm-prototypes.h
new file mode 100644 (file)
index 0000000..2c3413b
--- /dev/null
@@ -0,0 +1,8 @@
+#ifndef _ASM_S390_PROTOTYPES_H
+
+#include <linux/kvm_host.h>
+#include <linux/ftrace.h>
+#include <asm/fpu/api.h>
+#include <asm-generic/asm-prototypes.h>
+
+#endif /* _ASM_S390_PROTOTYPES_H */
index 6b246aadf311706849341dac2d0eafee293340f1..1b5c5ee9fc1b60878844cd67cb6a6cb12800a563 100644 (file)
@@ -94,7 +94,7 @@ static void update_mt_scaling(void)
  * Update process times based on virtual cpu times stored by entry.S
  * to the lowcore fields user_timer, system_timer & steal_clock.
  */
-static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
+static int do_account_vtime(struct task_struct *tsk)
 {
        u64 timer, clock, user, system, steal;
        u64 user_scaled, system_scaled;
@@ -138,7 +138,7 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
        }
        account_user_time(tsk, user);
        tsk->utimescaled += user_scaled;
-       account_system_time(tsk, hardirq_offset, system);
+       account_system_time(tsk, 0, system);
        tsk->stimescaled += system_scaled;
 
        steal = S390_lowcore.steal_timer;
@@ -152,7 +152,7 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
 
 void vtime_task_switch(struct task_struct *prev)
 {
-       do_account_vtime(prev, 0);
+       do_account_vtime(prev);
        prev->thread.user_timer = S390_lowcore.user_timer;
        prev->thread.system_timer = S390_lowcore.system_timer;
        S390_lowcore.user_timer = current->thread.user_timer;
@@ -166,7 +166,7 @@ void vtime_task_switch(struct task_struct *prev)
  */
 void vtime_account_user(struct task_struct *tsk)
 {
-       if (do_account_vtime(tsk, HARDIRQ_OFFSET))
+       if (do_account_vtime(tsk))
                virt_timer_expire();
 }
 
index 31c34ee131f34505872a018b75a8489f1ac9c854..6ef688a1ef3e0f022032e5317662b9011c8f74c4 100644 (file)
@@ -1020,7 +1020,8 @@ struct {
        const char *basename;
        struct simd_skcipher_alg *simd;
 } aesni_simd_skciphers2[] = {
-#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
+#if (defined(MODULE) && IS_ENABLED(CONFIG_CRYPTO_PCBC)) || \
+    IS_BUILTIN(CONFIG_CRYPTO_PCBC)
        {
                .algname        = "pcbc(aes)",
                .drvname        = "pcbc-aes-aesni",
index b47edb8f52566e223f89800b5b0465349331985a..410efb2c7b80028ba63367e5ed6d5e27518b0f24 100644 (file)
@@ -68,12 +68,10 @@ static struct dma_map_ops swiotlb_dma_ops = {
  */
 int __init pci_swiotlb_detect_override(void)
 {
-       int use_swiotlb = swiotlb | swiotlb_force;
-
-       if (swiotlb_force)
+       if (swiotlb_force == SWIOTLB_FORCE)
                swiotlb = 1;
 
-       return use_swiotlb;
+       return swiotlb;
 }
 IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
                  pci_xen_swiotlb_detect,
index 56628a44668b7ec43cee087af817ebb1725cf7cd..cedbba0f3402d2343ce069fe3ed6a07f44f68907 100644 (file)
@@ -818,6 +818,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
        return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
 }
 
+static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
+                              struct segmented_address addr,
+                              void *data,
+                              unsigned int size)
+{
+       int rc;
+       ulong linear;
+
+       rc = linearize(ctxt, addr, size, true, &linear);
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+       return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
+}
+
 /*
  * Prefetch the remaining bytes of the instruction without crossing page
  * boundary if they are not in fetch_cache yet.
@@ -1571,7 +1585,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                    &ctxt->exception);
 }
 
-/* Does not support long mode */
 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                     u16 selector, int seg, u8 cpl,
                                     enum x86_transfer_type transfer,
@@ -1608,20 +1621,34 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 
        rpl = selector & 3;
 
-       /* NULL selector is not valid for TR, CS and SS (except for long mode) */
-       if ((seg == VCPU_SREG_CS
-            || (seg == VCPU_SREG_SS
-                && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
-            || seg == VCPU_SREG_TR)
-           && null_selector)
-               goto exception;
-
        /* TR should be in GDT only */
        if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
                goto exception;
 
-       if (null_selector) /* for NULL selector skip all following checks */
+       /* NULL selector is not valid for TR, CS and (except for long mode) SS */
+       if (null_selector) {
+               if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
+                       goto exception;
+
+               if (seg == VCPU_SREG_SS) {
+                       if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
+                               goto exception;
+
+                       /*
+                        * ctxt->ops->set_segment expects the CPL to be in
+                        * SS.DPL, so fake an expand-up 32-bit data segment.
+                        */
+                       seg_desc.type = 3;
+                       seg_desc.p = 1;
+                       seg_desc.s = 1;
+                       seg_desc.dpl = cpl;
+                       seg_desc.d = 1;
+                       seg_desc.g = 1;
+               }
+
+               /* Skip all following checks */
                goto load;
+       }
 
        ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
        if (ret != X86EMUL_CONTINUE)
@@ -1737,6 +1764,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                   u16 selector, int seg)
 {
        u8 cpl = ctxt->ops->cpl(ctxt);
+
+       /*
+        * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
+        * they can load it at CPL<3 (Intel's manual says only LSS can,
+        * but it's wrong).
+        *
+        * However, the Intel manual says that putting IST=1/DPL=3 in
+        * an interrupt gate will result in SS=3 (the AMD manual instead
+        * says it doesn't), so allow SS=3 in __load_segment_descriptor
+        * and only forbid it here.
+        */
+       if (seg == VCPU_SREG_SS && selector == 3 &&
+           ctxt->mode == X86EMUL_MODE_PROT64)
+               return emulate_exception(ctxt, GP_VECTOR, 0, true);
+
        return __load_segment_descriptor(ctxt, selector, seg, cpl,
                                         X86_TRANSFER_NONE, NULL);
 }
@@ -3685,8 +3727,8 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
        }
        /* Disable writeback. */
        ctxt->dst.type = OP_NONE;
-       return segmented_write(ctxt, ctxt->dst.addr.mem,
-                              &desc_ptr, 2 + ctxt->op_bytes);
+       return segmented_write_std(ctxt, ctxt->dst.addr.mem,
+                                  &desc_ptr, 2 + ctxt->op_bytes);
 }
 
 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
@@ -3932,7 +3974,7 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
        else
                size = offsetof(struct fxregs_state, xmm_space[0]);
 
-       return segmented_write(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+       return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
 }
 
 static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
@@ -3974,7 +4016,7 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
-       rc = segmented_read(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
+       rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
index 5fe290c1b7d892729e7cfd637433b8171423f407..2f6ef5121a4ca36ac73639e5ffc6aa86624067d2 100644 (file)
@@ -2426,3 +2426,9 @@ void kvm_lapic_init(void)
        jump_label_rate_limit(&apic_hw_disabled, HZ);
        jump_label_rate_limit(&apic_sw_disabled, HZ);
 }
+
+void kvm_lapic_exit(void)
+{
+       static_key_deferred_flush(&apic_hw_disabled);
+       static_key_deferred_flush(&apic_sw_disabled);
+}
index e0c80233b3e17a1793cbafd422688091034b15c5..ff8039d616723fd79c493c0a1369658e440224b2 100644 (file)
@@ -110,6 +110,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
 
 int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
 void kvm_lapic_init(void);
+void kvm_lapic_exit(void);
 
 #define VEC_POS(v) ((v) & (32 - 1))
 #define REG_POS(v) (((v) >> 5) << 4)
index 24db5fb6f575af27d3b61a67b15ce9996158ed8b..a236decb81e4ffca42525d8a9cd3d5d8c98916a9 100644 (file)
@@ -132,12 +132,6 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
 
 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
 
-#define VMX_VPID_EXTENT_SUPPORTED_MASK         \
-       (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |  \
-       VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |    \
-       VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |    \
-       VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
-
 /*
  * Hyper-V requires all of these, so mark them as supported even though
  * they are just treated the same as all-context.
@@ -10473,12 +10467,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
            !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) {
                nested_vmx_entry_failure(vcpu, vmcs12,
                        EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
-               goto out;
+               return 1;
        }
        if (vmcs12->vmcs_link_pointer != -1ull) {
                nested_vmx_entry_failure(vcpu, vmcs12,
                        EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
-               goto out;
+               return 1;
        }
 
        /*
@@ -10498,7 +10492,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
                     ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
                        nested_vmx_entry_failure(vcpu, vmcs12,
                                EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
-                       goto out;
+                       return 1;
                }
        }
 
@@ -10516,7 +10510,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
                    ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
                        nested_vmx_entry_failure(vcpu, vmcs12,
                                EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
-                       goto out;
+                       return 1;
                }
        }
 
index 51ccfe08e32ff0570517fda01277dc0085721e94..57d8a856cdc5ce938efb73fd8fae144867abde76 100644 (file)
@@ -3070,6 +3070,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
        memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
+static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags);
+
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                                              struct kvm_vcpu_events *events)
 {
@@ -3106,10 +3108,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                vcpu->arch.apic->sipi_vector = events->sipi_vector;
 
        if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
+               u32 hflags = vcpu->arch.hflags;
                if (events->smi.smm)
-                       vcpu->arch.hflags |= HF_SMM_MASK;
+                       hflags |= HF_SMM_MASK;
                else
-                       vcpu->arch.hflags &= ~HF_SMM_MASK;
+                       hflags &= ~HF_SMM_MASK;
+               kvm_set_hflags(vcpu, hflags);
+
                vcpu->arch.smi_pending = events->smi.pending;
                if (events->smi.smm_inside_nmi)
                        vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
@@ -3337,6 +3342,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 
        switch (cap->cap) {
        case KVM_CAP_HYPERV_SYNIC:
+               if (!irqchip_in_kernel(vcpu->kvm))
+                       return -EINVAL;
                return kvm_hv_activate_synic(vcpu);
        default:
                return -EINVAL;
@@ -6040,6 +6047,7 @@ out:
 
 void kvm_arch_exit(void)
 {
+       kvm_lapic_exit();
        perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
 
        if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
index e76d1af60f7ad76a12fa4f01cf61b3c508ae0453..bb660e53cbd6ba51eace412622fdd7342939ddc1 100644 (file)
@@ -1172,6 +1172,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
                set_memory_ro((unsigned long)header, header->pages);
                prog->bpf_func = (void *)image;
                prog->jited = 1;
+       } else {
+               prog = orig_prog;
        }
 
 out_addrs:
index 936a488d6cf6df3c2aadbbdbc036b8eb06701cb0..274dfc48184977db435a9c5c78607cd8a182067c 100644 (file)
@@ -210,6 +210,70 @@ int __init efi_memblock_x86_reserve_range(void)
        return 0;
 }
 
+#define OVERFLOW_ADDR_SHIFT    (64 - EFI_PAGE_SHIFT)
+#define OVERFLOW_ADDR_MASK     (U64_MAX << OVERFLOW_ADDR_SHIFT)
+#define U64_HIGH_BIT           (~(U64_MAX >> 1))
+
+static bool __init efi_memmap_entry_valid(const efi_memory_desc_t *md, int i)
+{
+       u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1;
+       u64 end_hi = 0;
+       char buf[64];
+
+       if (md->num_pages == 0) {
+               end = 0;
+       } else if (md->num_pages > EFI_PAGES_MAX ||
+                  EFI_PAGES_MAX - md->num_pages <
+                  (md->phys_addr >> EFI_PAGE_SHIFT)) {
+               end_hi = (md->num_pages & OVERFLOW_ADDR_MASK)
+                       >> OVERFLOW_ADDR_SHIFT;
+
+               if ((md->phys_addr & U64_HIGH_BIT) && !(end & U64_HIGH_BIT))
+                       end_hi += 1;
+       } else {
+               return true;
+       }
+
+       pr_warn_once(FW_BUG "Invalid EFI memory map entries:\n");
+
+       if (end_hi) {
+               pr_warn("mem%02u: %s range=[0x%016llx-0x%llx%016llx] (invalid)\n",
+                       i, efi_md_typeattr_format(buf, sizeof(buf), md),
+                       md->phys_addr, end_hi, end);
+       } else {
+               pr_warn("mem%02u: %s range=[0x%016llx-0x%016llx] (invalid)\n",
+                       i, efi_md_typeattr_format(buf, sizeof(buf), md),
+                       md->phys_addr, end);
+       }
+       return false;
+}
+
+static void __init efi_clean_memmap(void)
+{
+       efi_memory_desc_t *out = efi.memmap.map;
+       const efi_memory_desc_t *in = out;
+       const efi_memory_desc_t *end = efi.memmap.map_end;
+       int i, n_removal;
+
+       for (i = n_removal = 0; in < end; i++) {
+               if (efi_memmap_entry_valid(in, i)) {
+                       if (out != in)
+                               memcpy(out, in, efi.memmap.desc_size);
+                       out = (void *)out + efi.memmap.desc_size;
+               } else {
+                       n_removal++;
+               }
+               in = (void *)in + efi.memmap.desc_size;
+       }
+
+       if (n_removal > 0) {
+               u64 size = efi.memmap.nr_map - n_removal;
+
+               pr_warn("Removing %d invalid memory map entries.\n", n_removal);
+               efi_memmap_install(efi.memmap.phys_map, size);
+       }
+}
+
 void __init efi_print_memmap(void)
 {
        efi_memory_desc_t *md;
@@ -472,6 +536,8 @@ void __init efi_init(void)
                }
        }
 
+       efi_clean_memmap();
+
        if (efi_enabled(EFI_DBG))
                efi_print_memmap();
 }
index 10aca63a50d7bbff9fa94dec60e0df00591ccbd5..30031d5293c483202c526d5045cda23be6617359 100644 (file)
@@ -214,7 +214,7 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
 
        new_size = efi.memmap.desc_size * num_entries;
 
-       new_phys = memblock_alloc(new_size, 0);
+       new_phys = efi_memmap_alloc(num_entries);
        if (!new_phys) {
                pr_err("Could not allocate boot services memmap\n");
                return;
@@ -355,7 +355,7 @@ void __init efi_free_boot_services(void)
        }
 
        new_size = efi.memmap.desc_size * num_entries;
-       new_phys = memblock_alloc(new_size, 0);
+       new_phys = efi_memmap_alloc(num_entries);
        if (!new_phys) {
                pr_err("Failed to allocate new EFI memmap\n");
                return;
index a9fafb5c873897eec1685861180cf4ac5fd826e3..a0b36a9d5df149e6370fb3b6be0d5894bae72bd7 100644 (file)
@@ -48,7 +48,7 @@ int __init pci_xen_swiotlb_detect(void)
         * activate this IOMMU. If running as PV privileged, activate it
         * irregardless.
         */
-       if ((xen_initial_domain() || swiotlb || swiotlb_force))
+       if (xen_initial_domain() || swiotlb || swiotlb_force == SWIOTLB_FORCE)
                xen_swiotlb = 1;
 
        /* If we are running under Xen, we MUST disable the native SWIOTLB.
index 8c394e30e5fe2afcafa32b8e7fee658b5b1c27f3..f3f7b41116f7afcd3a37880d6e814b67cac0f005 100644 (file)
@@ -713,10 +713,9 @@ static void __init xen_reserve_xen_mfnlist(void)
                size = PFN_PHYS(xen_start_info->nr_p2m_frames);
        }
 
-       if (!xen_is_e820_reserved(start, size)) {
-               memblock_reserve(start, size);
+       memblock_reserve(start, size);
+       if (!xen_is_e820_reserved(start, size))
                return;
-       }
 
 #ifdef CONFIG_X86_32
        /*
@@ -727,6 +726,7 @@ static void __init xen_reserve_xen_mfnlist(void)
        BUG();
 #else
        xen_relocate_p2m();
+       memblock_free(start, size);
 #endif
 }
 
index ed89c8f4b2a0497af0e45f075e4781dbb5cf87d2..f8c82a9b401222c84d5d014ac339439a1708caba 100644 (file)
@@ -301,13 +301,6 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;
 
-       if (discard) {
-               ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
-                               BLKDEV_DISCARD_ZERO, biop);
-               if (ret == 0 || (ret && ret != -EOPNOTSUPP))
-                       goto out;
-       }
-
        ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
                        biop);
        if (ret == 0 || (ret && ret != -EOPNOTSUPP))
@@ -370,6 +363,12 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
        struct bio *bio = NULL;
        struct blk_plug plug;
 
+       if (discard) {
+               if (!blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
+                               BLKDEV_DISCARD_ZERO))
+                       return 0;
+       }
+
        blk_start_plug(&plug);
        ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
                        &bio, discard);
index 6e82769f4042c2f57af7976b1b7b4342eea1306d..f0a9c07b4c7a5ef9e96985a89c5c000d62a78cd0 100644 (file)
@@ -544,6 +544,8 @@ static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
  * the timer to kick off queuing again.
  */
 static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
+       __releases(lock)
+       __acquires(lock)
 {
        struct rq_wait *rqw = get_rq_wait(rwb, current_is_kswapd());
        DEFINE_WAIT(wait);
@@ -558,13 +560,12 @@ static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
                if (may_queue(rwb, rqw, &wait, rw))
                        break;
 
-               if (lock)
+               if (lock) {
                        spin_unlock_irq(lock);
-
-               io_schedule();
-
-               if (lock)
+                       io_schedule();
                        spin_lock_irq(lock);
+               } else
+                       io_schedule();
        } while (1);
 
        finish_wait(&rqw->wait, &wait);
@@ -595,7 +596,7 @@ static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
  * in an irq held spinlock, if it holds one when calling this function.
  * If we do sleep, we'll release and re-grab it.
  */
-unsigned int wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
+enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
 {
        unsigned int ret = 0;
 
index 472211fa183a6488ef93676a2f89b2f345947c9a..3bd15d8095b101233455d7985b834f828d89be92 100644 (file)
@@ -16,7 +16,7 @@
 static inline sector_t blk_zone_start(struct request_queue *q,
                                      sector_t sector)
 {
-       sector_t zone_mask = blk_queue_zone_size(q) - 1;
+       sector_t zone_mask = blk_queue_zone_sectors(q) - 1;
 
        return sector & ~zone_mask;
 }
@@ -222,7 +222,7 @@ int blkdev_reset_zones(struct block_device *bdev,
                return -EINVAL;
 
        /* Check alignment (handle eventual smaller last zone) */
-       zone_sectors = blk_queue_zone_size(q);
+       zone_sectors = blk_queue_zone_sectors(q);
        if (sector & (zone_sectors - 1))
                return -EINVAL;
 
index d7beb6bbbf668fe62083cece4d7a6cb1eafe9443..7afb9907821fb7abe61d76ac1f3ce408bb14dbd5 100644 (file)
@@ -434,7 +434,7 @@ static bool part_zone_aligned(struct gendisk *disk,
                              struct block_device *bdev,
                              sector_t from, sector_t size)
 {
-       unsigned int zone_size = bdev_zone_size(bdev);
+       unsigned int zone_sectors = bdev_zone_sectors(bdev);
 
        /*
         * If this function is called, then the disk is a zoned block device
@@ -446,7 +446,7 @@ static bool part_zone_aligned(struct gendisk *disk,
         * regular block devices (no zone operation) and their zone size will
         * be reported as 0. Allow this case.
         */
-       if (!zone_size)
+       if (!zone_sectors)
                return true;
 
        /*
@@ -455,24 +455,24 @@ static bool part_zone_aligned(struct gendisk *disk,
         * use it. Check the zone size too: it should be a power of 2 number
         * of sectors.
         */
-       if (WARN_ON_ONCE(!is_power_of_2(zone_size))) {
+       if (WARN_ON_ONCE(!is_power_of_2(zone_sectors))) {
                u32 rem;
 
-               div_u64_rem(from, zone_size, &rem);
+               div_u64_rem(from, zone_sectors, &rem);
                if (rem)
                        return false;
                if ((from + size) < get_capacity(disk)) {
-                       div_u64_rem(size, zone_size, &rem);
+                       div_u64_rem(size, zone_sectors, &rem);
                        if (rem)
                                return false;
                }
 
        } else {
 
-               if (from & (zone_size - 1))
+               if (from & (zone_sectors - 1))
                        return false;
                if ((from + size) < get_capacity(disk) &&
-                   (size & (zone_size - 1)))
+                   (size & (zone_sectors - 1)))
                        return false;
 
        }
index 13caebd679f5b07503941f1023cf468901aeec36..8c4e0a18460a78df600067da69537e6256d6bc1c 100644 (file)
@@ -114,7 +114,7 @@ void __init acpi_watchdog_init(void)
        pdev = platform_device_register_simple("wdat_wdt", PLATFORM_DEVID_NONE,
                                               resources, nresources);
        if (IS_ERR(pdev))
-               pr_err("Failed to create platform device\n");
+               pr_err("Device creation failed: %ld\n", PTR_ERR(pdev));
 
        kfree(resources);
 
index f8d65647ea790dd161b07221b9be2bdead4447d0..fb19e1cdb6415ac8d1913e7017106911dcb7a7dd 100644 (file)
@@ -98,7 +98,15 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
        if (check_children && list_empty(&adev->children))
                return -ENODEV;
 
-       return sta_present ? FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
+       /*
+        * If the device has a _HID (or _CID) returning a valid ACPI/PNP
+        * device ID, it is better to make it look less attractive here, so that
+        * the other device with the same _ADR value (that may not have a valid
+        * device ID) can be matched going forward.  [This means a second spec
+        * violation in a row, so whatever we do here is best effort anyway.]
+        */
+       return sta_present && list_empty(&adev->pnp.ids) ?
+                       FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
 }
 
 struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
@@ -250,7 +258,6 @@ int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev)
        return 0;
 
  err:
-       acpi_dma_deconfigure(dev);
        ACPI_COMPANION_SET(dev, NULL);
        put_device(dev);
        put_device(&acpi_dev->dev);
index 1b41a2739dac1dcab72aa352e9add209b2313711..0c452265c11110213ddde5c5c4440e52d2a523a5 100644 (file)
@@ -37,6 +37,7 @@ void acpi_amba_init(void);
 static inline void acpi_amba_init(void) {}
 #endif
 int acpi_sysfs_init(void);
+void acpi_gpe_apply_masked_gpes(void);
 void acpi_container_init(void);
 void acpi_memory_hotplug_init(void);
 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
index 45dec874ea55b820281457a3ae5de2fe1f12403c..192691880d55c9499e1d77e68058a56d8e5318f3 100644 (file)
@@ -2074,6 +2074,7 @@ int __init acpi_scan_init(void)
                }
        }
 
+       acpi_gpe_apply_masked_gpes();
        acpi_update_all_gpes();
        acpi_ec_ecdt_start();
 
index 703c26e7022cbf186d9a1ed3a60cd25c6fd1c049..cf05ae973381178cc066e1c235c38afb419e1f01 100644 (file)
@@ -708,6 +708,62 @@ end:
        return result ? result : size;
 }
 
+/*
+ * A Quirk Mechanism for GPE Flooding Prevention:
+ *
+ * Quirks may be needed to prevent GPE flooding on a specific GPE. The
+ * flooding typically cannot be detected and automatically prevented by
+ * ACPI_GPE_DISPATCH_NONE check because there is a _Lxx/_Exx prepared in
+ * the AML tables. This normally indicates a feature gap in Linux, thus
+ * instead of providing endless quirk tables, we provide a boot parameter
+ * for those who want this quirk. For example, if the users want to prevent
+ * the GPE flooding for GPE 00, they need to specify the following boot
+ * parameter:
+ *   acpi_mask_gpe=0x00
+ * The masking status can be modified by the following runtime controlling
+ * interface:
+ *   echo unmask > /sys/firmware/acpi/interrupts/gpe00
+ */
+
+/*
+ * Currently, the GPE flooding prevention only supports to mask the GPEs
+ * numbered from 00 to 7f.
+ */
+#define ACPI_MASKABLE_GPE_MAX  0x80
+
+static u64 __initdata acpi_masked_gpes;
+
+static int __init acpi_gpe_set_masked_gpes(char *val)
+{
+       u8 gpe;
+
+       if (kstrtou8(val, 0, &gpe) || gpe > ACPI_MASKABLE_GPE_MAX)
+               return -EINVAL;
+       acpi_masked_gpes |= ((u64)1<<gpe);
+
+       return 1;
+}
+__setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);
+
+void __init acpi_gpe_apply_masked_gpes(void)
+{
+       acpi_handle handle;
+       acpi_status status;
+       u8 gpe;
+
+       for (gpe = 0;
+            gpe < min_t(u8, ACPI_MASKABLE_GPE_MAX, acpi_current_gpe_count);
+            gpe++) {
+               if (acpi_masked_gpes & ((u64)1<<gpe)) {
+                       status = acpi_get_gpe_device(gpe, &handle);
+                       if (ACPI_SUCCESS(status)) {
+                               pr_info("Masking GPE 0x%x.\n", gpe);
+                               (void)acpi_mask_gpe(handle, gpe, TRUE);
+                       }
+               }
+       }
+}
+
 void acpi_irq_stats_init(void)
 {
        acpi_status status;
index a5e1262b964b8f987480152cb5feb0dbc9d81e47..2997026b4dfb00c1daec88f1f18264656a61ffb0 100644 (file)
@@ -626,6 +626,7 @@ static int genpd_runtime_resume(struct device *dev)
 
  out:
        /* Measure resume latency. */
+       time_start = 0;
        if (timed && runtime_pm)
                time_start = ktime_get();
 
index 38c576f76d36fca1f43bc37b6c733ba6bb222849..50a2020b5b724fd657c01b6d1a27c33a68a27fd0 100644 (file)
@@ -1042,6 +1042,7 @@ static int __init nbd_init(void)
                return -ENOMEM;
 
        for (i = 0; i < nbds_max; i++) {
+               struct request_queue *q;
                struct gendisk *disk = alloc_disk(1 << part_shift);
                if (!disk)
                        goto out;
@@ -1067,12 +1068,13 @@ static int __init nbd_init(void)
                 * every gendisk to have its very own request_queue struct.
                 * These structs are big so we dynamically allocate them.
                 */
-               disk->queue = blk_mq_init_queue(&nbd_dev[i].tag_set);
-               if (!disk->queue) {
+               q = blk_mq_init_queue(&nbd_dev[i].tag_set);
+               if (IS_ERR(q)) {
                        blk_mq_free_tag_set(&nbd_dev[i].tag_set);
                        put_disk(disk);
                        goto out;
                }
+               disk->queue = q;
 
                /*
                 * Tell the block layer that we are not a rotational device
index 5545a679abd8887123fc83d57beb37dede77a685..10332c24f9610d7e80b154bf36eebb0354ac4576 100644 (file)
@@ -56,6 +56,7 @@ struct virtblk_req {
        struct virtio_blk_outhdr out_hdr;
        struct virtio_scsi_inhdr in_hdr;
        u8 status;
+       u8 sense[SCSI_SENSE_BUFFERSIZE];
        struct scatterlist sg[];
 };
 
@@ -102,7 +103,8 @@ static int __virtblk_add_req(struct virtqueue *vq,
        }
 
        if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
-               sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
+               memcpy(vbr->sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
+               sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
                sgs[num_out + num_in++] = &sense;
                sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
                sgs[num_out + num_in++] = &inhdr;
@@ -628,11 +630,12 @@ static int virtblk_probe(struct virtio_device *vdev)
        if (err)
                goto out_put_disk;
 
-       q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
+       q = blk_mq_init_queue(&vblk->tag_set);
        if (IS_ERR(q)) {
                err = -ENOMEM;
                goto out_free_tags;
        }
+       vblk->disk->queue = q;
 
        q->queuedata = vblk;
 
index 15f58ab44d0b429fc7b5ebf0ba7e1cdfcc6c914d..e5ab7d9e8c452f2feac3e2f374c6e1639317756b 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/genhd.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
+#include <linux/backing-dev.h>
 #include <linux/string.h>
 #include <linux/vmalloc.h>
 #include <linux/err.h>
@@ -112,6 +113,14 @@ static inline bool is_partial_io(struct bio_vec *bvec)
        return bvec->bv_len != PAGE_SIZE;
 }
 
+static void zram_revalidate_disk(struct zram *zram)
+{
+       revalidate_disk(zram->disk);
+       /* revalidate_disk reset the BDI_CAP_STABLE_WRITES so set again */
+       zram->disk->queue->backing_dev_info.capabilities |=
+               BDI_CAP_STABLE_WRITES;
+}
+
 /*
  * Check if request is within bounds and aligned on zram logical blocks.
  */
@@ -1095,15 +1104,9 @@ static ssize_t disksize_store(struct device *dev,
        zram->comp = comp;
        zram->disksize = disksize;
        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
+       zram_revalidate_disk(zram);
        up_write(&zram->init_lock);
 
-       /*
-        * Revalidate disk out of the init_lock to avoid lockdep splat.
-        * It's okay because disk's capacity is protected by init_lock
-        * so that revalidate_disk always sees up-to-date capacity.
-        */
-       revalidate_disk(zram->disk);
-
        return len;
 
 out_destroy_comp:
@@ -1149,7 +1152,7 @@ static ssize_t reset_store(struct device *dev,
        /* Make sure all the pending I/O are finished */
        fsync_bdev(bdev);
        zram_reset_device(zram);
-       revalidate_disk(zram->disk);
+       zram_revalidate_disk(zram);
        bdput(bdev);
 
        mutex_lock(&bdev->bd_mutex);
index 5eb05dbf59b84d27d79106adeb7405d15de1433a..fc585f370549a336ea12cc5c323dbc77aec69825 100644 (file)
@@ -768,5 +768,5 @@ fail:
        kfree(clks);
        iounmap(base);
 }
-CLK_OF_DECLARE(stm32f42xx_rcc, "st,stm32f42xx-rcc", stm32f4_rcc_init);
-CLK_OF_DECLARE(stm32f46xx_rcc, "st,stm32f469-rcc", stm32f4_rcc_init);
+CLK_OF_DECLARE_DRIVER(stm32f42xx_rcc, "st,stm32f42xx-rcc", stm32f4_rcc_init);
+CLK_OF_DECLARE_DRIVER(stm32f46xx_rcc, "st,stm32f469-rcc", stm32f4_rcc_init);
index 9375777776d994071a476d2e198be933838db685..b533f99550e1b97dd55944a85cbf5d64153309f5 100644 (file)
  * @smstpcr: module stop control register
  * @mstpsr: module stop status register (optional)
  * @lock: protects writes to SMSTPCR
+ * @width_8bit: registers are 8-bit, not 32-bit
  */
 struct mstp_clock_group {
        struct clk_onecell_data data;
        void __iomem *smstpcr;
        void __iomem *mstpsr;
        spinlock_t lock;
+       bool width_8bit;
 };
 
 /**
@@ -59,6 +61,18 @@ struct mstp_clock {
 
 #define to_mstp_clock(_hw) container_of(_hw, struct mstp_clock, hw)
 
+static inline u32 cpg_mstp_read(struct mstp_clock_group *group,
+                               u32 __iomem *reg)
+{
+       return group->width_8bit ? readb(reg) : clk_readl(reg);
+}
+
+static inline void cpg_mstp_write(struct mstp_clock_group *group, u32 val,
+                                 u32 __iomem *reg)
+{
+       group->width_8bit ? writeb(val, reg) : clk_writel(val, reg);
+}
+
 static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
 {
        struct mstp_clock *clock = to_mstp_clock(hw);
@@ -70,12 +84,12 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
 
        spin_lock_irqsave(&group->lock, flags);
 
-       value = clk_readl(group->smstpcr);
+       value = cpg_mstp_read(group, group->smstpcr);
        if (enable)
                value &= ~bitmask;
        else
                value |= bitmask;
-       clk_writel(value, group->smstpcr);
+       cpg_mstp_write(group, value, group->smstpcr);
 
        spin_unlock_irqrestore(&group->lock, flags);
 
@@ -83,7 +97,7 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
                return 0;
 
        for (i = 1000; i > 0; --i) {
-               if (!(clk_readl(group->mstpsr) & bitmask))
+               if (!(cpg_mstp_read(group, group->mstpsr) & bitmask))
                        break;
                cpu_relax();
        }
@@ -114,9 +128,9 @@ static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
        u32 value;
 
        if (group->mstpsr)
-               value = clk_readl(group->mstpsr);
+               value = cpg_mstp_read(group, group->mstpsr);
        else
-               value = clk_readl(group->smstpcr);
+               value = cpg_mstp_read(group, group->smstpcr);
 
        return !(value & BIT(clock->bit_index));
 }
@@ -188,6 +202,9 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
                return;
        }
 
+       if (of_device_is_compatible(np, "renesas,r7s72100-mstp-clocks"))
+               group->width_8bit = true;
+
        for (i = 0; i < MSTP_MAX_CLOCKS; ++i)
                clks[i] = ERR_PTR(-ENOENT);
 
index bc97b6a4b1cf67a4897a412ff785e1350b4ee0ce..7fcaf26e8f819b7665f6e9bf83c07a1b705b1d65 100644 (file)
@@ -26,6 +26,8 @@ static const struct of_device_id machines[] __initconst = {
        { .compatible = "allwinner,sun8i-a83t", },
        { .compatible = "allwinner,sun8i-h3", },
 
+       { .compatible = "apm,xgene-shadowcat", },
+
        { .compatible = "arm,integrator-ap", },
        { .compatible = "arm,integrator-cp", },
 
index 6acbd4af632e2bdab99f984cd0b311e5f7a32e29..f91c25718d164c9d9339acf671d67937995fe076 100644 (file)
@@ -857,13 +857,13 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
        NULL,
 };
 
-static void intel_pstate_hwp_set(const struct cpumask *cpumask)
+static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
 {
        int min, hw_min, max, hw_max, cpu, range, adj_range;
        struct perf_limits *perf_limits = limits;
        u64 value, cap;
 
-       for_each_cpu(cpu, cpumask) {
+       for_each_cpu(cpu, policy->cpus) {
                int max_perf_pct, min_perf_pct;
                struct cpudata *cpu_data = all_cpu_data[cpu];
                s16 epp;
@@ -949,7 +949,7 @@ skip_epp:
 static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
 {
        if (hwp_active)
-               intel_pstate_hwp_set(policy->cpus);
+               intel_pstate_hwp_set(policy);
 
        return 0;
 }
@@ -968,19 +968,28 @@ static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
 
 static int intel_pstate_resume(struct cpufreq_policy *policy)
 {
+       int ret;
+
        if (!hwp_active)
                return 0;
 
+       mutex_lock(&intel_pstate_limits_lock);
+
        all_cpu_data[policy->cpu]->epp_policy = 0;
 
-       return intel_pstate_hwp_set_policy(policy);
+       ret = intel_pstate_hwp_set_policy(policy);
+
+       mutex_unlock(&intel_pstate_limits_lock);
+
+       return ret;
 }
 
-static void intel_pstate_hwp_set_online_cpus(void)
+static void intel_pstate_update_policies(void)
 {
-       get_online_cpus();
-       intel_pstate_hwp_set(cpu_online_mask);
-       put_online_cpus();
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               cpufreq_update_policy(cpu);
 }
 
 /************************** debugfs begin ************************/
@@ -1018,10 +1027,6 @@ static void __init intel_pstate_debug_expose_params(void)
        struct dentry *debugfs_parent;
        int i = 0;
 
-       if (hwp_active ||
-           pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load)
-               return;
-
        debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
        if (IS_ERR_OR_NULL(debugfs_parent))
                return;
@@ -1105,11 +1110,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
 
        limits->no_turbo = clamp_t(int, input, 0, 1);
 
-       if (hwp_active)
-               intel_pstate_hwp_set_online_cpus();
-
        mutex_unlock(&intel_pstate_limits_lock);
 
+       intel_pstate_update_policies();
+
        return count;
 }
 
@@ -1134,11 +1138,10 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
                                   limits->max_perf_pct);
        limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
 
-       if (hwp_active)
-               intel_pstate_hwp_set_online_cpus();
-
        mutex_unlock(&intel_pstate_limits_lock);
 
+       intel_pstate_update_policies();
+
        return count;
 }
 
@@ -1163,11 +1166,10 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
                                   limits->min_perf_pct);
        limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
 
-       if (hwp_active)
-               intel_pstate_hwp_set_online_cpus();
-
        mutex_unlock(&intel_pstate_limits_lock);
 
+       intel_pstate_update_policies();
+
        return count;
 }
 
@@ -2153,8 +2155,12 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
        if (per_cpu_limits)
                perf_limits = cpu->perf_limits;
 
+       mutex_lock(&intel_pstate_limits_lock);
+
        intel_pstate_update_perf_limits(policy, perf_limits);
 
+       mutex_unlock(&intel_pstate_limits_lock);
+
        return 0;
 }
 
@@ -2487,7 +2493,10 @@ hwp_cpu_matched:
        if (rc)
                goto out;
 
-       intel_pstate_debug_expose_params();
+       if (intel_pstate_driver == &intel_pstate && !hwp_active &&
+           pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
+               intel_pstate_debug_expose_params();
+
        intel_pstate_sysfs_expose_params();
 
        if (hwp_active)
index a324801d6a664cd707d70d673611cade4d43bd9d..47206a21bb901f424c4e331c9db6eaedef69d93d 100644 (file)
@@ -593,11 +593,16 @@ struct devfreq *devfreq_add_device(struct device *dev,
        list_add(&devfreq->node, &devfreq_list);
 
        governor = find_devfreq_governor(devfreq->governor_name);
-       if (!IS_ERR(governor))
-               devfreq->governor = governor;
-       if (devfreq->governor)
-               err = devfreq->governor->event_handler(devfreq,
-                                       DEVFREQ_GOV_START, NULL);
+       if (IS_ERR(governor)) {
+               dev_err(dev, "%s: Unable to find governor for the device\n",
+                       __func__);
+               err = PTR_ERR(governor);
+               goto err_init;
+       }
+
+       devfreq->governor = governor;
+       err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
+                                               NULL);
        if (err) {
                dev_err(dev, "%s: Unable to start governor for the device\n",
                        __func__);
index a8ed7792ece2f199adaedd588b86375c49da6f17..9af86f46fbec0fc0779e4cf211c16227b804510d 100644 (file)
@@ -497,7 +497,7 @@ passive:
        if (IS_ERR(bus->devfreq)) {
                dev_err(dev,
                        "failed to add devfreq dev with passive governor\n");
-               ret = -EPROBE_DEFER;
+               ret = PTR_ERR(bus->devfreq);
                goto err;
        }
 
index e00c9b022964702c076b1a192df35b9bbbf7e113..5a37b9fcf40ddf813a6cf5d2c401bc221fd5e83b 100644 (file)
@@ -24,5 +24,5 @@ config DW_DMAC_PCI
        select DW_DMAC_CORE
        help
          Support the Synopsys DesignWare AHB DMA controller on the
-         platfroms that enumerate it as a PCI device. For example,
+         platforms that enumerate it as a PCI device. For example,
          Intel Medfield has integrated this GPDMA controller.
index 8e67895bcca3a2db74e5b72e00374e2118ce95f7..abcc51b343cecd1629700233e8ca9d8882da2dff 100644 (file)
@@ -64,6 +64,8 @@
 #define PCI_DEVICE_ID_INTEL_IOAT_BDX8  0x6f2e
 #define PCI_DEVICE_ID_INTEL_IOAT_BDX9  0x6f2f
 
+#define PCI_DEVICE_ID_INTEL_IOAT_SKX   0x2021
+
 #define IOAT_VER_1_2            0x12    /* Version 1.2 */
 #define IOAT_VER_2_0            0x20    /* Version 2.0 */
 #define IOAT_VER_3_0            0x30    /* Version 3.0 */
index 90eddd9f07e4fc1b354bad70a935a8cfec20e246..cc5259b881d47ffa48322e2a537397317e8b8be1 100644 (file)
@@ -106,6 +106,8 @@ static struct pci_device_id ioat_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },
 
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SKX) },
+
        /* I/OAT v3.3 platforms */
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
@@ -243,10 +245,15 @@ static bool is_bdx_ioat(struct pci_dev *pdev)
        }
 }
 
+static inline bool is_skx_ioat(struct pci_dev *pdev)
+{
+       return (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SKX) ? true : false;
+}
+
 static bool is_xeon_cb32(struct pci_dev *pdev)
 {
        return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
-               is_hsw_ioat(pdev) || is_bdx_ioat(pdev);
+               is_hsw_ioat(pdev) || is_bdx_ioat(pdev) || is_skx_ioat(pdev);
 }
 
 bool is_bwd_ioat(struct pci_dev *pdev)
@@ -693,7 +700,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
        /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
        ioat_chan->completion =
                dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
-                               GFP_KERNEL, &ioat_chan->completion_dma);
+                               GFP_NOWAIT, &ioat_chan->completion_dma);
        if (!ioat_chan->completion)
                return -ENOMEM;
 
@@ -703,7 +710,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
 
        order = IOAT_MAX_ORDER;
-       ring = ioat_alloc_ring(c, order, GFP_KERNEL);
+       ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
        if (!ring)
                return -ENOMEM;
 
@@ -1357,6 +1364,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        device->version = readb(device->reg_base + IOAT_VER_OFFSET);
        if (device->version >= IOAT_VER_3_0) {
+               if (is_skx_ioat(pdev))
+                       device->version = IOAT_VER_3_2;
                err = ioat3_dma_probe(device, ioat_dca_enabled);
 
                if (device->version >= IOAT_VER_3_3)
index ac68666cd3f4e99cd3bc2f4ae346f5e78cd218ce..daf479cce69158e480757d269630d4bed7d6bca2 100644 (file)
@@ -938,21 +938,14 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
                d->ccr |= CCR_DST_AMODE_POSTINC;
                if (port_window) {
                        d->ccr |= CCR_SRC_AMODE_DBLIDX;
-                       d->ei = 1;
-                       /*
-                        * One frame covers the port_window and by  configure
-                        * the source frame index to be -1 * (port_window - 1)
-                        * we instruct the sDMA that after a frame is processed
-                        * it should move back to the start of the window.
-                        */
-                       d->fi = -(port_window_bytes - 1);
 
                        if (port_window_bytes >= 64)
-                               d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
+                               d->csdp |= CSDP_SRC_BURST_64;
                        else if (port_window_bytes >= 32)
-                               d->csdp = CSDP_SRC_BURST_32 | CSDP_SRC_PACKED;
+                               d->csdp |= CSDP_SRC_BURST_32;
                        else if (port_window_bytes >= 16)
-                               d->csdp = CSDP_SRC_BURST_16 | CSDP_SRC_PACKED;
+                               d->csdp |= CSDP_SRC_BURST_16;
+
                } else {
                        d->ccr |= CCR_SRC_AMODE_CONSTANT;
                }
@@ -962,13 +955,21 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
                d->ccr |= CCR_SRC_AMODE_POSTINC;
                if (port_window) {
                        d->ccr |= CCR_DST_AMODE_DBLIDX;
+                       d->ei = 1;
+                       /*
+                        * One frame covers the port_window and by  configure
+                        * the source frame index to be -1 * (port_window - 1)
+                        * we instruct the sDMA that after a frame is processed
+                        * it should move back to the start of the window.
+                        */
+                       d->fi = -(port_window_bytes - 1);
 
                        if (port_window_bytes >= 64)
-                               d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
+                               d->csdp |= CSDP_DST_BURST_64;
                        else if (port_window_bytes >= 32)
-                               d->csdp = CSDP_DST_BURST_32 | CSDP_DST_PACKED;
+                               d->csdp |= CSDP_DST_BURST_32;
                        else if (port_window_bytes >= 16)
-                               d->csdp = CSDP_DST_BURST_16 | CSDP_DST_PACKED;
+                               d->csdp |= CSDP_DST_BURST_16;
                } else {
                        d->ccr |= CCR_DST_AMODE_CONSTANT;
                }
@@ -1017,7 +1018,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
                osg->addr = sg_dma_address(sgent);
                osg->en = en;
                osg->fn = sg_dma_len(sgent) / frame_bytes;
-               if (port_window && dir == DMA_MEM_TO_DEV) {
+               if (port_window && dir == DMA_DEV_TO_MEM) {
                        osg->ei = 1;
                        /*
                         * One frame covers the port_window and by  configure
@@ -1452,6 +1453,7 @@ static int omap_dma_probe(struct platform_device *pdev)
        struct omap_dmadev *od;
        struct resource *res;
        int rc, i, irq;
+       u32 lch_count;
 
        od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
        if (!od)
@@ -1494,20 +1496,31 @@ static int omap_dma_probe(struct platform_device *pdev)
        spin_lock_init(&od->lock);
        spin_lock_init(&od->irq_lock);
 
-       if (!pdev->dev.of_node) {
-               od->dma_requests = od->plat->dma_attr->lch_count;
-               if (unlikely(!od->dma_requests))
-                       od->dma_requests = OMAP_SDMA_REQUESTS;
-       } else if (of_property_read_u32(pdev->dev.of_node, "dma-requests",
-                                       &od->dma_requests)) {
+       /* Number of DMA requests */
+       od->dma_requests = OMAP_SDMA_REQUESTS;
+       if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
+                                                     "dma-requests",
+                                                     &od->dma_requests)) {
                dev_info(&pdev->dev,
                         "Missing dma-requests property, using %u.\n",
                         OMAP_SDMA_REQUESTS);
-               od->dma_requests = OMAP_SDMA_REQUESTS;
        }
 
-       od->lch_map = devm_kcalloc(&pdev->dev, od->dma_requests,
-                                  sizeof(*od->lch_map), GFP_KERNEL);
+       /* Number of available logical channels */
+       if (!pdev->dev.of_node) {
+               lch_count = od->plat->dma_attr->lch_count;
+               if (unlikely(!lch_count))
+                       lch_count = OMAP_SDMA_CHANNELS;
+       } else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
+                                       &lch_count)) {
+               dev_info(&pdev->dev,
+                        "Missing dma-channels property, using %u.\n",
+                        OMAP_SDMA_CHANNELS);
+               lch_count = OMAP_SDMA_CHANNELS;
+       }
+
+       od->lch_map = devm_kcalloc(&pdev->dev, lch_count, sizeof(*od->lch_map),
+                                  GFP_KERNEL);
        if (!od->lch_map)
                return -ENOMEM;
 
index 87fd01539fcb74daa0c80a62a268311b7125dce1..740bbb942594873b08deecb59c801460dcc868ab 100644 (file)
@@ -448,6 +448,9 @@ struct dma_pl330_chan {
 
        /* for cyclic capability */
        bool cyclic;
+
+       /* for runtime pm tracking */
+       bool active;
 };
 
 struct pl330_dmac {
@@ -2033,6 +2036,7 @@ static void pl330_tasklet(unsigned long data)
                _stop(pch->thread);
                spin_unlock(&pch->thread->dmac->lock);
                power_down = true;
+               pch->active = false;
        } else {
                /* Make sure the PL330 Channel thread is active */
                spin_lock(&pch->thread->dmac->lock);
@@ -2052,6 +2056,7 @@ static void pl330_tasklet(unsigned long data)
                        desc->status = PREP;
                        list_move_tail(&desc->node, &pch->work_list);
                        if (power_down) {
+                               pch->active = true;
                                spin_lock(&pch->thread->dmac->lock);
                                _start(pch->thread);
                                spin_unlock(&pch->thread->dmac->lock);
@@ -2166,6 +2171,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
        unsigned long flags;
        struct pl330_dmac *pl330 = pch->dmac;
        LIST_HEAD(list);
+       bool power_down = false;
 
        pm_runtime_get_sync(pl330->ddma.dev);
        spin_lock_irqsave(&pch->lock, flags);
@@ -2176,6 +2182,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
        pch->thread->req[0].desc = NULL;
        pch->thread->req[1].desc = NULL;
        pch->thread->req_running = -1;
+       power_down = pch->active;
+       pch->active = false;
 
        /* Mark all desc done */
        list_for_each_entry(desc, &pch->submitted_list, node) {
@@ -2193,6 +2201,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
        list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
        spin_unlock_irqrestore(&pch->lock, flags);
        pm_runtime_mark_last_busy(pl330->ddma.dev);
+       if (power_down)
+               pm_runtime_put_autosuspend(pl330->ddma.dev);
        pm_runtime_put_autosuspend(pl330->ddma.dev);
 
        return 0;
@@ -2357,6 +2367,7 @@ static void pl330_issue_pending(struct dma_chan *chan)
                 * updated on work_list emptiness status.
                 */
                WARN_ON(list_empty(&pch->submitted_list));
+               pch->active = true;
                pm_runtime_get_sync(pch->dmac->ddma.dev);
        }
        list_splice_tail_init(&pch->submitted_list, &pch->work_list);
index 2e441d0ccd79a37a5486d394310839c2cdc47f01..4c357d47546594c6bd0c9b1ca16e27c658a720cf 100644 (file)
@@ -986,6 +986,7 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
 {
        struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
        struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
+       struct rcar_dmac_chan_map *map = &rchan->map;
        struct rcar_dmac_desc_page *page, *_page;
        struct rcar_dmac_desc *desc;
        LIST_HEAD(list);
@@ -1019,6 +1020,13 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
                free_page((unsigned long)page);
        }
 
+       /* Remove slave mapping if present. */
+       if (map->slave.xfer_size) {
+               dma_unmap_resource(chan->device->dev, map->addr,
+                                  map->slave.xfer_size, map->dir, 0);
+               map->slave.xfer_size = 0;
+       }
+
        pm_runtime_put(chan->device->dev);
 }
 
index 3688d0873a3e1c844470c31105ecabd3b5d49be3..3056ce7f8c69d01c61fe3ab0eeff6ad299f538f7 100644 (file)
@@ -880,7 +880,7 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
        struct virt_dma_desc *vdesc;
        enum dma_status status;
        unsigned long flags;
-       u32 residue;
+       u32 residue = 0;
 
        status = dma_cookie_status(c, cookie, state);
        if ((status == DMA_COMPLETE) || (!state))
@@ -888,16 +888,12 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
 
        spin_lock_irqsave(&chan->vchan.lock, flags);
        vdesc = vchan_find_desc(&chan->vchan, cookie);
-       if (cookie == chan->desc->vdesc.tx.cookie) {
+       if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
                residue = stm32_dma_desc_residue(chan, chan->desc,
                                                 chan->next_sg);
-       } else if (vdesc) {
+       else if (vdesc)
                residue = stm32_dma_desc_residue(chan,
                                                 to_stm32_dma_desc(vdesc), 0);
-       } else {
-               residue = 0;
-       }
-
        dma_set_residue(state, residue);
 
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
@@ -972,21 +968,18 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
        struct stm32_dma_chan *chan;
        struct dma_chan *c;
 
-       if (dma_spec->args_count < 3)
+       if (dma_spec->args_count < 4)
                return NULL;
 
        cfg.channel_id = dma_spec->args[0];
        cfg.request_line = dma_spec->args[1];
        cfg.stream_config = dma_spec->args[2];
-       cfg.threshold = 0;
+       cfg.threshold = dma_spec->args[3];
 
        if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) || (cfg.request_line >=
                                STM32_DMA_MAX_REQUEST_ID))
                return NULL;
 
-       if (dma_spec->args_count > 3)
-               cfg.threshold = dma_spec->args[3];
-
        chan = &dmadev->chan[cfg.channel_id];
 
        c = dma_get_slave_channel(&chan->vchan.chan);
index 3f24aeb48c0e6735f13995ded459cac776b2620f..2403475a37cf9203eb0651ae7f412ef5f63f45d6 100644 (file)
@@ -149,6 +149,7 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev)
        match = of_match_node(ti_am335x_master_match, dma_node);
        if (!match) {
                dev_err(&pdev->dev, "DMA master is not supported\n");
+               of_node_put(dma_node);
                return -EINVAL;
        }
 
@@ -339,6 +340,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
        match = of_match_node(ti_dra7_master_match, dma_node);
        if (!match) {
                dev_err(&pdev->dev, "DMA master is not supported\n");
+               of_node_put(dma_node);
                return -EINVAL;
        }
 
index 70e13230d8db835ec201590762ea0ac5331f7ca2..9ad0b1934be9a31173ede1ed6c1c3705cc0c1e05 100644 (file)
@@ -721,11 +721,17 @@ static int scpi_sensor_get_value(u16 sensor, u64 *val)
 
        ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id),
                                &buf, sizeof(buf));
-       if (!ret)
+       if (ret)
+               return ret;
+
+       if (scpi_info->is_legacy)
+               /* only 32-bits supported, hi_val can be junk */
+               *val = le32_to_cpu(buf.lo_val);
+       else
                *val = (u64)le32_to_cpu(buf.hi_val) << 32 |
                        le32_to_cpu(buf.lo_val);
 
-       return ret;
+       return 0;
 }
 
 static int scpi_device_get_power_state(u16 dev_id)
index 520a40e5e0e431129cf1fe66a7604d32d7e20061..6c7d60c239b5b459b1f45e1d1110f7ed5a265cb6 100644 (file)
@@ -71,8 +71,7 @@ void __init efi_fake_memmap(void)
        }
 
        /* allocate memory for new EFI memmap */
-       new_memmap_phy = memblock_alloc(efi.memmap.desc_size * new_nr_map,
-                                       PAGE_SIZE);
+       new_memmap_phy = efi_memmap_alloc(new_nr_map);
        if (!new_memmap_phy)
                return;
 
index b98824e3800abbb57b110d7cc92c0ca8417791d2..0e2a96b12cb3647635db19912ec0ab4004f71572 100644 (file)
@@ -39,14 +39,6 @@ efi_status_t efi_file_close(void *handle);
 
 unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
 
-efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
-                       unsigned long orig_fdt_size,
-                       void *fdt, int new_fdt_size, char *cmdline_ptr,
-                       u64 initrd_addr, u64 initrd_size,
-                       efi_memory_desc_t *memory_map,
-                       unsigned long map_size, unsigned long desc_size,
-                       u32 desc_ver);
-
 efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
                                            void *handle,
                                            unsigned long *new_fdt_addr,
index a6a93116a8f053f6c14911376ffa6da7f1dff44e..921dfa047202952c9064cd39971e68e0e3c28b49 100644 (file)
 
 #include "efistub.h"
 
-efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
-                       unsigned long orig_fdt_size,
-                       void *fdt, int new_fdt_size, char *cmdline_ptr,
-                       u64 initrd_addr, u64 initrd_size,
-                       efi_memory_desc_t *memory_map,
-                       unsigned long map_size, unsigned long desc_size,
-                       u32 desc_ver)
+static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
+                              unsigned long orig_fdt_size,
+                              void *fdt, int new_fdt_size, char *cmdline_ptr,
+                              u64 initrd_addr, u64 initrd_size)
 {
        int node, num_rsv;
        int status;
@@ -101,25 +98,23 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
        if (status)
                goto fdt_set_fail;
 
-       fdt_val64 = cpu_to_fdt64((u64)(unsigned long)memory_map);
+       fdt_val64 = U64_MAX; /* placeholder */
        status = fdt_setprop(fdt, node, "linux,uefi-mmap-start",
                             &fdt_val64,  sizeof(fdt_val64));
        if (status)
                goto fdt_set_fail;
 
-       fdt_val32 = cpu_to_fdt32(map_size);
+       fdt_val32 = U32_MAX; /* placeholder */
        status = fdt_setprop(fdt, node, "linux,uefi-mmap-size",
                             &fdt_val32,  sizeof(fdt_val32));
        if (status)
                goto fdt_set_fail;
 
-       fdt_val32 = cpu_to_fdt32(desc_size);
        status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-size",
                             &fdt_val32, sizeof(fdt_val32));
        if (status)
                goto fdt_set_fail;
 
-       fdt_val32 = cpu_to_fdt32(desc_ver);
        status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-ver",
                             &fdt_val32, sizeof(fdt_val32));
        if (status)
@@ -148,6 +143,43 @@ fdt_set_fail:
        return EFI_LOAD_ERROR;
 }
 
+static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
+{
+       int node = fdt_path_offset(fdt, "/chosen");
+       u64 fdt_val64;
+       u32 fdt_val32;
+       int err;
+
+       if (node < 0)
+               return EFI_LOAD_ERROR;
+
+       fdt_val64 = cpu_to_fdt64((unsigned long)*map->map);
+       err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-start",
+                                 &fdt_val64, sizeof(fdt_val64));
+       if (err)
+               return EFI_LOAD_ERROR;
+
+       fdt_val32 = cpu_to_fdt32(*map->map_size);
+       err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-size",
+                                 &fdt_val32, sizeof(fdt_val32));
+       if (err)
+               return EFI_LOAD_ERROR;
+
+       fdt_val32 = cpu_to_fdt32(*map->desc_size);
+       err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-size",
+                                 &fdt_val32, sizeof(fdt_val32));
+       if (err)
+               return EFI_LOAD_ERROR;
+
+       fdt_val32 = cpu_to_fdt32(*map->desc_ver);
+       err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-ver",
+                                 &fdt_val32, sizeof(fdt_val32));
+       if (err)
+               return EFI_LOAD_ERROR;
+
+       return EFI_SUCCESS;
+}
+
 #ifndef EFI_FDT_ALIGN
 #define EFI_FDT_ALIGN EFI_PAGE_SIZE
 #endif
@@ -243,20 +275,10 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
                        goto fail;
                }
 
-               /*
-                * Now that we have done our final memory allocation (and free)
-                * we can get the memory map key  needed for
-                * exit_boot_services().
-                */
-               status = efi_get_memory_map(sys_table, &map);
-               if (status != EFI_SUCCESS)
-                       goto fail_free_new_fdt;
-
                status = update_fdt(sys_table,
                                    (void *)fdt_addr, fdt_size,
                                    (void *)*new_fdt_addr, new_fdt_size,
-                                   cmdline_ptr, initrd_addr, initrd_size,
-                                   memory_map, map_size, desc_size, desc_ver);
+                                   cmdline_ptr, initrd_addr, initrd_size);
 
                /* Succeeding the first time is the expected case. */
                if (status == EFI_SUCCESS)
@@ -266,20 +288,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
                        /*
                         * We need to allocate more space for the new
                         * device tree, so free existing buffer that is
-                        * too small.  Also free memory map, as we will need
-                        * to get new one that reflects the free/alloc we do
-                        * on the device tree buffer.
+                        * too small.
                         */
                        efi_free(sys_table, new_fdt_size, *new_fdt_addr);
-                       sys_table->boottime->free_pool(memory_map);
                        new_fdt_size += EFI_PAGE_SIZE;
                } else {
                        pr_efi_err(sys_table, "Unable to construct new device tree.\n");
-                       goto fail_free_mmap;
+                       goto fail_free_new_fdt;
                }
        }
 
-       sys_table->boottime->free_pool(memory_map);
        priv.runtime_map = runtime_map;
        priv.runtime_entry_count = &runtime_entry_count;
        status = efi_exit_boot_services(sys_table, handle, &map, &priv,
@@ -288,6 +306,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
        if (status == EFI_SUCCESS) {
                efi_set_virtual_address_map_t *svam;
 
+               status = update_fdt_memmap((void *)*new_fdt_addr, &map);
+               if (status != EFI_SUCCESS) {
+                       /*
+                        * The kernel won't get far without the memory map, but
+                        * may still be able to print something meaningful so
+                        * return success here.
+                        */
+                       return EFI_SUCCESS;
+               }
+
                /* Install the new virtual address map */
                svam = sys_table->runtime->set_virtual_address_map;
                status = svam(runtime_entry_count * desc_size, desc_size,
@@ -319,9 +347,6 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
 
        pr_efi_err(sys_table, "Exit boot services failed.\n");
 
-fail_free_mmap:
-       sys_table->boottime->free_pool(memory_map);
-
 fail_free_new_fdt:
        efi_free(sys_table, new_fdt_size, *new_fdt_addr);
 
index f03ddecd232b542ce4c7163506b69da0d29ad2c7..78686443cb378abf616c7cfe595b82d3a51c9cd6 100644 (file)
@@ -9,6 +9,44 @@
 #include <linux/efi.h>
 #include <linux/io.h>
 #include <asm/early_ioremap.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+
+static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
+{
+       return memblock_alloc(size, 0);
+}
+
+static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
+{
+       unsigned int order = get_order(size);
+       struct page *p = alloc_pages(GFP_KERNEL, order);
+
+       if (!p)
+               return 0;
+
+       return PFN_PHYS(page_to_pfn(p));
+}
+
+/**
+ * efi_memmap_alloc - Allocate memory for the EFI memory map
+ * @num_entries: Number of entries in the allocated map.
+ *
+ * Depending on whether mm_init() has already been invoked or not,
+ * either memblock or "normal" page allocation is used.
+ *
+ * Returns the physical address of the allocated memory map on
+ * success, zero on failure.
+ */
+phys_addr_t __init efi_memmap_alloc(unsigned int num_entries)
+{
+       unsigned long size = num_entries * efi.memmap.desc_size;
+
+       if (slab_is_available())
+               return __efi_memmap_alloc_late(size);
+
+       return __efi_memmap_alloc_early(size);
+}
 
 /**
  * __efi_memmap_init - Common code for mapping the EFI memory map
index 44bdb78f837b4d89f2aaef6e7d0764b8c2783d87..29d58feaf67535d0efc470339de02124ed92cd44 100644 (file)
@@ -270,8 +270,7 @@ static int suspend_test_thread(void *arg)
        struct cpuidle_device *dev;
        struct cpuidle_driver *drv;
        /* No need for an actual callback, we just want to wake up the CPU. */
-       struct timer_list wakeup_timer =
-               TIMER_INITIALIZER(dummy_callback, 0, 0);
+       struct timer_list wakeup_timer;
 
        /* Wait for the main thread to give the start signal. */
        wait_for_completion(&suspend_threads_started);
@@ -287,6 +286,7 @@ static int suspend_test_thread(void *arg)
        pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
                cpu, drv->state_count - 1);
 
+       setup_timer_on_stack(&wakeup_timer, dummy_callback, 0);
        for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
                int index;
                /*
index 1e8fde8cb803d7406dd98d51673b63eb69fdfd23..2292742eac8f5441642469f8fa05e04ca0979afb 100644 (file)
@@ -205,7 +205,7 @@ static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable)
        return 0;
 }
 
-static int __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
+static int mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
 {
        struct irq_chip_generic *gc;
        struct irq_chip_type *ct;
index f4c26c7826cdfc0b5163632e596c0e664586549b..86bf3b84ada56d42758c2b3c57db91351b9f811b 100644 (file)
@@ -1317,12 +1317,12 @@ void gpiochip_remove(struct gpio_chip *chip)
 
        /* FIXME: should the legacy sysfs handling be moved to gpio_device? */
        gpiochip_sysfs_unregister(gdev);
+       gpiochip_free_hogs(chip);
        /* Numb the device, cancelling all outstanding operations */
        gdev->chip = NULL;
        gpiochip_irqchip_remove(chip);
        acpi_gpiochip_remove(chip);
        gpiochip_remove_pin_ranges(chip);
-       gpiochip_free_hogs(chip);
        of_gpiochip_remove(chip);
        /*
         * We accept no more calls into the driver from this point, so
index 9ada56c16a589d01e43162f656c0a25c8d926284..4c851fde1e82231d04366b1916da170d0b24cb0e 100644 (file)
@@ -840,6 +840,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                                else if (type == CGS_UCODE_ID_SMU_SK)
                                        strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
                                break;
+                       case CHIP_POLARIS12:
+                               strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+                               break;
                        default:
                                DRM_ERROR("SMC firmware not supported\n");
                                return -EINVAL;
index 60bd4afe45c8c550b48e4484bf4d42f279f161c5..fe3bb94fe58df1fed0df7160c47932f358a513e8 100644 (file)
@@ -73,6 +73,7 @@ static const char *amdgpu_asic_name[] = {
        "STONEY",
        "POLARIS10",
        "POLARIS11",
+       "POLARIS12",
        "LAST",
 };
 
@@ -1277,6 +1278,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
        case CHIP_FIJI:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
+       case CHIP_POLARIS12:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
                if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
index 8cb937b2bfcc7fac0e3308aaa5cdc70cd53021c2..2534adaebe30f2d11a27fbaa121ff23cf6fce561 100644 (file)
@@ -418,6 +418,13 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
        {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
        {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+       /* Polaris12 */
+       {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+       {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+       {0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+       {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+       {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+       {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 
        {0, 0, 0}
 };
index fc592c2b0e166825100c1908691f02ac2f49a642..95a568df8551e589701b9cb2d1be6c04d97bf14a 100644 (file)
@@ -98,6 +98,7 @@ static int amdgpu_pp_early_init(void *handle)
        switch (adev->asic_type) {
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
+       case CHIP_POLARIS12:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_TOPAZ:
index a81dfaeeb8c0713e61b66046cf35fb8f89f86161..1d564beb0fde67541eab9be4ee76a39eb89197e1 100644 (file)
@@ -65,6 +65,7 @@
 #define FIRMWARE_STONEY                "amdgpu/stoney_uvd.bin"
 #define FIRMWARE_POLARIS10     "amdgpu/polaris10_uvd.bin"
 #define FIRMWARE_POLARIS11     "amdgpu/polaris11_uvd.bin"
+#define FIRMWARE_POLARIS12     "amdgpu/polaris12_uvd.bin"
 
 /**
  * amdgpu_uvd_cs_ctx - Command submission parser context
@@ -98,6 +99,7 @@ MODULE_FIRMWARE(FIRMWARE_FIJI);
 MODULE_FIRMWARE(FIRMWARE_STONEY);
 MODULE_FIRMWARE(FIRMWARE_POLARIS10);
 MODULE_FIRMWARE(FIRMWARE_POLARIS11);
+MODULE_FIRMWARE(FIRMWARE_POLARIS12);
 
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
 
@@ -149,6 +151,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
        case CHIP_POLARIS11:
                fw_name = FIRMWARE_POLARIS11;
                break;
+       case CHIP_POLARIS12:
+               fw_name = FIRMWARE_POLARIS12;
+               break;
        default:
                return -EINVAL;
        }
index 69b66b9e7f57e4ffaab3f83d6c17fb3a7161df34..8fec802d3908c05459c1e0ca2737cd19450ce9a4 100644 (file)
@@ -52,6 +52,7 @@
 #define FIRMWARE_STONEY                "amdgpu/stoney_vce.bin"
 #define FIRMWARE_POLARIS10     "amdgpu/polaris10_vce.bin"
 #define FIRMWARE_POLARIS11         "amdgpu/polaris11_vce.bin"
+#define FIRMWARE_POLARIS12         "amdgpu/polaris12_vce.bin"
 
 #ifdef CONFIG_DRM_AMDGPU_CIK
 MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@@ -66,6 +67,7 @@ MODULE_FIRMWARE(FIRMWARE_FIJI);
 MODULE_FIRMWARE(FIRMWARE_STONEY);
 MODULE_FIRMWARE(FIRMWARE_POLARIS10);
 MODULE_FIRMWARE(FIRMWARE_POLARIS11);
+MODULE_FIRMWARE(FIRMWARE_POLARIS12);
 
 static void amdgpu_vce_idle_work_handler(struct work_struct *work);
 
@@ -121,6 +123,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
        case CHIP_POLARIS11:
                fw_name = FIRMWARE_POLARIS11;
                break;
+       case CHIP_POLARIS12:
+               fw_name = FIRMWARE_POLARIS12;
+               break;
 
        default:
                return -EINVAL;
index b3d62b909f4372db1e569308e29a436de873d0c9..2006abbbfb6216d34c3fb3e18573e17f37561f18 100644 (file)
@@ -167,6 +167,7 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
                                                 (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                amdgpu_program_register_sequence(adev,
                                                 polaris11_golden_settings_a11,
                                                 (const u32)ARRAY_SIZE(polaris11_golden_settings_a11));
@@ -608,6 +609,7 @@ static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev)
                num_crtc = 6;
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                num_crtc = 5;
                break;
        default:
@@ -1589,6 +1591,7 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev)
                adev->mode_info.audio.num_pins = 8;
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                adev->mode_info.audio.num_pins = 6;
                break;
        default:
@@ -2388,7 +2391,8 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
        int pll;
 
        if ((adev->asic_type == CHIP_POLARIS10) ||
-           (adev->asic_type == CHIP_POLARIS11)) {
+           (adev->asic_type == CHIP_POLARIS11) ||
+           (adev->asic_type == CHIP_POLARIS12)) {
                struct amdgpu_encoder *amdgpu_encoder =
                        to_amdgpu_encoder(amdgpu_crtc->encoder);
                struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -2822,7 +2826,8 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
                return -EINVAL;
 
        if ((adev->asic_type == CHIP_POLARIS10) ||
-           (adev->asic_type == CHIP_POLARIS11)) {
+           (adev->asic_type == CHIP_POLARIS11) ||
+           (adev->asic_type == CHIP_POLARIS12)) {
                struct amdgpu_encoder *amdgpu_encoder =
                        to_amdgpu_encoder(amdgpu_crtc->encoder);
                int encoder_mode =
@@ -2992,6 +2997,7 @@ static int dce_v11_0_early_init(void *handle)
                adev->mode_info.num_dig = 6;
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                adev->mode_info.num_hpd = 5;
                adev->mode_info.num_dig = 5;
                break;
@@ -3101,7 +3107,8 @@ static int dce_v11_0_hw_init(void *handle)
        amdgpu_atombios_crtc_powergate_init(adev);
        amdgpu_atombios_encoder_init_dig(adev);
        if ((adev->asic_type == CHIP_POLARIS10) ||
-           (adev->asic_type == CHIP_POLARIS11)) {
+           (adev->asic_type == CHIP_POLARIS11) ||
+           (adev->asic_type == CHIP_POLARIS12)) {
                amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
                                                   DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
                amdgpu_atombios_crtc_set_dce_clock(adev, 0,
index d0ec00986f3826c32957ab432c677c53c24091c2..373374164bd59edc0970e3f73a77ca31b17652e7 100644 (file)
@@ -139,6 +139,13 @@ MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");
 
+MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");
+
 static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
 {
        {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
@@ -689,6 +696,7 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
                                                 (const u32)ARRAY_SIZE(tonga_golden_common_all));
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                amdgpu_program_register_sequence(adev,
                                                 golden_settings_polaris11_a11,
                                                 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -903,6 +911,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
        case CHIP_POLARIS10:
                chip_name = "polaris10";
                break;
+       case CHIP_POLARIS12:
+               chip_name = "polaris12";
+               break;
        case CHIP_STONEY:
                chip_name = "stoney";
                break;
@@ -1768,6 +1779,7 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
                gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                ret = amdgpu_atombios_get_gfx_info(adev);
                if (ret)
                        return ret;
@@ -2682,6 +2694,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
 
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
@@ -3503,6 +3516,7 @@ gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
                *rconf1 |= 0x0;
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
                          SE_XSEL(1) | SE_YSEL(1);
                *rconf1 |= 0x0;
@@ -4021,7 +4035,8 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
                        cz_enable_cp_power_gating(adev, true);
                else
                        cz_enable_cp_power_gating(adev, false);
-       } else if (adev->asic_type == CHIP_POLARIS11) {
+       } else if ((adev->asic_type == CHIP_POLARIS11) ||
+                  (adev->asic_type == CHIP_POLARIS12)) {
                gfx_v8_0_init_csb(adev);
                gfx_v8_0_init_save_restore_list(adev);
                gfx_v8_0_enable_save_restore_machine(adev);
@@ -4095,7 +4110,8 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
                 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
        WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
        if (adev->asic_type == CHIP_POLARIS11 ||
-           adev->asic_type == CHIP_POLARIS10) {
+           adev->asic_type == CHIP_POLARIS10 ||
+           adev->asic_type == CHIP_POLARIS12) {
                tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
                tmp &= ~0x3;
                WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
@@ -4283,6 +4299,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
                amdgpu_ring_write(ring, 0x0000002A);
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                amdgpu_ring_write(ring, 0x16000012);
                amdgpu_ring_write(ring, 0x00000000);
                break;
@@ -4664,7 +4681,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
                            (adev->asic_type == CHIP_FIJI) ||
                            (adev->asic_type == CHIP_STONEY) ||
                            (adev->asic_type == CHIP_POLARIS11) ||
-                           (adev->asic_type == CHIP_POLARIS10)) {
+                           (adev->asic_type == CHIP_POLARIS10) ||
+                           (adev->asic_type == CHIP_POLARIS12)) {
                                WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
                                       AMDGPU_DOORBELL_KIQ << 2);
                                WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
@@ -4700,7 +4718,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
                mqd->cp_hqd_persistent_state = tmp;
                if (adev->asic_type == CHIP_STONEY ||
                        adev->asic_type == CHIP_POLARIS11 ||
-                       adev->asic_type == CHIP_POLARIS10) {
+                       adev->asic_type == CHIP_POLARIS10 ||
+                       adev->asic_type == CHIP_POLARIS12) {
                        tmp = RREG32(mmCP_ME1_PIPE3_INT_CNTL);
                        tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE3_INT_CNTL, GENERIC2_INT_ENABLE, 1);
                        WREG32(mmCP_ME1_PIPE3_INT_CNTL, tmp);
@@ -5279,7 +5298,8 @@ static int gfx_v8_0_late_init(void *handle)
 static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
                                                       bool enable)
 {
-       if (adev->asic_type == CHIP_POLARIS11)
+       if ((adev->asic_type == CHIP_POLARIS11) ||
+           (adev->asic_type == CHIP_POLARIS12))
                /* Send msg to SMU via Powerplay */
                amdgpu_set_powergating_state(adev,
                                             AMD_IP_BLOCK_TYPE_SMC,
@@ -5353,6 +5373,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
                        gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
                        gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
                else
index 0daac3a5be79572e65a2fff812453c2bd541940c..476bc9f1954b9daee41d154a94b17ee0896e59ed 100644 (file)
@@ -46,6 +46,7 @@ static int gmc_v8_0_wait_for_idle(void *handle);
 MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
 
 static const u32 golden_settings_tonga_a11[] =
 {
@@ -130,6 +131,7 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
                                                 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                amdgpu_program_register_sequence(adev,
                                                 golden_settings_polaris11_a11,
                                                 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -225,6 +227,9 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
        case CHIP_POLARIS10:
                chip_name = "polaris10";
                break;
+       case CHIP_POLARIS12:
+               chip_name = "polaris12";
+               break;
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
index 1170a64a3184f6788c58cf1f6038bacaf45c4e62..034ace79ed492951d1ed06b9dc1543419b1948bf 100644 (file)
@@ -60,6 +60,8 @@ MODULE_FIRMWARE("amdgpu/polaris10_sdma.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_sdma1.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin");
 
 
 static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
@@ -206,6 +208,7 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
                                                 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                amdgpu_program_register_sequence(adev,
                                                 golden_settings_polaris11_a11,
                                                 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -278,6 +281,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
        case CHIP_POLARIS10:
                chip_name = "polaris10";
                break;
+       case CHIP_POLARIS12:
+               chip_name = "polaris12";
+               break;
        case CHIP_CARRIZO:
                chip_name = "carrizo";
                break;
index 6c65a1a2de798b5630d6b356065a32fe578608c5..10bedfac27b8118dae6735a65ecebcbdd4109d2b 100644 (file)
@@ -56,7 +56,6 @@
 #define BIOS_SCRATCH_4                                    0x5cd
 
 MODULE_FIRMWARE("radeon/tahiti_smc.bin");
-MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
 MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
 MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
 MODULE_FIRMWARE("radeon/verde_smc.bin");
@@ -3488,19 +3487,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
                    (adev->pdev->device == 0x6817) ||
                    (adev->pdev->device == 0x6806))
                        max_mclk = 120000;
-       } else if (adev->asic_type == CHIP_VERDE) {
-               if ((adev->pdev->revision == 0x81) ||
-                   (adev->pdev->revision == 0x83) ||
-                   (adev->pdev->revision == 0x87) ||
-                   (adev->pdev->device == 0x6820) ||
-                   (adev->pdev->device == 0x6821) ||
-                   (adev->pdev->device == 0x6822) ||
-                   (adev->pdev->device == 0x6823) ||
-                   (adev->pdev->device == 0x682A) ||
-                   (adev->pdev->device == 0x682B)) {
-                       max_sclk = 75000;
-                       max_mclk = 80000;
-               }
        } else if (adev->asic_type == CHIP_OLAND) {
                if ((adev->pdev->revision == 0xC7) ||
                    (adev->pdev->revision == 0x80) ||
@@ -7687,49 +7673,49 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
                chip_name = "tahiti";
                break;
        case CHIP_PITCAIRN:
-               if ((adev->pdev->revision == 0x81) ||
-                   (adev->pdev->device == 0x6810) ||
-                   (adev->pdev->device == 0x6811) ||
-                   (adev->pdev->device == 0x6816) ||
-                   (adev->pdev->device == 0x6817) ||
-                   (adev->pdev->device == 0x6806))
+               if ((adev->pdev->revision == 0x81) &&
+                   ((adev->pdev->device == 0x6810) ||
+                   (adev->pdev->device == 0x6811)))
                        chip_name = "pitcairn_k";
                else
                        chip_name = "pitcairn";
                break;
        case CHIP_VERDE:
-               if ((adev->pdev->revision == 0x81) ||
-                   (adev->pdev->revision == 0x83) ||
-                   (adev->pdev->revision == 0x87) ||
-                   (adev->pdev->device == 0x6820) ||
-                   (adev->pdev->device == 0x6821) ||
-                   (adev->pdev->device == 0x6822) ||
-                   (adev->pdev->device == 0x6823) ||
-                   (adev->pdev->device == 0x682A) ||
-                   (adev->pdev->device == 0x682B))
+               if (((adev->pdev->device == 0x6820) &&
+                       ((adev->pdev->revision == 0x81) ||
+                       (adev->pdev->revision == 0x83))) ||
+                   ((adev->pdev->device == 0x6821) &&
+                       ((adev->pdev->revision == 0x83) ||
+                       (adev->pdev->revision == 0x87))) ||
+                   ((adev->pdev->revision == 0x87) &&
+                       ((adev->pdev->device == 0x6823) ||
+                       (adev->pdev->device == 0x682b))))
                        chip_name = "verde_k";
                else
                        chip_name = "verde";
                break;
        case CHIP_OLAND:
-               if ((adev->pdev->revision == 0xC7) ||
-                   (adev->pdev->revision == 0x80) ||
-                   (adev->pdev->revision == 0x81) ||
-                   (adev->pdev->revision == 0x83) ||
-                   (adev->pdev->revision == 0x87) ||
-                   (adev->pdev->device == 0x6604) ||
-                   (adev->pdev->device == 0x6605))
+               if (((adev->pdev->revision == 0x81) &&
+                       ((adev->pdev->device == 0x6600) ||
+                       (adev->pdev->device == 0x6604) ||
+                       (adev->pdev->device == 0x6605) ||
+                       (adev->pdev->device == 0x6610))) ||
+                   ((adev->pdev->revision == 0x83) &&
+                       (adev->pdev->device == 0x6610)))
                        chip_name = "oland_k";
                else
                        chip_name = "oland";
                break;
        case CHIP_HAINAN:
-               if ((adev->pdev->revision == 0x81) ||
-                   (adev->pdev->revision == 0x83) ||
-                   (adev->pdev->revision == 0xC3) ||
-                   (adev->pdev->device == 0x6664) ||
-                   (adev->pdev->device == 0x6665) ||
-                   (adev->pdev->device == 0x6667))
+               if (((adev->pdev->revision == 0x81) &&
+                       (adev->pdev->device == 0x6660)) ||
+                   ((adev->pdev->revision == 0x83) &&
+                       ((adev->pdev->device == 0x6660) ||
+                       (adev->pdev->device == 0x6663) ||
+                       (adev->pdev->device == 0x6665) ||
+                       (adev->pdev->device == 0x6667))) ||
+                   ((adev->pdev->revision == 0xc3) &&
+                       (adev->pdev->device == 0x6665)))
                        chip_name = "hainan_k";
                else
                        chip_name = "hainan";
index a79e283590fbe7fc9b627a097ccec81b99d1a66c..6de6becce74567d962eb498dcd33cbd1aedc62c6 100644 (file)
@@ -791,15 +791,10 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
-       static int curstate = -1;
 
        if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
                return 0;
 
-       if (curstate == state)
-               return 0;
-
-       curstate = state;
        if (enable) {
                /* wait for STATUS to clear */
                if (uvd_v5_0_wait_for_idle(handle))
index 6b3293a1c7b8b92d9d47d780de7c883a0beac195..5fb0b7f5c065121218ea4befe1a9da8def2e3035 100644 (file)
@@ -320,11 +320,12 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
 {
        u32 tmp;
 
-       /* Fiji, Stoney, Polaris10, Polaris11 are single pipe */
+       /* Fiji, Stoney, Polaris10, Polaris11, Polaris12 are single pipe */
        if ((adev->asic_type == CHIP_FIJI) ||
            (adev->asic_type == CHIP_STONEY) ||
            (adev->asic_type == CHIP_POLARIS10) ||
-           (adev->asic_type == CHIP_POLARIS11))
+           (adev->asic_type == CHIP_POLARIS11) ||
+           (adev->asic_type == CHIP_POLARIS12))
                return AMDGPU_VCE_HARVEST_VCE1;
 
        /* Tonga and CZ are dual or single pipe */
index bf088d6d9bf1f96430d7afda1ec3fbeae6e82768..c2ac54f1134179457db3823eb64161817ee8f5b3 100644 (file)
@@ -88,6 +88,7 @@ MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
 
 /*
  * Indirect registers accessor
@@ -312,6 +313,7 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
                break;
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
+       case CHIP_POLARIS12:
        default:
                break;
        }
@@ -671,6 +673,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
        case CHIP_TONGA:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
+       case CHIP_POLARIS12:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
                asic_register_table = cz_allowed_read_registers;
@@ -994,6 +997,11 @@ static int vi_common_early_init(void *handle)
                adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x50;
                break;
+       case CHIP_POLARIS12:
+               adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
+               adev->pg_flags = 0;
+               adev->external_rev_id = adev->rev_id + 0x64;
+               break;
        case CHIP_CARRIZO:
                adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
                        AMD_CG_SUPPORT_GFX_MGCG |
@@ -1346,6 +1354,7 @@ static int vi_common_set_clockgating_state(void *handle,
        case CHIP_TONGA:
        case CHIP_POLARIS10:
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                vi_common_set_clockgating_state_by_smu(adev, state);
        default:
                break;
@@ -1429,6 +1438,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
                break;
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
+       case CHIP_POLARIS12:
                amdgpu_ip_block_add(adev, &vi_common_ip_block);
                amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
                amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
index c02469ada9f131f417e2a4fdb1d0c6b1fac4eafa..85f358764bbc8f2475e0bbf36df5013312354cab 100644 (file)
@@ -23,7 +23,7 @@
 #ifndef __AMD_SHARED_H__
 #define __AMD_SHARED_H__
 
-#define AMD_MAX_USEC_TIMEOUT           100000  /* 100 ms */
+#define AMD_MAX_USEC_TIMEOUT           200000  /* 200 ms */
 
 /*
  * Supported ASIC types
@@ -46,6 +46,7 @@ enum amd_asic_type {
        CHIP_STONEY,
        CHIP_POLARIS10,
        CHIP_POLARIS11,
+       CHIP_POLARIS12,
        CHIP_LAST,
 };
 
index dc6700aee18f624266a47d773abfb20bea573e03..b03606405a53399ea1a6755fecc637fa9bc09b23 100644 (file)
@@ -95,6 +95,7 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
                        break;
                case CHIP_POLARIS11:
                case CHIP_POLARIS10:
+               case CHIP_POLARIS12:
                        polaris_set_asic_special_caps(hwmgr);
                        hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
                        break;
@@ -745,7 +746,7 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
        phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                PHM_PlatformCaps_TablelessHardwareInterface);
 
-       if (hwmgr->chip_id == CHIP_POLARIS11)
+       if ((hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12))
                phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                        PHM_PlatformCaps_SPLLShutdownSupport);
        return 0;
index 26477f0f09dcc7045db5626abb7a9da8b4a2b3cc..6cd1287a7a8fda1cfe38482cd47899223496b2fd 100644 (file)
@@ -521,7 +521,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
                                PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
                                result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
                                PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
-                       } else if (hwmgr->chip_id == CHIP_POLARIS11) {
+                       } else if ((hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12)) {
                                result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
                                PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
                                result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
index e5812aa456f3b1440e525f0592d299672db14759..6e618aa20719d27f56b05db5ed2a76b7b7829c97 100644 (file)
@@ -65,6 +65,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
                        break;
                case CHIP_POLARIS11:
                case CHIP_POLARIS10:
+               case CHIP_POLARIS12:
                        polaris10_smum_init(smumgr);
                        break;
                default:
index 583f47f27b36de20a7ec1e273ce02559aa3fdae0..34f757bcabae8d88382f18392467e67f4e0f6100 100644 (file)
@@ -1259,8 +1259,10 @@ int drm_atomic_helper_commit(struct drm_device *dev,
 
        if (!nonblock) {
                ret = drm_atomic_helper_wait_for_fences(dev, state, true);
-               if (ret)
+               if (ret) {
+                       drm_atomic_helper_cleanup_planes(dev, state);
                        return ret;
+               }
        }
 
        /*
index db516382a4d4b2ae375f41cde6cf3f8bfe5b5bb9..711c31c8d8b46c3c51e4f93a741daecf4b28ce31 100644 (file)
@@ -123,6 +123,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
        u8 changed = old ^ new;
        int ret;
 
+       memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
        if (!(changed & PCI_COMMAND_MEMORY))
                return 0;
 
@@ -142,7 +143,6 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
                        return ret;
        }
 
-       memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
        return 0;
 }
 
@@ -240,7 +240,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
        if (WARN_ON(bytes > 4))
                return -EINVAL;
 
-       if (WARN_ON(offset + bytes >= INTEL_GVT_MAX_CFG_SPACE_SZ))
+       if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
                return -EINVAL;
 
        /* First check if it's PCI_COMMAND */
index 7eaaf1c9ed2bf47e935bf0592a4c00658838086d..6c5fdf5b2ce2a9d407839a3a28a7e067a5630d8d 100644 (file)
@@ -1998,6 +1998,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
        INIT_LIST_HEAD(&gtt->oos_page_list_head);
        INIT_LIST_HEAD(&gtt->post_shadow_list_head);
 
+       intel_vgpu_reset_ggtt(vgpu);
+
        ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
                        NULL, 1, 0);
        if (IS_ERR(ggtt_mm)) {
@@ -2206,6 +2208,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
 int intel_gvt_init_gtt(struct intel_gvt *gvt)
 {
        int ret;
+       void *page_addr;
 
        gvt_dbg_core("init gtt\n");
 
@@ -2218,6 +2221,23 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
                return -ENODEV;
        }
 
+       gvt->gtt.scratch_ggtt_page =
+               alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+       if (!gvt->gtt.scratch_ggtt_page) {
+               gvt_err("fail to allocate scratch ggtt page\n");
+               return -ENOMEM;
+       }
+
+       page_addr = page_address(gvt->gtt.scratch_ggtt_page);
+
+       gvt->gtt.scratch_ggtt_mfn =
+               intel_gvt_hypervisor_virt_to_mfn(page_addr);
+       if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
+               gvt_err("fail to translate scratch ggtt page\n");
+               __free_page(gvt->gtt.scratch_ggtt_page);
+               return -EFAULT;
+       }
+
        if (enable_out_of_sync) {
                ret = setup_spt_oos(gvt);
                if (ret) {
@@ -2239,6 +2259,41 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
  */
 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 {
+       __free_page(gvt->gtt.scratch_ggtt_page);
+
        if (enable_out_of_sync)
                clean_spt_oos(gvt);
 }
+
+/**
+ * intel_vgpu_reset_ggtt - reset the GGTT entry
+ * @vgpu: a vGPU
+ *
+ * This function is called at the vGPU create stage
+ * to reset all the GGTT entries.
+ *
+ */
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+       struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+       u32 index;
+       u32 offset;
+       u32 num_entries;
+       struct intel_gvt_gtt_entry e;
+
+       memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
+       e.type = GTT_TYPE_GGTT_PTE;
+       ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
+       e.val64 |= _PAGE_PRESENT;
+
+       index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
+       num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
+       for (offset = 0; offset < num_entries; offset++)
+               ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+
+       index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
+       num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
+       for (offset = 0; offset < num_entries; offset++)
+               ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+}
index d250013bc37b053f45b3b747668d791fa477de26..b315ab3593ec37f2e73faf564a6d6c9fee9e7c81 100644 (file)
@@ -81,6 +81,9 @@ struct intel_gvt_gtt {
        struct list_head oos_page_use_list_head;
        struct list_head oos_page_free_list_head;
        struct list_head mm_lru_list_head;
+
+       struct page *scratch_ggtt_page;
+       unsigned long scratch_ggtt_mfn;
 };
 
 enum {
@@ -202,6 +205,7 @@ struct intel_vgpu_gtt {
 
 extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
 extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
 
 extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
 extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
index ad0e9364ee703a65ce2a33834321bb4a9fce34f1..0af17016f33f24f40d338715e5c78bfac8058e92 100644 (file)
@@ -175,6 +175,7 @@ struct intel_vgpu {
                struct notifier_block group_notifier;
                struct kvm *kvm;
                struct work_struct release_work;
+               atomic_t released;
        } vdev;
 #endif
 };
index 4dd6722a733933224c269825d6f12f53525bf9c7..faaae07ae487277973533bbf907b6eefc2632a48 100644 (file)
@@ -114,12 +114,15 @@ out:
 static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
        struct gvt_dma *entry;
+       kvm_pfn_t pfn;
 
        mutex_lock(&vgpu->vdev.cache_lock);
+
        entry = __gvt_cache_find(vgpu, gfn);
-       mutex_unlock(&vgpu->vdev.cache_lock);
+       pfn = (entry == NULL) ? 0 : entry->pfn;
 
-       return entry == NULL ? 0 : entry->pfn;
+       mutex_unlock(&vgpu->vdev.cache_lock);
+       return pfn;
 }
 
 static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
@@ -166,7 +169,7 @@ static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
 
 static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
 {
-       struct device *dev = &vgpu->vdev.mdev->dev;
+       struct device *dev = mdev_dev(vgpu->vdev.mdev);
        struct gvt_dma *this;
        unsigned long g1;
        int rc;
@@ -195,7 +198,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
 {
        struct gvt_dma *dma;
        struct rb_node *node = NULL;
-       struct device *dev = &vgpu->vdev.mdev->dev;
+       struct device *dev = mdev_dev(vgpu->vdev.mdev);
        unsigned long gfn;
 
        mutex_lock(&vgpu->vdev.cache_lock);
@@ -396,7 +399,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
        struct device *pdev;
        void *gvt;
 
-       pdev = mdev->parent->dev;
+       pdev = mdev_parent_dev(mdev);
        gvt = kdev_to_i915(pdev)->gvt;
 
        type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
@@ -418,7 +421,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
        mdev_set_drvdata(mdev, vgpu);
 
        gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
-                    dev_name(&mdev->dev));
+                    dev_name(mdev_dev(mdev)));
        return 0;
 }
 
@@ -482,7 +485,7 @@ static int intel_vgpu_open(struct mdev_device *mdev)
        vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;
 
        events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
-       ret = vfio_register_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY, &events,
+       ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
                                &vgpu->vdev.iommu_notifier);
        if (ret != 0) {
                gvt_err("vfio_register_notifier for iommu failed: %d\n", ret);
@@ -490,17 +493,26 @@ static int intel_vgpu_open(struct mdev_device *mdev)
        }
 
        events = VFIO_GROUP_NOTIFY_SET_KVM;
-       ret = vfio_register_notifier(&mdev->dev, VFIO_GROUP_NOTIFY, &events,
+       ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
                                &vgpu->vdev.group_notifier);
        if (ret != 0) {
                gvt_err("vfio_register_notifier for group failed: %d\n", ret);
                goto undo_iommu;
        }
 
-       return kvmgt_guest_init(mdev);
+       ret = kvmgt_guest_init(mdev);
+       if (ret)
+               goto undo_group;
+
+       atomic_set(&vgpu->vdev.released, 0);
+       return ret;
+
+undo_group:
+       vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+                                       &vgpu->vdev.group_notifier);
 
 undo_iommu:
-       vfio_unregister_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY,
+       vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
                                        &vgpu->vdev.iommu_notifier);
 out:
        return ret;
@@ -509,17 +521,26 @@ out:
 static void __intel_vgpu_release(struct intel_vgpu *vgpu)
 {
        struct kvmgt_guest_info *info;
+       int ret;
 
        if (!handle_valid(vgpu->handle))
                return;
 
-       vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY,
+       if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
+               return;
+
+       ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
                                        &vgpu->vdev.iommu_notifier);
-       vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY,
+       WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
+
+       ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
                                        &vgpu->vdev.group_notifier);
+       WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
 
        info = (struct kvmgt_guest_info *)vgpu->handle;
        kvmgt_guest_exit(info);
+
+       vgpu->vdev.kvm = NULL;
        vgpu->handle = 0;
 }
 
@@ -534,6 +555,7 @@ static void intel_vgpu_release_work(struct work_struct *work)
 {
        struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
                                        vdev.release_work);
+
        __intel_vgpu_release(vgpu);
 }
 
@@ -1089,7 +1111,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
        return 0;
 }
 
-static const struct parent_ops intel_vgpu_ops = {
+static const struct mdev_parent_ops intel_vgpu_ops = {
        .supported_type_groups  = intel_vgpu_type_groups,
        .create                 = intel_vgpu_create,
        .remove                 = intel_vgpu_remove,
@@ -1134,6 +1156,10 @@ static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
 
        idx = srcu_read_lock(&kvm->srcu);
        slot = gfn_to_memslot(kvm, gfn);
+       if (!slot) {
+               srcu_read_unlock(&kvm->srcu, idx);
+               return -EINVAL;
+       }
 
        spin_lock(&kvm->mmu_lock);
 
@@ -1164,6 +1190,10 @@ static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
 
        idx = srcu_read_lock(&kvm->srcu);
        slot = gfn_to_memslot(kvm, gfn);
+       if (!slot) {
+               srcu_read_unlock(&kvm->srcu, idx);
+               return -EINVAL;
+       }
 
        spin_lock(&kvm->mmu_lock);
 
@@ -1311,18 +1341,14 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
 {
-       struct intel_vgpu *vgpu;
-
        if (!info) {
                gvt_err("kvmgt_guest_info invalid\n");
                return false;
        }
 
-       vgpu = info->vgpu;
-
        kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
        kvmgt_protect_table_destroy(info);
-       gvt_cache_destroy(vgpu);
+       gvt_cache_destroy(info->vgpu);
        vfree(info);
 
        return true;
@@ -1372,7 +1398,7 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
                return pfn;
 
        pfn = INTEL_GVT_INVALID_ADDR;
-       dev = &info->vgpu->vdev.mdev->dev;
+       dev = mdev_dev(info->vgpu->vdev.mdev);
        rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
        if (rc != 1) {
                gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
index d2a0fbc896c3cb2ca0734a9371b603fb8669a643..81cd921770c6db7748ad8e880180c3fb6f4c448a 100644 (file)
@@ -65,7 +65,7 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
        int i, ret;
 
        for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
-               mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)
+               mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
                        + i * PAGE_SIZE);
                if (mfn == INTEL_GVT_INVALID_ADDR) {
                        gvt_err("fail to get MFN from VA\n");
index 4a31b7a891ecaf3e2732c9f2d6ce62fa633419f6..3dd7fc662859a90803b142a8adf7f542f29c5948 100644 (file)
@@ -244,14 +244,16 @@ err_phys:
 
 static void
 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
-                               struct sg_table *pages)
+                               struct sg_table *pages,
+                               bool needs_clflush)
 {
        GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
 
        if (obj->mm.madv == I915_MADV_DONTNEED)
                obj->mm.dirty = false;
 
-       if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
+       if (needs_clflush &&
+           (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
            !cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
                drm_clflush_sg(pages);
 
@@ -263,7 +265,7 @@ static void
 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
                               struct sg_table *pages)
 {
-       __i915_gem_object_release_shmem(obj, pages);
+       __i915_gem_object_release_shmem(obj, pages, false);
 
        if (obj->mm.dirty) {
                struct address_space *mapping = obj->base.filp->f_mapping;
@@ -2231,7 +2233,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
        struct sgt_iter sgt_iter;
        struct page *page;
 
-       __i915_gem_object_release_shmem(obj, pages);
+       __i915_gem_object_release_shmem(obj, pages, true);
 
        i915_gem_gtt_finish_pages(obj, pages);
 
@@ -2304,15 +2306,6 @@ unlock:
        mutex_unlock(&obj->mm.lock);
 }
 
-static unsigned int swiotlb_max_size(void)
-{
-#if IS_ENABLED(CONFIG_SWIOTLB)
-       return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
-#else
-       return 0;
-#endif
-}
-
 static void i915_sg_trim(struct sg_table *orig_st)
 {
        struct sg_table new_st;
@@ -2322,7 +2315,7 @@ static void i915_sg_trim(struct sg_table *orig_st)
        if (orig_st->nents == orig_st->orig_nents)
                return;
 
-       if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL))
+       if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
                return;
 
        new_sg = new_st.sgl;
@@ -2360,7 +2353,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
        GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
 
-       max_segment = swiotlb_max_size();
+       max_segment = swiotlb_max_segment();
        if (!max_segment)
                max_segment = rounddown(UINT_MAX, PAGE_SIZE);
 
@@ -2728,6 +2721,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
        struct drm_i915_gem_request *request;
        struct i915_gem_context *incomplete_ctx;
        struct intel_timeline *timeline;
+       unsigned long flags;
        bool ring_hung;
 
        if (engine->irq_seqno_barrier)
@@ -2763,13 +2757,20 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
        if (i915_gem_context_is_default(incomplete_ctx))
                return;
 
+       timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
+
+       spin_lock_irqsave(&engine->timeline->lock, flags);
+       spin_lock(&timeline->lock);
+
        list_for_each_entry_continue(request, &engine->timeline->requests, link)
                if (request->ctx == incomplete_ctx)
                        reset_request(request);
 
-       timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
        list_for_each_entry(request, &timeline->requests, link)
                reset_request(request);
+
+       spin_unlock(&timeline->lock);
+       spin_unlock_irqrestore(&engine->timeline->lock, flags);
 }
 
 void i915_gem_reset(struct drm_i915_private *dev_priv)
index e2b077df2da023107c92a818d3c74042dcf73bac..d229f47d1028bb615675f846ecbd46a43ae17197 100644 (file)
@@ -413,6 +413,25 @@ i915_gem_active_set(struct i915_gem_active *active,
        rcu_assign_pointer(active->request, request);
 }
 
+/**
+ * i915_gem_active_set_retire_fn - updates the retirement callback
+ * @active - the active tracker
+ * @fn - the routine called when the request is retired
+ * @mutex - struct_mutex used to guard retirements
+ *
+ * i915_gem_active_set_retire_fn() updates the function pointer that
+ * is called when the final request associated with the @active tracker
+ * is retired.
+ */
+static inline void
+i915_gem_active_set_retire_fn(struct i915_gem_active *active,
+                             i915_gem_retire_fn fn,
+                             struct mutex *mutex)
+{
+       lockdep_assert_held(mutex);
+       active->retire = fn ?: i915_gem_retire_noop;
+}
+
 static inline struct drm_i915_gem_request *
 __i915_gem_active_peek(const struct i915_gem_active *active)
 {
index 6daad86137606d700b6101ee0ffde7a7ee35a1cd..3dc8724df4004842c78bc985606da52a06e449c0 100644 (file)
@@ -16791,7 +16791,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 
        for_each_intel_crtc(dev, crtc) {
                struct intel_crtc_state *crtc_state = crtc->config;
-               int pixclk = 0;
 
                __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
                memset(crtc_state, 0, sizeof(*crtc_state));
@@ -16803,23 +16802,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                crtc->base.enabled = crtc_state->base.enable;
                crtc->active = crtc_state->base.active;
 
-               if (crtc_state->base.active) {
+               if (crtc_state->base.active)
                        dev_priv->active_crtcs |= 1 << crtc->pipe;
 
-                       if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
-                               pixclk = ilk_pipe_pixel_rate(crtc_state);
-                       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-                               pixclk = crtc_state->base.adjusted_mode.crtc_clock;
-                       else
-                               WARN_ON(dev_priv->display.modeset_calc_cdclk);
-
-                       /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
-                       if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
-                               pixclk = DIV_ROUND_UP(pixclk * 100, 95);
-               }
-
-               dev_priv->min_pixclk[crtc->pipe] = pixclk;
-
                readout_plane_state(crtc);
 
                DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
@@ -16892,6 +16877,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
        }
 
        for_each_intel_crtc(dev, crtc) {
+               int pixclk = 0;
+
                crtc->base.hwmode = crtc->config->base.adjusted_mode;
 
                memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
@@ -16919,10 +16906,23 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                         */
                        crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
 
+                       if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
+                               pixclk = ilk_pipe_pixel_rate(crtc->config);
+                       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+                               pixclk = crtc->config->base.adjusted_mode.crtc_clock;
+                       else
+                               WARN_ON(dev_priv->display.modeset_calc_cdclk);
+
+                       /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+                       if (IS_BROADWELL(dev_priv) && crtc->config->ips_enabled)
+                               pixclk = DIV_ROUND_UP(pixclk * 100, 95);
+
                        drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
                        update_scanline_offset(crtc);
                }
 
+               dev_priv->min_pixclk[crtc->pipe] = pixclk;
+
                intel_pipe_config_sanity_check(dev_priv, crtc->config);
        }
 }
index d9bc19be855e76b9bab0f1cee082bc170bccff26..0b8e8eb85c19dfe7e2bc4404914c2f578043e4e3 100644 (file)
@@ -355,7 +355,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
                                    struct intel_dp *intel_dp);
 static void
 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
-                                             struct intel_dp *intel_dp);
+                                             struct intel_dp *intel_dp,
+                                             bool force_disable_vdd);
 static void
 intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp);
 
@@ -516,7 +517,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
 
        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
-       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
 
        /*
         * Even vdd force doesn't work until we've made
@@ -553,7 +554,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
         * Only the HW needs to be reprogrammed, the SW state is fixed and
         * has been setup during connector init.
         */
-       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
 
        return 0;
 }
@@ -636,7 +637,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
                      port_name(port), pipe_name(intel_dp->pps_pipe));
 
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
-       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
 }
 
 void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
@@ -2912,7 +2913,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
 
        /* init power sequencer on this pipe and port */
        intel_dp_init_panel_power_sequencer(dev, intel_dp);
-       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
 }
 
 static void vlv_pre_enable_dp(struct intel_encoder *encoder,
@@ -5055,7 +5056,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 
 static void
 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
-                                             struct intel_dp *intel_dp)
+                                             struct intel_dp *intel_dp,
+                                             bool force_disable_vdd)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 pp_on, pp_off, pp_div, port_sel = 0;
@@ -5068,6 +5070,31 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 
        intel_pps_get_registers(dev_priv, intel_dp, &regs);
 
+       /*
+        * On some VLV machines the BIOS can leave the VDD
+        * enabled even on power seqeuencers which aren't
+        * hooked up to any port. This would mess up the
+        * power domain tracking the first time we pick
+        * one of these power sequencers for use since
+        * edp_panel_vdd_on() would notice that the VDD was
+        * already on and therefore wouldn't grab the power
+        * domain reference. Disable VDD first to avoid this.
+        * This also avoids spuriously turning the VDD on as
+        * soon as the new power seqeuencer gets initialized.
+        */
+       if (force_disable_vdd) {
+               u32 pp = ironlake_get_pp_control(intel_dp);
+
+               WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
+
+               if (pp & EDP_FORCE_VDD)
+                       DRM_DEBUG_KMS("VDD already on, disabling first\n");
+
+               pp &= ~EDP_FORCE_VDD;
+
+               I915_WRITE(regs.pp_ctrl, pp);
+       }
+
        pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
                (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
        pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
@@ -5122,7 +5149,7 @@ static void intel_dp_pps_init(struct drm_device *dev,
                vlv_initial_power_sequencer_setup(intel_dp);
        } else {
                intel_dp_init_panel_power_sequencer(dev, intel_dp);
-               intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+               intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
        }
 }
 
index fd0e4dac7cc16f35ff658466bc107a7296407354..e589e17876dc134b60bbb5958ad1fe856f2f118e 100644 (file)
@@ -216,7 +216,8 @@ static void intel_overlay_submit_request(struct intel_overlay *overlay,
 {
        GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip,
                                        &overlay->i915->drm.struct_mutex));
-       overlay->last_flip.retire = retire;
+       i915_gem_active_set_retire_fn(&overlay->last_flip, retire,
+                                     &overlay->i915->drm.struct_mutex);
        i915_gem_active_set(&overlay->last_flip, req);
        i915_add_request(req);
 }
@@ -839,8 +840,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
        if (ret)
                goto out_unpin;
 
-       i915_gem_track_fb(overlay->vma->obj, new_bo,
-                         INTEL_FRONTBUFFER_OVERLAY(pipe));
+       i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
+                         vma->obj, INTEL_FRONTBUFFER_OVERLAY(pipe));
 
        overlay->old_vma = overlay->vma;
        overlay->vma = vma;
@@ -1430,6 +1431,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
        overlay->contrast = 75;
        overlay->saturation = 146;
 
+       init_request_active(&overlay->last_flip, NULL);
+
        regs = intel_overlay_map_regs(overlay);
        if (!regs)
                goto out_unpin_bo;
index 4942ca090b46fdfa1cf4b6193f3d66095804e02e..7890e30eb584570d5d87d857131db4065a8b3849 100644 (file)
@@ -51,6 +51,9 @@ static int meson_plane_atomic_check(struct drm_plane *plane,
        struct drm_crtc_state *crtc_state;
        struct drm_rect clip = { 0, };
 
+       if (!state->crtc)
+               return 0;
+
        crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
        if (IS_ERR(crtc_state))
                return PTR_ERR(crtc_state);
index d836b2274531f83fcd49ea86d649ca49fb405489..f7c87017222048eff2bd17be8eb4f6c83f52b4a7 100644 (file)
  * - TV Panel encoding via ENCT
  */
 
+/* HHI Registers */
+#define HHI_VDAC_CNTL0         0x2F4 /* 0xbd offset in data sheet */
+#define HHI_VDAC_CNTL1         0x2F8 /* 0xbe offset in data sheet */
+#define HHI_HDMI_PHY_CNTL0     0x3a0 /* 0xe8 offset in data sheet */
+
 struct meson_cvbs_enci_mode meson_cvbs_enci_pal = {
        .mode_tag = MESON_VENC_MODE_CVBS_PAL,
        .hso_begin = 3,
@@ -242,6 +247,20 @@ void meson_venc_disable_vsync(struct meson_drm *priv)
 
 void meson_venc_init(struct meson_drm *priv)
 {
+       /* Disable CVBS VDAC */
+       regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
+       regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
+
+       /* Power Down Dacs */
+       writel_relaxed(0xff, priv->io_base + _REG(VENC_VDAC_SETTING));
+
+       /* Disable HDMI PHY */
+       regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0);
+
+       /* Disable HDMI */
+       writel_bits_relaxed(0x3, 0,
+                           priv->io_base + _REG(VPU_HDMI_SETTING));
+
        /* Disable all encoders */
        writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
        writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
index c809c085fd78abd61f15f3c9d2a19f14ff173d00..a2bcc70a03efaaac6ee32058f747e57161222b23 100644 (file)
@@ -167,7 +167,7 @@ static void meson_venc_cvbs_encoder_disable(struct drm_encoder *encoder)
 
        /* Disable CVBS VDAC */
        regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
-       regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
+       regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
 }
 
 static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder)
index a18126150e1136ea380dcdddc89aabe293d7fcbb..14ff87686a36ffb2580353bde1f0126964c2f8dc 100644 (file)
@@ -213,7 +213,14 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 void adreno_flush(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-       uint32_t wptr = get_wptr(gpu->rb);
+       uint32_t wptr;
+
+       /*
+        * Mask wptr value that we calculate to fit in the HW range. This is
+        * to account for the possibility that the last command fit exactly into
+        * the ringbuffer and rb->next hasn't wrapped to zero yet
+        */
+       wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1);
 
        /* ensure writes to ringbuffer have hit system memory: */
        mb();
index 166e84e4f0d48f80eacb992c3ccc8964af89b28d..489676568a10d15ac959093e6a09ee3f133abe45 100644 (file)
@@ -106,7 +106,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
                        pagefault_disable();
                }
 
-               if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
+               if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
+                       !(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
                        DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
                        ret = -EINVAL;
                        goto out_unlock;
@@ -290,7 +291,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 {
        uint32_t i, last_offset = 0;
        uint32_t *ptr;
-       int ret;
+       int ret = 0;
 
        if (offset % 4) {
                DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
@@ -318,12 +319,13 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 
                ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
                if (ret)
-                       return -EFAULT;
+                       goto out;
 
                if (submit_reloc.submit_offset % 4) {
                        DRM_ERROR("non-aligned reloc offset: %u\n",
                                        submit_reloc.submit_offset);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto out;
                }
 
                /* offset in dwords: */
@@ -332,12 +334,13 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
                if ((off >= (obj->base.size / 4)) ||
                                (off < last_offset)) {
                        DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto out;
                }
 
                ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
                if (ret)
-                       return ret;
+                       goto out;
 
                if (valid)
                        continue;
@@ -354,9 +357,10 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
                last_offset = off;
        }
 
+out:
        msm_gem_put_vaddr_locked(&obj->base);
 
-       return 0;
+       return ret;
 }
 
 static void submit_cleanup(struct msm_gem_submit *submit)
index f326cf6a32e64473461e0b7859101f1a404c0318..67b34e069abf383d4c533ab7d348fd5d576ad525 100644 (file)
@@ -23,7 +23,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
        struct msm_ringbuffer *ring;
        int ret;
 
-       size = ALIGN(size, 4);   /* size should be dword aligned */
+       if (WARN_ON(!is_power_of_2(size)))
+               return ERR_PTR(-EINVAL);
 
        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring) {
index ad4d7b8b832271a8e28a1b59dc496fa19049ed34..e8a38d29685547a69fb1d657e833cd9cf5637cff 100644 (file)
@@ -50,7 +50,6 @@ MODULE_FIRMWARE("radeon/tahiti_ce.bin");
 MODULE_FIRMWARE("radeon/tahiti_mc.bin");
 MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
 MODULE_FIRMWARE("radeon/tahiti_smc.bin");
-MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
 
 MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
 MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
@@ -1657,9 +1656,6 @@ static int si_init_microcode(struct radeon_device *rdev)
        switch (rdev->family) {
        case CHIP_TAHITI:
                chip_name = "TAHITI";
-               /* XXX: figure out which Tahitis need the new ucode */
-               if (0)
-                       new_smc = true;
                new_chip_name = "tahiti";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
                me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1671,12 +1667,9 @@ static int si_init_microcode(struct radeon_device *rdev)
                break;
        case CHIP_PITCAIRN:
                chip_name = "PITCAIRN";
-               if ((rdev->pdev->revision == 0x81) ||
-                   (rdev->pdev->device == 0x6810) ||
-                   (rdev->pdev->device == 0x6811) ||
-                   (rdev->pdev->device == 0x6816) ||
-                   (rdev->pdev->device == 0x6817) ||
-                   (rdev->pdev->device == 0x6806))
+               if ((rdev->pdev->revision == 0x81) &&
+                   ((rdev->pdev->device == 0x6810) ||
+                    (rdev->pdev->device == 0x6811)))
                        new_smc = true;
                new_chip_name = "pitcairn";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
@@ -1689,15 +1682,15 @@ static int si_init_microcode(struct radeon_device *rdev)
                break;
        case CHIP_VERDE:
                chip_name = "VERDE";
-               if ((rdev->pdev->revision == 0x81) ||
-                   (rdev->pdev->revision == 0x83) ||
-                   (rdev->pdev->revision == 0x87) ||
-                   (rdev->pdev->device == 0x6820) ||
-                   (rdev->pdev->device == 0x6821) ||
-                   (rdev->pdev->device == 0x6822) ||
-                   (rdev->pdev->device == 0x6823) ||
-                   (rdev->pdev->device == 0x682A) ||
-                   (rdev->pdev->device == 0x682B))
+               if (((rdev->pdev->device == 0x6820) &&
+                    ((rdev->pdev->revision == 0x81) ||
+                     (rdev->pdev->revision == 0x83))) ||
+                   ((rdev->pdev->device == 0x6821) &&
+                    ((rdev->pdev->revision == 0x83) ||
+                     (rdev->pdev->revision == 0x87))) ||
+                   ((rdev->pdev->revision == 0x87) &&
+                    ((rdev->pdev->device == 0x6823) ||
+                     (rdev->pdev->device == 0x682b))))
                        new_smc = true;
                new_chip_name = "verde";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
@@ -1710,13 +1703,13 @@ static int si_init_microcode(struct radeon_device *rdev)
                break;
        case CHIP_OLAND:
                chip_name = "OLAND";
-               if ((rdev->pdev->revision == 0xC7) ||
-                   (rdev->pdev->revision == 0x80) ||
-                   (rdev->pdev->revision == 0x81) ||
-                   (rdev->pdev->revision == 0x83) ||
-                   (rdev->pdev->revision == 0x87) ||
-                   (rdev->pdev->device == 0x6604) ||
-                   (rdev->pdev->device == 0x6605))
+               if (((rdev->pdev->revision == 0x81) &&
+                    ((rdev->pdev->device == 0x6600) ||
+                     (rdev->pdev->device == 0x6604) ||
+                     (rdev->pdev->device == 0x6605) ||
+                     (rdev->pdev->device == 0x6610))) ||
+                   ((rdev->pdev->revision == 0x83) &&
+                    (rdev->pdev->device == 0x6610)))
                        new_smc = true;
                new_chip_name = "oland";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
@@ -1728,12 +1721,15 @@ static int si_init_microcode(struct radeon_device *rdev)
                break;
        case CHIP_HAINAN:
                chip_name = "HAINAN";
-               if ((rdev->pdev->revision == 0x81) ||
-                   (rdev->pdev->revision == 0x83) ||
-                   (rdev->pdev->revision == 0xC3) ||
-                   (rdev->pdev->device == 0x6664) ||
-                   (rdev->pdev->device == 0x6665) ||
-                   (rdev->pdev->device == 0x6667))
+               if (((rdev->pdev->revision == 0x81) &&
+                    (rdev->pdev->device == 0x6660)) ||
+                   ((rdev->pdev->revision == 0x83) &&
+                    ((rdev->pdev->device == 0x6660) ||
+                     (rdev->pdev->device == 0x6663) ||
+                     (rdev->pdev->device == 0x6665) ||
+                     (rdev->pdev->device == 0x6667))) ||
+                   ((rdev->pdev->revision == 0xc3) &&
+                    (rdev->pdev->device == 0x6665)))
                        new_smc = true;
                new_chip_name = "hainan";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
index 8b5e697f2549ee3f81dff2cb8cc8a05e7543c856..13ba73fd9b68849bd34d27207eb282eb6ff62793 100644 (file)
@@ -3008,19 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
                    (rdev->pdev->device == 0x6817) ||
                    (rdev->pdev->device == 0x6806))
                        max_mclk = 120000;
-       } else if (rdev->family == CHIP_VERDE) {
-               if ((rdev->pdev->revision == 0x81) ||
-                   (rdev->pdev->revision == 0x83) ||
-                   (rdev->pdev->revision == 0x87) ||
-                   (rdev->pdev->device == 0x6820) ||
-                   (rdev->pdev->device == 0x6821) ||
-                   (rdev->pdev->device == 0x6822) ||
-                   (rdev->pdev->device == 0x6823) ||
-                   (rdev->pdev->device == 0x682A) ||
-                   (rdev->pdev->device == 0x682B)) {
-                       max_sclk = 75000;
-                       max_mclk = 80000;
-               }
        } else if (rdev->family == CHIP_OLAND) {
                if ((rdev->pdev->revision == 0xC7) ||
                    (rdev->pdev->revision == 0x80) ||
index 725dffad5640cddf43d6aea4616cd90a035c40bc..6dfdb145f3bbb694c2750e30807640999404e91e 100644 (file)
@@ -856,7 +856,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct tilcdc_drm_private *priv = dev->dev_private;
-       uint32_t stat;
+       uint32_t stat, reg;
 
        stat = tilcdc_read_irqstatus(dev);
        tilcdc_clear_irqstatus(dev, stat);
@@ -921,17 +921,26 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
                dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
                                    __func__, stat);
                tilcdc_crtc->frame_intact = false;
-               if (tilcdc_crtc->sync_lost_count++ >
-                   SYNC_LOST_COUNT_LIMIT) {
-                       dev_err(dev->dev, "%s(0x%08x): Sync lost flood detected, recovering", __func__, stat);
-                       queue_work(system_wq, &tilcdc_crtc->recover_work);
-                       if (priv->rev == 1)
+               if (priv->rev == 1) {
+                       reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG);
+                       if (reg & LCDC_RASTER_ENABLE) {
                                tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
-                                            LCDC_V1_SYNC_LOST_INT_ENA);
-                       else
+                                            LCDC_RASTER_ENABLE);
+                               tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
+                                          LCDC_RASTER_ENABLE);
+                       }
+               } else {
+                       if (tilcdc_crtc->sync_lost_count++ >
+                           SYNC_LOST_COUNT_LIMIT) {
+                               dev_err(dev->dev,
+                                       "%s(0x%08x): Sync lost flood detected, recovering",
+                                       __func__, stat);
+                               queue_work(system_wq,
+                                          &tilcdc_crtc->recover_work);
                                tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
                                             LCDC_SYNC_LOST);
-                       tilcdc_crtc->sync_lost_count = 0;
+                               tilcdc_crtc->sync_lost_count = 0;
+                       }
                }
        }
 
index d40ed9fdf68d990ce2b138579bc6404a6fae7d99..70b12f89a193dc369273cea5565bf804a78cc50a 100644 (file)
@@ -64,7 +64,8 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
 #define QUIRK_SKIP_INPUT_MAPPING       BIT(2)
 #define QUIRK_IS_MULTITOUCH            BIT(3)
 
-#define NOTEBOOK_QUIRKS                        QUIRK_FIX_NOTEBOOK_REPORT
+#define KEYBOARD_QUIRKS                        (QUIRK_FIX_NOTEBOOK_REPORT | \
+                                                QUIRK_NO_INIT_REPORTS)
 #define TOUCHPAD_QUIRKS                        (QUIRK_NO_INIT_REPORTS | \
                                                 QUIRK_SKIP_INPUT_MAPPING | \
                                                 QUIRK_IS_MULTITOUCH)
@@ -170,11 +171,11 @@ static int asus_raw_event(struct hid_device *hdev,
 
 static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
 {
+       struct input_dev *input = hi->input;
        struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
 
        if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
                int ret;
-               struct input_dev *input = hi->input;
 
                input_set_abs_params(input, ABS_MT_POSITION_X, 0, MAX_X, 0, 0);
                input_set_abs_params(input, ABS_MT_POSITION_Y, 0, MAX_Y, 0, 0);
@@ -191,10 +192,10 @@ static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
                        hid_err(hdev, "Asus input mt init slots failed: %d\n", ret);
                        return ret;
                }
-
-               drvdata->input = input;
        }
 
+       drvdata->input = input;
+
        return 0;
 }
 
@@ -286,7 +287,11 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
                goto err_stop_hw;
        }
 
-       drvdata->input->name = "Asus TouchPad";
+       if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
+               drvdata->input->name = "Asus TouchPad";
+       } else {
+               drvdata->input->name = "Asus Keyboard";
+       }
 
        if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
                ret = asus_start_multitouch(hdev);
@@ -315,7 +320,7 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
 
 static const struct hid_device_id asus_devices[] = {
        { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
-                USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD), NOTEBOOK_QUIRKS},
+                USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD), KEYBOARD_QUIRKS},
        { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
                         USB_DEVICE_ID_ASUSTEK_TOUCHPAD), TOUCHPAD_QUIRKS },
        { }
index cff060b56da9d6cb8cb138657cf4efca7debef61..ea36b557d5eea87b27171937fe9bc05f680b15a8 100644 (file)
@@ -2496,6 +2496,7 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) },
        { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) },
        { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) },
        { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
 #if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB)
index 1b764d1745f3daa693a17ee706c302ff31ae0f0e..1689568b597d4e5bb8824ccbe679f627525bf2a4 100644 (file)
@@ -39,6 +39,9 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
        if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX))
                return rdesc;
 
+       if (*rsize < 4)
+               return rdesc;
+
        for (i = 0; i < *rsize - 4; i++)
                if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) {
                        rdesc[i] = 0x19;
index ec277b96eaa1b33461aa7702f38864598b910e59..f46f2c5117fae76a1c87105363e5c8db4c8673a3 100644 (file)
 #define USB_VENDOR_ID_DRAGONRISE               0x0079
 #define USB_DEVICE_ID_DRAGONRISE_WIIU          0x1800
 #define USB_DEVICE_ID_DRAGONRISE_PS3           0x1801
+#define USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR    0x1803
 #define USB_DEVICE_ID_DRAGONRISE_GAMECUBE      0x1843
 
 #define USB_VENDOR_ID_DWAV             0x0eef
 #define USB_VENDOR_ID_FLATFROG         0x25b5
 #define USB_DEVICE_ID_MULTITOUCH_3200  0x0002
 
+#define USB_VENDOR_ID_FUTABA            0x0547
+#define USB_DEVICE_ID_LED_DISPLAY       0x7000
+
 #define USB_VENDOR_ID_ESSENTIAL_REALITY        0x0d7f
 #define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100
 
 #define USB_VENDOR_ID_PETALYNX         0x18b1
 #define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE   0x0037
 
+#define USB_VENDOR_ID_PETZL            0x2122
+#define USB_DEVICE_ID_PETZL_HEADLAMP   0x1234
+
 #define USB_VENDOR_ID_PHILIPS          0x0471
 #define USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE 0x0617
 
index 5c925228847c8e88653ec5fb00edfc053fa81784..4ef73374a8f9881136cabeda32a67c6a21d53a85 100644 (file)
@@ -212,7 +212,6 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
        __s32 value;
        int ret = 0;
 
-       memset(buffer, 0, buffer_size);
        mutex_lock(&data->mutex);
        report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
        if (!report || (field_index >= report->maxfield)) {
@@ -256,6 +255,8 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
        int buffer_index = 0;
        int i;
 
+       memset(buffer, 0, buffer_size);
+
        mutex_lock(&data->mutex);
        report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
        if (!report || (field_index >= report->maxfield) ||
index 7687c0875395d6b351928a156be6f5268cf50cfe..f405b07d03816506215bd19fe3c878393370484a 100644 (file)
@@ -1099,8 +1099,11 @@ struct sony_sc {
        u8 led_delay_on[MAX_LEDS];
        u8 led_delay_off[MAX_LEDS];
        u8 led_count;
+       bool ds4_dongle_connected;
 };
 
+static void sony_set_leds(struct sony_sc *sc);
+
 static inline void sony_schedule_work(struct sony_sc *sc)
 {
        if (!sc->defer_initialization)
@@ -1430,6 +1433,31 @@ static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
                                return -EILSEQ;
                        }
                }
+
+               /*
+                * In the case of a DS4 USB dongle, bit[2] of byte 31 indicates
+                * if a DS4 is actually connected (indicated by '0').
+                * For non-dongle, this bit is always 0 (connected).
+                */
+               if (sc->hdev->vendor == USB_VENDOR_ID_SONY &&
+                   sc->hdev->product == USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) {
+                       bool connected = (rd[31] & 0x04) ? false : true;
+
+                       if (!sc->ds4_dongle_connected && connected) {
+                               hid_info(sc->hdev, "DualShock 4 USB dongle: controller connected\n");
+                               sony_set_leds(sc);
+                               sc->ds4_dongle_connected = true;
+                       } else if (sc->ds4_dongle_connected && !connected) {
+                               hid_info(sc->hdev, "DualShock 4 USB dongle: controller disconnected\n");
+                               sc->ds4_dongle_connected = false;
+                               /* Return 0, so hidraw can get the report. */
+                               return 0;
+                       } else if (!sc->ds4_dongle_connected) {
+                               /* Return 0, so hidraw can get the report. */
+                               return 0;
+                       }
+               }
+
                dualshock4_parse_report(sc, rd, size);
        }
 
@@ -2390,6 +2418,12 @@ static int sony_check_add(struct sony_sc *sc)
                }
 
                memcpy(sc->mac_address, &buf[1], sizeof(sc->mac_address));
+
+               snprintf(sc->hdev->uniq, sizeof(sc->hdev->uniq),
+                       "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+                       sc->mac_address[5], sc->mac_address[4],
+                       sc->mac_address[3], sc->mac_address[2],
+                       sc->mac_address[1], sc->mac_address[0]);
        } else if ((sc->quirks & SIXAXIS_CONTROLLER_USB) ||
                        (sc->quirks & NAVIGATION_CONTROLLER_USB)) {
                buf = kmalloc(SIXAXIS_REPORT_0xF2_SIZE, GFP_KERNEL);
@@ -2548,7 +2582,7 @@ static int sony_input_configured(struct hid_device *hdev,
                        hid_err(sc->hdev,
                        "Unable to initialize multi-touch slots: %d\n",
                        ret);
-                       return ret;
+                       goto err_stop;
                }
 
                sony_init_output_report(sc, dualshock4_send_output_report);
index 78fb32a7b103446136000c8ee9ac64cf7fba7054..ea3c3546cef7f81f0f1c82b996c3ed5ab9fb26ed 100644 (file)
@@ -426,6 +426,15 @@ static int i2c_hid_hwreset(struct i2c_client *client)
        if (ret)
                goto out_unlock;
 
+       /*
+        * The HID over I2C specification states that if a DEVICE needs time
+        * after the PWR_ON request, it should utilise CLOCK stretching.
+        * However, it has been observered that the Windows driver provides a
+        * 1ms sleep between the PWR_ON and RESET requests and that some devices
+        * rely on this.
+        */
+       usleep_range(1000, 5000);
+
        i2c_hid_dbg(ihid, "resetting...\n");
 
        ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
index b3e01c82af0512dce7a68e6bde908d7e3afeaba8..e9d6cc7cdfc5c8019422d45914dc0363448bcb12 100644 (file)
@@ -83,11 +83,13 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
index 322ed9272811817324396960e63facf9df80d96c..841f2428e84a3936de0a60ee42700efd70933ed2 100644 (file)
@@ -1036,7 +1036,7 @@ static const u8 lm90_temp_emerg_index[3] = {
 };
 
 static const u8 lm90_min_alarm_bits[3] = { 5, 3, 11 };
-static const u8 lm90_max_alarm_bits[3] = { 0, 4, 12 };
+static const u8 lm90_max_alarm_bits[3] = { 6, 4, 12 };
 static const u8 lm90_crit_alarm_bits[3] = { 0, 1, 9 };
 static const u8 lm90_emergency_alarm_bits[3] = { 15, 13, 14 };
 static const u8 lm90_fault_bits[3] = { 0, 2, 10 };
index f6b6d42385e1148b2ad7f279feb9a43396e66894..784670e2736b4fbbc540382afc1aefe66ccc1e5c 100644 (file)
@@ -353,12 +353,12 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
                                [0] = {
                                        .num = ST_ACCEL_FS_AVL_2G,
                                        .value = 0x00,
-                                       .gain = IIO_G_TO_M_S_2(1024),
+                                       .gain = IIO_G_TO_M_S_2(1000),
                                },
                                [1] = {
                                        .num = ST_ACCEL_FS_AVL_6G,
                                        .value = 0x01,
-                                       .gain = IIO_G_TO_M_S_2(340),
+                                       .gain = IIO_G_TO_M_S_2(3000),
                                },
                        },
                },
@@ -366,6 +366,14 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
                        .addr = 0x21,
                        .mask = 0x40,
                },
+               /*
+                * Data Alignment Setting - needs to be set to get
+                * left-justified data like all other sensors.
+                */
+               .das = {
+                       .addr = 0x21,
+                       .mask = 0x01,
+               },
                .drdy_irq = {
                        .addr = 0x21,
                        .mask_int1 = 0x04,
index 38bc319904c4c2998c040e8a05bdcbd3f55e9088..9c8b558ba19ecb035b8f4fe789acee1663184c60 100644 (file)
@@ -561,7 +561,7 @@ config TI_ADS8688
 
 config TI_AM335X_ADC
        tristate "TI's AM335X ADC driver"
-       depends on MFD_TI_AM335X_TSCADC
+       depends on MFD_TI_AM335X_TSCADC && HAS_DMA
        select IIO_BUFFER
        select IIO_KFIFO_BUF
        help
index fe7775bb37402ea73915189d2c4f121b0a74464d..df4045203a0717c40613b2297c9f85a855333f68 100644 (file)
@@ -30,7 +30,9 @@ static int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
 
        for_each_set_bit(i, indio_dev->active_scan_mask, num_data_channels) {
                const struct iio_chan_spec *channel = &indio_dev->channels[i];
-               unsigned int bytes_to_read = channel->scan_type.realbits >> 3;
+               unsigned int bytes_to_read =
+                       DIV_ROUND_UP(channel->scan_type.realbits +
+                                    channel->scan_type.shift, 8);
                unsigned int storage_bytes =
                        channel->scan_type.storagebits >> 3;
 
index 975a1f19f74760e5e1c17c786e36d9a86ae5a2a6..79c8c7cd70d5c6d74fc2e32cad372f8644233c6b 100644 (file)
@@ -401,6 +401,15 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
                        return err;
        }
 
+       /* set DAS */
+       if (sdata->sensor_settings->das.addr) {
+               err = st_sensors_write_data_with_mask(indio_dev,
+                                       sdata->sensor_settings->das.addr,
+                                       sdata->sensor_settings->das.mask, 1);
+               if (err < 0)
+                       return err;
+       }
+
        if (sdata->int_pin_open_drain) {
                dev_info(&indio_dev->dev,
                         "set interrupt line to open drain mode\n");
@@ -483,8 +492,10 @@ static int st_sensors_read_axis_data(struct iio_dev *indio_dev,
        int err;
        u8 *outdata;
        struct st_sensor_data *sdata = iio_priv(indio_dev);
-       unsigned int byte_for_channel = ch->scan_type.realbits >> 3;
+       unsigned int byte_for_channel;
 
+       byte_for_channel = DIV_ROUND_UP(ch->scan_type.realbits +
+                                       ch->scan_type.shift, 8);
        outdata = kmalloc(byte_for_channel, GFP_KERNEL);
        if (!outdata)
                return -ENOMEM;
index 2d2ee353dde7fc7bc976ced92cd969ccd9fade53..a5913e97945eb6b5f26a418a71852fe086e6cd42 100644 (file)
@@ -153,7 +153,7 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
                ior_cfg = val | priv->preset_enable[chan->channel] << 1;
 
                /* Load I/O control configuration */
-               outb(0x40 | ior_cfg, base_offset);
+               outb(0x40 | ior_cfg, base_offset + 1);
 
                return 0;
        case IIO_CHAN_INFO_SCALE:
@@ -233,7 +233,7 @@ static ssize_t quad8_read_set_to_preset_on_index(struct iio_dev *indio_dev,
        const struct quad8_iio *const priv = iio_priv(indio_dev);
 
        return snprintf(buf, PAGE_SIZE, "%u\n",
-               priv->preset_enable[chan->channel]);
+               !priv->preset_enable[chan->channel]);
 }
 
 static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
@@ -241,7 +241,7 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
        size_t len)
 {
        struct quad8_iio *const priv = iio_priv(indio_dev);
-       const int base_offset = priv->base + 2 * chan->channel;
+       const int base_offset = priv->base + 2 * chan->channel + 1;
        bool preset_enable;
        int ret;
        unsigned int ior_cfg;
@@ -250,6 +250,9 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
        if (ret)
                return ret;
 
+       /* Preset enable is active low in Input/Output Control register */
+       preset_enable = !preset_enable;
+
        priv->preset_enable[chan->channel] = preset_enable;
 
        ior_cfg = priv->ab_enable[chan->channel] |
@@ -362,7 +365,7 @@ static int quad8_set_synchronous_mode(struct iio_dev *indio_dev,
        priv->synchronous_mode[chan->channel] = synchronous_mode;
 
        /* Load Index Control configuration to Index Control Register */
-       outb(0x40 | idr_cfg, base_offset);
+       outb(0x60 | idr_cfg, base_offset);
 
        return 0;
 }
@@ -444,7 +447,7 @@ static int quad8_set_index_polarity(struct iio_dev *indio_dev,
        priv->index_polarity[chan->channel] = index_polarity;
 
        /* Load Index Control configuration to Index Control Register */
-       outb(0x40 | idr_cfg, base_offset);
+       outb(0x60 | idr_cfg, base_offset);
 
        return 0;
 }
index 5355507f8fa1493c8fc5346e4f51da37f8f4790c..c9e319bff58b314f626797f9dbf36339eb76be04 100644 (file)
 
 #define BMI160_REG_DUMMY               0x7F
 
-#define BMI160_ACCEL_PMU_MIN_USLEEP    3200
-#define BMI160_ACCEL_PMU_MAX_USLEEP    3800
-#define BMI160_GYRO_PMU_MIN_USLEEP     55000
-#define BMI160_GYRO_PMU_MAX_USLEEP     80000
+#define BMI160_ACCEL_PMU_MIN_USLEEP    3800
+#define BMI160_GYRO_PMU_MIN_USLEEP     80000
 #define BMI160_SOFTRESET_USLEEP                1000
 
 #define BMI160_CHANNEL(_type, _axis, _index) {                 \
@@ -151,20 +149,9 @@ static struct bmi160_regs bmi160_regs[] = {
        },
 };
 
-struct bmi160_pmu_time {
-       unsigned long min;
-       unsigned long max;
-};
-
-static struct bmi160_pmu_time bmi160_pmu_time[] = {
-       [BMI160_ACCEL] = {
-               .min = BMI160_ACCEL_PMU_MIN_USLEEP,
-               .max = BMI160_ACCEL_PMU_MAX_USLEEP
-       },
-       [BMI160_GYRO] = {
-               .min = BMI160_GYRO_PMU_MIN_USLEEP,
-               .max = BMI160_GYRO_PMU_MIN_USLEEP,
-       },
+static unsigned long bmi160_pmu_time[] = {
+       [BMI160_ACCEL] = BMI160_ACCEL_PMU_MIN_USLEEP,
+       [BMI160_GYRO] = BMI160_GYRO_PMU_MIN_USLEEP,
 };
 
 struct bmi160_scale {
@@ -289,7 +276,7 @@ int bmi160_set_mode(struct bmi160_data *data, enum bmi160_sensor_type t,
        if (ret < 0)
                return ret;
 
-       usleep_range(bmi160_pmu_time[t].min, bmi160_pmu_time[t].max);
+       usleep_range(bmi160_pmu_time[t], bmi160_pmu_time[t] + 1000);
 
        return 0;
 }
index a144ca3461fc91e2ad853302dcfc2241a9b3e923..81bd8e8da4a69af929760bfe1c12716fe16de972 100644 (file)
@@ -113,7 +113,7 @@ static const char max44000_int_time_avail_str[] =
        "0.100 "
        "0.025 "
        "0.00625 "
-       "0.001625";
+       "0.0015625";
 
 /* Available scales (internal to ulux) with pretty manual alignment: */
 static const int max44000_scale_avail_ulux_array[] = {
index c8413fc120e632bd9a6677c17507f3c454855d16..7031a8dd4d1404d439dafcb90af38b610774cc05 100644 (file)
@@ -1682,9 +1682,19 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
                size += ret;
        }
 
+       if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
+           flow_attr->num_of_specs == 1) {
+               struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
+               enum ib_flow_spec_type header_spec =
+                       ((union ib_flow_spec *)(flow_attr + 1))->type;
+
+               if (header_spec == IB_FLOW_SPEC_ETH)
+                       mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
+       }
+
        ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
                           MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
-                          MLX4_CMD_WRAPPED);
+                          MLX4_CMD_NATIVE);
        if (ret == -ENOMEM)
                pr_err("mcg table is full. Fail to register network rule.\n");
        else if (ret == -ENXIO)
@@ -1701,7 +1711,7 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
        int err;
        err = mlx4_cmd(dev, reg_id, 0, 0,
                       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
-                      MLX4_CMD_WRAPPED);
+                      MLX4_CMD_NATIVE);
        if (err)
                pr_err("Fail to detach network rule. registration id = 0x%llx\n",
                       reg_id);
index f3135ae22df429c743c771c40c29d049869091f5..abd18f31b24f68e27c28f1120e52aa96b9347887 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
-#include <linux/miscdevice.h>
 #include <linux/module.h>
 #include <linux/poll.h>
 #include <linux/init.h>
index 6d949965867190458abaa653636ed4ab6702a4aa..c7d5b2b643d10798eb7da917f16888a2cff2bc3b 100644 (file)
@@ -1377,6 +1377,12 @@ static int xpad_init_input(struct usb_xpad *xpad)
        input_dev->name = xpad->name;
        input_dev->phys = xpad->phys;
        usb_to_input_id(xpad->udev, &input_dev->id);
+
+       if (xpad->xtype == XTYPE_XBOX360W) {
+               /* x360w controllers and the receiver have different ids */
+               input_dev->id.product = 0x02a1;
+       }
+
        input_dev->dev.parent = &xpad->intf->dev;
 
        input_set_drvdata(input_dev, xpad);
index a8b0a2eec344e863bd292251095a4e84f3a0b98b..7fed92fb8cc137c28e9e54a08d5bea4f1233ba08 100644 (file)
@@ -136,7 +136,6 @@ static const struct i2c_device_id adxl34x_id[] = {
 
 MODULE_DEVICE_TABLE(i2c, adxl34x_id);
 
-#ifdef CONFIG_OF
 static const struct of_device_id adxl34x_of_id[] = {
        /*
         * The ADXL346 is backward-compatible with the ADXL345. Differences are
@@ -153,13 +152,12 @@ static const struct of_device_id adxl34x_of_id[] = {
 };
 
 MODULE_DEVICE_TABLE(of, adxl34x_of_id);
-#endif
 
 static struct i2c_driver adxl34x_driver = {
        .driver = {
                .name = "adxl34x",
                .pm = &adxl34x_i2c_pm,
-               .of_match_table = of_match_ptr(adxl34x_of_id),
+               .of_match_table = adxl34x_of_id,
        },
        .probe    = adxl34x_i2c_probe,
        .remove   = adxl34x_i2c_remove,
index cde6f4bd8ea2e3f0f70fbccf3f684e5cd079ec04..6d279aa27cb9a10d70a2e732fe9599297883cbc0 100644 (file)
@@ -114,7 +114,7 @@ enum SS4_PACKET_ID {
                                 (_b[1] & 0x7F)         \
                                )
 
-#define SS4_TS_Y_V2(_b)                (s8)(                           \
+#define SS4_TS_Y_V2(_b)                -(s8)(                          \
                                 ((_b[3] & 0x01) << 7) |        \
                                 (_b[2] & 0x7F)         \
                                )
index aa7c5da608005cfb471279b484bcc46f39fe3c82..cb2bf203f4cabac152d8fedd13fe9ec55826f136 100644 (file)
@@ -29,7 +29,7 @@
  * after soft reset, we should wait for 1 ms
  * before the device becomes operational
  */
-#define SOFT_RESET_DELAY_MS    3
+#define SOFT_RESET_DELAY_US    3000
 /* and after hard reset, we should wait for max 500ms */
 #define HARD_RESET_DELAY_MS    500
 
@@ -311,7 +311,7 @@ static int synaptics_i2c_reset_config(struct i2c_client *client)
        if (ret) {
                dev_err(&client->dev, "Unable to reset device\n");
        } else {
-               msleep(SOFT_RESET_DELAY_MS);
+               usleep_range(SOFT_RESET_DELAY_US, SOFT_RESET_DELAY_US + 100);
                ret = synaptics_i2c_config(client);
                if (ret)
                        dev_err(&client->dev, "Unable to config device\n");
index 30cc627a4f4531ff93014ea94bf9835b0b37e8bc..8993983e3fe4892b748ff5d77c1597880c63e2c9 100644 (file)
@@ -41,7 +41,8 @@ config RMI4_SMB
 
 config RMI4_F03
         bool "RMI4 Function 03 (PS2 Guest)"
-        depends on RMI4_CORE && SERIO
+       depends on RMI4_CORE
+       depends on SERIO=y || RMI4_CORE=SERIO
         help
           Say Y here if you want to add support for RMI4 function 03.
 
index 77551f52220275c0589419ced9f7037cc11062b0..a7618776705ab929e42d4c3e870b4911f1a30d9c 100644 (file)
@@ -211,6 +211,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
                },
        },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
+               },
+       },
        { }
 };
 
index 02aec284decac37b1a93b16b609007c077db7f98..3e6003d32e565c748a43730574b9424eeb294d33 100644 (file)
@@ -914,9 +914,9 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)
 
                case QUEUE_HEADER_NORMAL:
                        report_count = ts->buf[FW_HDR_COUNT];
-                       if (report_count > 3) {
+                       if (report_count == 0 || report_count > 3) {
                                dev_err(&client->dev,
-                                       "too large report count: %*ph\n",
+                                       "bad report count: %*ph\n",
                                        HEADER_SIZE, ts->buf);
                                break;
                        }
index 019e02707cd5e3c893b3b6602a1847260b3ba401..3ef0f42984f2b1ec716509203988f9ebb70533cb 100644 (file)
@@ -1023,7 +1023,7 @@ again:
        next_tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
        left      = (head - next_tail) % CMD_BUFFER_SIZE;
 
-       if (left <= 2) {
+       if (left <= 0x20) {
                struct iommu_cmd sync_cmd;
                int ret;
 
index a88576d50740b2dbdbe6e65b2bfe1985f01c81ca..8ccbd7023194ee592fa91dafb67565d1ad9928aa 100644 (file)
@@ -903,8 +903,10 @@ int __init detect_intel_iommu(void)
                x86_init.iommu.iommu_init = intel_iommu_init;
 #endif
 
-       acpi_put_table(dmar_tbl);
-       dmar_tbl = NULL;
+       if (dmar_tbl) {
+               acpi_put_table(dmar_tbl);
+               dmar_tbl = NULL;
+       }
        up_write(&dmar_global_lock);
 
        return ret ? 1 : -ENODEV;
index c66c273dfd8ab1058030433961fcfaaf2046f846..8a185250ae5a5923d8ab9f34d811caa0f5e09b79 100644 (file)
@@ -2037,6 +2037,25 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
        if (context_present(context))
                goto out_unlock;
 
+       /*
+        * For kdump cases, old valid entries may be cached due to the
+        * in-flight DMA and copied pgtable, but there is no unmapping
+        * behaviour for them, thus we need an explicit cache flush for
+        * the newly-mapped device. For kdump, at this point, the device
+        * is supposed to finish reset at its driver probe stage, so no
+        * in-flight DMA will exist, and we don't need to worry anymore
+        * hereafter.
+        */
+       if (context_copied(context)) {
+               u16 did_old = context_domain_id(context);
+
+               if (did_old >= 0 && did_old < cap_ndoms(iommu->cap))
+                       iommu->flush.flush_context(iommu, did_old,
+                                                  (((u16)bus) << 8) | devfn,
+                                                  DMA_CCMD_MASK_NOBIT,
+                                                  DMA_CCMD_DEVICE_INVL);
+       }
+
        pgd = domain->pgd;
 
        context_clear_entry(context);
@@ -5185,6 +5204,25 @@ static void intel_iommu_remove_device(struct device *dev)
 }
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
+#define MAX_NR_PASID_BITS (20)
+static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
+{
+       /*
+        * Convert ecap_pss to extend context entry pts encoding, also
+        * respect the soft pasid_max value set by the iommu.
+        * - number of PASID bits = ecap_pss + 1
+        * - number of PASID table entries = 2^(pts + 5)
+        * Therefore, pts = ecap_pss - 4
+        * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
+        */
+       if (ecap_pss(iommu->ecap) < 5)
+               return 0;
+
+       /* pasid_max is encoded as actual number of entries not the bits */
+       return find_first_bit((unsigned long *)&iommu->pasid_max,
+                       MAX_NR_PASID_BITS) - 5;
+}
+
 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
 {
        struct device_domain_info *info;
@@ -5217,7 +5255,9 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
 
        if (!(ctx_lo & CONTEXT_PASIDE)) {
                context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
-               context[1].lo = (u64)virt_to_phys(iommu->pasid_table) | ecap_pss(iommu->ecap);
+               context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
+                       intel_iommu_get_pts(iommu);
+
                wmb();
                /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
                 * extended to permit requests-with-PASID if the PASIDE bit
index e38936d05df18fa20564c0ed7e8093e3b377fd81..2a514036a83dc0da07c0966b7fe247c18356bbbf 100644 (file)
@@ -212,6 +212,7 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
                                int is_new);
 struct md_cluster_info;
 
+/* change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added */
 enum mddev_flags {
        MD_ARRAY_FIRST_USE,     /* First use of array, needs initialization */
        MD_CLOSING,             /* If set, we are closing the array, do not open
@@ -702,4 +703,11 @@ static inline int mddev_is_clustered(struct mddev *mddev)
 {
        return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
 }
+
+/* clear unsupported mddev_flags */
+static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
+       unsigned long unsupported_flags)
+{
+       mddev->flags &= ~unsupported_flags;
+}
 #endif /* _MD_MD_H */
index a162fedeb51a48ce31d18fbca5a51a0fe9204bf4..848365d474f3a3bca26168ac3619f2c52edd7618 100644 (file)
 #include "raid0.h"
 #include "raid5.h"
 
+#define UNSUPPORTED_MDDEV_FLAGS                \
+       ((1L << MD_HAS_JOURNAL) |       \
+        (1L << MD_JOURNAL_CLEAN) |     \
+        (1L << MD_FAILFAST_SUPPORTED))
+
 static int raid0_congested(struct mddev *mddev, int bits)
 {
        struct r0conf *conf = mddev->private;
@@ -539,8 +544,7 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
        mddev->delta_disks = -1;
        /* make sure it will be not marked as dirty */
        mddev->recovery_cp = MaxSector;
-       clear_bit(MD_HAS_JOURNAL, &mddev->flags);
-       clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
+       mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
 
        create_strip_zones(mddev, &priv_conf);
 
@@ -583,7 +587,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
        mddev->degraded = 0;
        /* make sure it will be not marked as dirty */
        mddev->recovery_cp = MaxSector;
-       clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
+       mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
 
        create_strip_zones(mddev, &priv_conf);
        return priv_conf;
@@ -626,7 +630,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
        mddev->raid_disks = 1;
        /* make sure it will be not marked as dirty */
        mddev->recovery_cp = MaxSector;
-       clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
+       mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
 
        create_strip_zones(mddev, &priv_conf);
        return priv_conf;
index a1f3fbed91009c5aa3253109d82db8eed38a2720..7b0f647bcccb513b6650136cddd51bc0ebec796a 100644 (file)
 #include "raid1.h"
 #include "bitmap.h"
 
+#define UNSUPPORTED_MDDEV_FLAGS                \
+       ((1L << MD_HAS_JOURNAL) |       \
+        (1L << MD_JOURNAL_CLEAN))
+
 /*
  * Number of guaranteed r1bios in case of extreme VM load:
  */
@@ -1066,17 +1070,107 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
        kfree(plug);
 }
 
-static void raid1_make_request(struct mddev *mddev, struct bio * bio)
+static void raid1_read_request(struct mddev *mddev, struct bio *bio,
+                                struct r1bio *r1_bio)
 {
        struct r1conf *conf = mddev->private;
        struct raid1_info *mirror;
-       struct r1bio *r1_bio;
        struct bio *read_bio;
+       struct bitmap *bitmap = mddev->bitmap;
+       const int op = bio_op(bio);
+       const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+       int sectors_handled;
+       int max_sectors;
+       int rdisk;
+
+       wait_barrier(conf, bio);
+
+read_again:
+       rdisk = read_balance(conf, r1_bio, &max_sectors);
+
+       if (rdisk < 0) {
+               /* couldn't find anywhere to read from */
+               raid_end_bio_io(r1_bio);
+               return;
+       }
+       mirror = conf->mirrors + rdisk;
+
+       if (test_bit(WriteMostly, &mirror->rdev->flags) &&
+           bitmap) {
+               /*
+                * Reading from a write-mostly device must take care not to
+                * over-take any writes that are 'behind'
+                */
+               raid1_log(mddev, "wait behind writes");
+               wait_event(bitmap->behind_wait,
+                          atomic_read(&bitmap->behind_writes) == 0);
+       }
+       r1_bio->read_disk = rdisk;
+       r1_bio->start_next_window = 0;
+
+       read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+       bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
+                max_sectors);
+
+       r1_bio->bios[rdisk] = read_bio;
+
+       read_bio->bi_iter.bi_sector = r1_bio->sector +
+               mirror->rdev->data_offset;
+       read_bio->bi_bdev = mirror->rdev->bdev;
+       read_bio->bi_end_io = raid1_end_read_request;
+       bio_set_op_attrs(read_bio, op, do_sync);
+       if (test_bit(FailFast, &mirror->rdev->flags) &&
+           test_bit(R1BIO_FailFast, &r1_bio->state))
+               read_bio->bi_opf |= MD_FAILFAST;
+       read_bio->bi_private = r1_bio;
+
+       if (mddev->gendisk)
+               trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+                                     read_bio, disk_devt(mddev->gendisk),
+                                     r1_bio->sector);
+
+       if (max_sectors < r1_bio->sectors) {
+               /*
+                * could not read all from this device, so we will need another
+                * r1_bio.
+                */
+               sectors_handled = (r1_bio->sector + max_sectors
+                                  - bio->bi_iter.bi_sector);
+               r1_bio->sectors = max_sectors;
+               spin_lock_irq(&conf->device_lock);
+               if (bio->bi_phys_segments == 0)
+                       bio->bi_phys_segments = 2;
+               else
+                       bio->bi_phys_segments++;
+               spin_unlock_irq(&conf->device_lock);
+
+               /*
+                * Cannot call generic_make_request directly as that will be
+                * queued in __make_request and subsequent mempool_alloc might
+                * block waiting for it.  So hand bio over to raid1d.
+                */
+               reschedule_retry(r1_bio);
+
+               r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+
+               r1_bio->master_bio = bio;
+               r1_bio->sectors = bio_sectors(bio) - sectors_handled;
+               r1_bio->state = 0;
+               r1_bio->mddev = mddev;
+               r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
+               goto read_again;
+       } else
+               generic_make_request(read_bio);
+}
+
+static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+                               struct r1bio *r1_bio)
+{
+       struct r1conf *conf = mddev->private;
        int i, disks;
-       struct bitmap *bitmap;
+       struct bitmap *bitmap = mddev->bitmap;
        unsigned long flags;
        const int op = bio_op(bio);
-       const int rw = bio_data_dir(bio);
        const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
        const unsigned long do_flush_fua = (bio->bi_opf &
                                                (REQ_PREFLUSH | REQ_FUA));
@@ -1096,15 +1190,15 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
 
        md_write_start(mddev, bio); /* wait on superblock update early */
 
-       if (bio_data_dir(bio) == WRITE &&
-           ((bio_end_sector(bio) > mddev->suspend_lo &&
+       if ((bio_end_sector(bio) > mddev->suspend_lo &&
            bio->bi_iter.bi_sector < mddev->suspend_hi) ||
            (mddev_is_clustered(mddev) &&
             md_cluster_ops->area_resyncing(mddev, WRITE,
-                    bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
-               /* As the suspend_* range is controlled by
-                * userspace, we want an interruptible
-                * wait.
+                    bio->bi_iter.bi_sector, bio_end_sector(bio)))) {
+
+               /*
+                * As the suspend_* range is controlled by userspace, we want
+                * an interruptible wait.
                 */
                DEFINE_WAIT(w);
                for (;;) {
@@ -1115,128 +1209,15 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
                            bio->bi_iter.bi_sector >= mddev->suspend_hi ||
                            (mddev_is_clustered(mddev) &&
                             !md_cluster_ops->area_resyncing(mddev, WRITE,
-                                    bio->bi_iter.bi_sector, bio_end_sector(bio))))
+                                    bio->bi_iter.bi_sector,
+                                    bio_end_sector(bio))))
                                break;
                        schedule();
                }
                finish_wait(&conf->wait_barrier, &w);
        }
-
        start_next_window = wait_barrier(conf, bio);
 
-       bitmap = mddev->bitmap;
-
-       /*
-        * make_request() can abort the operation when read-ahead is being
-        * used and no empty request is available.
-        *
-        */
-       r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
-
-       r1_bio->master_bio = bio;
-       r1_bio->sectors = bio_sectors(bio);
-       r1_bio->state = 0;
-       r1_bio->mddev = mddev;
-       r1_bio->sector = bio->bi_iter.bi_sector;
-
-       /* We might need to issue multiple reads to different
-        * devices if there are bad blocks around, so we keep
-        * track of the number of reads in bio->bi_phys_segments.
-        * If this is 0, there is only one r1_bio and no locking
-        * will be needed when requests complete.  If it is
-        * non-zero, then it is the number of not-completed requests.
-        */
-       bio->bi_phys_segments = 0;
-       bio_clear_flag(bio, BIO_SEG_VALID);
-
-       if (rw == READ) {
-               /*
-                * read balancing logic:
-                */
-               int rdisk;
-
-read_again:
-               rdisk = read_balance(conf, r1_bio, &max_sectors);
-
-               if (rdisk < 0) {
-                       /* couldn't find anywhere to read from */
-                       raid_end_bio_io(r1_bio);
-                       return;
-               }
-               mirror = conf->mirrors + rdisk;
-
-               if (test_bit(WriteMostly, &mirror->rdev->flags) &&
-                   bitmap) {
-                       /* Reading from a write-mostly device must
-                        * take care not to over-take any writes
-                        * that are 'behind'
-                        */
-                       raid1_log(mddev, "wait behind writes");
-                       wait_event(bitmap->behind_wait,
-                                  atomic_read(&bitmap->behind_writes) == 0);
-               }
-               r1_bio->read_disk = rdisk;
-               r1_bio->start_next_window = 0;
-
-               read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
-                        max_sectors);
-
-               r1_bio->bios[rdisk] = read_bio;
-
-               read_bio->bi_iter.bi_sector = r1_bio->sector +
-                       mirror->rdev->data_offset;
-               read_bio->bi_bdev = mirror->rdev->bdev;
-               read_bio->bi_end_io = raid1_end_read_request;
-               bio_set_op_attrs(read_bio, op, do_sync);
-               if (test_bit(FailFast, &mirror->rdev->flags) &&
-                   test_bit(R1BIO_FailFast, &r1_bio->state))
-                       read_bio->bi_opf |= MD_FAILFAST;
-               read_bio->bi_private = r1_bio;
-
-               if (mddev->gendisk)
-                       trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
-                                             read_bio, disk_devt(mddev->gendisk),
-                                             r1_bio->sector);
-
-               if (max_sectors < r1_bio->sectors) {
-                       /* could not read all from this device, so we will
-                        * need another r1_bio.
-                        */
-
-                       sectors_handled = (r1_bio->sector + max_sectors
-                                          - bio->bi_iter.bi_sector);
-                       r1_bio->sectors = max_sectors;
-                       spin_lock_irq(&conf->device_lock);
-                       if (bio->bi_phys_segments == 0)
-                               bio->bi_phys_segments = 2;
-                       else
-                               bio->bi_phys_segments++;
-                       spin_unlock_irq(&conf->device_lock);
-                       /* Cannot call generic_make_request directly
-                        * as that will be queued in __make_request
-                        * and subsequent mempool_alloc might block waiting
-                        * for it.  So hand bio over to raid1d.
-                        */
-                       reschedule_retry(r1_bio);
-
-                       r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
-
-                       r1_bio->master_bio = bio;
-                       r1_bio->sectors = bio_sectors(bio) - sectors_handled;
-                       r1_bio->state = 0;
-                       r1_bio->mddev = mddev;
-                       r1_bio->sector = bio->bi_iter.bi_sector +
-                               sectors_handled;
-                       goto read_again;
-               } else
-                       generic_make_request(read_bio);
-               return;
-       }
-
-       /*
-        * WRITE:
-        */
        if (conf->pending_count >= max_queued_requests) {
                md_wakeup_thread(mddev->thread);
                raid1_log(mddev, "wait queued");
@@ -1280,8 +1261,7 @@ read_again:
                        int bad_sectors;
                        int is_bad;
 
-                       is_bad = is_badblock(rdev, r1_bio->sector,
-                                            max_sectors,
+                       is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
                                             &first_bad, &bad_sectors);
                        if (is_bad < 0) {
                                /* mustn't write here until the bad block is
@@ -1370,7 +1350,8 @@ read_again:
                        continue;
 
                mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
+               bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector,
+                        max_sectors);
 
                if (first_clone) {
                        /* do behind I/O ?
@@ -1464,6 +1445,40 @@ read_again:
        wake_up(&conf->wait_barrier);
 }
 
+static void raid1_make_request(struct mddev *mddev, struct bio *bio)
+{
+       struct r1conf *conf = mddev->private;
+       struct r1bio *r1_bio;
+
+       /*
+        * make_request() can abort the operation when read-ahead is being
+        * used and no empty request is available.
+        *
+        */
+       r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+
+       r1_bio->master_bio = bio;
+       r1_bio->sectors = bio_sectors(bio);
+       r1_bio->state = 0;
+       r1_bio->mddev = mddev;
+       r1_bio->sector = bio->bi_iter.bi_sector;
+
+       /*
+        * We might need to issue multiple reads to different devices if there
+        * are bad blocks around, so we keep track of the number of reads in
+        * bio->bi_phys_segments.  If this is 0, there is only one r1_bio and
+        * no locking will be needed when requests complete.  If it is
+        * non-zero, then it is the number of not-completed requests.
+        */
+       bio->bi_phys_segments = 0;
+       bio_clear_flag(bio, BIO_SEG_VALID);
+
+       if (bio_data_dir(bio) == READ)
+               raid1_read_request(mddev, bio, r1_bio);
+       else
+               raid1_write_request(mddev, bio, r1_bio);
+}
+
 static void raid1_status(struct seq_file *seq, struct mddev *mddev)
 {
        struct r1conf *conf = mddev->private;
@@ -3246,8 +3261,8 @@ static void *raid1_takeover(struct mddev *mddev)
                if (!IS_ERR(conf)) {
                        /* Array must appear to be quiesced */
                        conf->array_frozen = 1;
-                       clear_bit(MD_HAS_JOURNAL, &mddev->flags);
-                       clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
+                       mddev_clear_unsupported_flags(mddev,
+                               UNSUPPORTED_MDDEV_FLAGS);
                }
                return conf;
        }
index ab5e86209322fc0d0ed3e2db18a468c6f5054f13..1920756828dfb3fd512e2cb5705f329f10324698 100644 (file)
@@ -1087,23 +1087,122 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
        kfree(plug);
 }
 
-static void __make_request(struct mddev *mddev, struct bio *bio)
+static void raid10_read_request(struct mddev *mddev, struct bio *bio,
+                               struct r10bio *r10_bio)
 {
        struct r10conf *conf = mddev->private;
-       struct r10bio *r10_bio;
        struct bio *read_bio;
+       const int op = bio_op(bio);
+       const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+       int sectors_handled;
+       int max_sectors;
+       sector_t sectors;
+       struct md_rdev *rdev;
+       int slot;
+
+       /*
+        * Register the new request and wait if the reconstruction
+        * thread has put up a bar for new requests.
+        * Continue immediately if no resync is active currently.
+        */
+       wait_barrier(conf);
+
+       sectors = bio_sectors(bio);
+       while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+           bio->bi_iter.bi_sector < conf->reshape_progress &&
+           bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
+               /*
+                * IO spans the reshape position.  Need to wait for reshape to
+                * pass
+                */
+               raid10_log(conf->mddev, "wait reshape");
+               allow_barrier(conf);
+               wait_event(conf->wait_barrier,
+                          conf->reshape_progress <= bio->bi_iter.bi_sector ||
+                          conf->reshape_progress >= bio->bi_iter.bi_sector +
+                          sectors);
+               wait_barrier(conf);
+       }
+
+read_again:
+       rdev = read_balance(conf, r10_bio, &max_sectors);
+       if (!rdev) {
+               raid_end_bio_io(r10_bio);
+               return;
+       }
+       slot = r10_bio->read_slot;
+
+       read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+       bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
+                max_sectors);
+
+       r10_bio->devs[slot].bio = read_bio;
+       r10_bio->devs[slot].rdev = rdev;
+
+       read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
+               choose_data_offset(r10_bio, rdev);
+       read_bio->bi_bdev = rdev->bdev;
+       read_bio->bi_end_io = raid10_end_read_request;
+       bio_set_op_attrs(read_bio, op, do_sync);
+       if (test_bit(FailFast, &rdev->flags) &&
+           test_bit(R10BIO_FailFast, &r10_bio->state))
+               read_bio->bi_opf |= MD_FAILFAST;
+       read_bio->bi_private = r10_bio;
+
+       if (mddev->gendisk)
+               trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+                                     read_bio, disk_devt(mddev->gendisk),
+                                     r10_bio->sector);
+       if (max_sectors < r10_bio->sectors) {
+               /*
+                * Could not read all from this device, so we will need another
+                * r10_bio.
+                */
+               sectors_handled = (r10_bio->sector + max_sectors
+                                  - bio->bi_iter.bi_sector);
+               r10_bio->sectors = max_sectors;
+               spin_lock_irq(&conf->device_lock);
+               if (bio->bi_phys_segments == 0)
+                       bio->bi_phys_segments = 2;
+               else
+                       bio->bi_phys_segments++;
+               spin_unlock_irq(&conf->device_lock);
+               /*
+                * Cannot call generic_make_request directly as that will be
+                * queued in __generic_make_request and subsequent
+                * mempool_alloc might block waiting for it.  so hand bio over
+                * to raid10d.
+                */
+               reschedule_retry(r10_bio);
+
+               r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
+
+               r10_bio->master_bio = bio;
+               r10_bio->sectors = bio_sectors(bio) - sectors_handled;
+               r10_bio->state = 0;
+               r10_bio->mddev = mddev;
+               r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
+               goto read_again;
+       } else
+               generic_make_request(read_bio);
+       return;
+}
+
+static void raid10_write_request(struct mddev *mddev, struct bio *bio,
+                                struct r10bio *r10_bio)
+{
+       struct r10conf *conf = mddev->private;
        int i;
        const int op = bio_op(bio);
-       const int rw = bio_data_dir(bio);
        const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
        const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
        unsigned long flags;
        struct md_rdev *blocked_rdev;
        struct blk_plug_cb *cb;
        struct raid10_plug_cb *plug = NULL;
+       sector_t sectors;
        int sectors_handled;
        int max_sectors;
-       int sectors;
 
        md_write_start(mddev, bio);
 
@@ -1118,8 +1217,9 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
        while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
            bio->bi_iter.bi_sector < conf->reshape_progress &&
            bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
-               /* IO spans the reshape position.  Need to wait for
-                * reshape to pass
+               /*
+                * IO spans the reshape position.  Need to wait for reshape to
+                * pass
                 */
                raid10_log(conf->mddev, "wait reshape");
                allow_barrier(conf);
@@ -1129,8 +1229,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
                           sectors);
                wait_barrier(conf);
        }
+
        if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-           bio_data_dir(bio) == WRITE &&
            (mddev->reshape_backwards
             ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
                bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
@@ -1148,98 +1248,6 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
                conf->reshape_safe = mddev->reshape_position;
        }
 
-       r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
-
-       r10_bio->master_bio = bio;
-       r10_bio->sectors = sectors;
-
-       r10_bio->mddev = mddev;
-       r10_bio->sector = bio->bi_iter.bi_sector;
-       r10_bio->state = 0;
-
-       /* We might need to issue multiple reads to different
-        * devices if there are bad blocks around, so we keep
-        * track of the number of reads in bio->bi_phys_segments.
-        * If this is 0, there is only one r10_bio and no locking
-        * will be needed when the request completes.  If it is
-        * non-zero, then it is the number of not-completed requests.
-        */
-       bio->bi_phys_segments = 0;
-       bio_clear_flag(bio, BIO_SEG_VALID);
-
-       if (rw == READ) {
-               /*
-                * read balancing logic:
-                */
-               struct md_rdev *rdev;
-               int slot;
-
-read_again:
-               rdev = read_balance(conf, r10_bio, &max_sectors);
-               if (!rdev) {
-                       raid_end_bio_io(r10_bio);
-                       return;
-               }
-               slot = r10_bio->read_slot;
-
-               read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
-                        max_sectors);
-
-               r10_bio->devs[slot].bio = read_bio;
-               r10_bio->devs[slot].rdev = rdev;
-
-               read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
-                       choose_data_offset(r10_bio, rdev);
-               read_bio->bi_bdev = rdev->bdev;
-               read_bio->bi_end_io = raid10_end_read_request;
-               bio_set_op_attrs(read_bio, op, do_sync);
-               if (test_bit(FailFast, &rdev->flags) &&
-                   test_bit(R10BIO_FailFast, &r10_bio->state))
-                       read_bio->bi_opf |= MD_FAILFAST;
-               read_bio->bi_private = r10_bio;
-
-               if (mddev->gendisk)
-                       trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
-                                             read_bio, disk_devt(mddev->gendisk),
-                                             r10_bio->sector);
-               if (max_sectors < r10_bio->sectors) {
-                       /* Could not read all from this device, so we will
-                        * need another r10_bio.
-                        */
-                       sectors_handled = (r10_bio->sector + max_sectors
-                                          - bio->bi_iter.bi_sector);
-                       r10_bio->sectors = max_sectors;
-                       spin_lock_irq(&conf->device_lock);
-                       if (bio->bi_phys_segments == 0)
-                               bio->bi_phys_segments = 2;
-                       else
-                               bio->bi_phys_segments++;
-                       spin_unlock_irq(&conf->device_lock);
-                       /* Cannot call generic_make_request directly
-                        * as that will be queued in __generic_make_request
-                        * and subsequent mempool_alloc might block
-                        * waiting for it.  so hand bio over to raid10d.
-                        */
-                       reschedule_retry(r10_bio);
-
-                       r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
-
-                       r10_bio->master_bio = bio;
-                       r10_bio->sectors = bio_sectors(bio) - sectors_handled;
-                       r10_bio->state = 0;
-                       r10_bio->mddev = mddev;
-                       r10_bio->sector = bio->bi_iter.bi_sector +
-                               sectors_handled;
-                       goto read_again;
-               } else
-                       generic_make_request(read_bio);
-               return;
-       }
-
-       /*
-        * WRITE:
-        */
        if (conf->pending_count >= max_queued_requests) {
                md_wakeup_thread(mddev->thread);
                raid10_log(mddev, "wait queued");
@@ -1300,8 +1308,7 @@ retry_write:
                        int bad_sectors;
                        int is_bad;
 
-                       is_bad = is_badblock(rdev, dev_sector,
-                                            max_sectors,
+                       is_bad = is_badblock(rdev, dev_sector, max_sectors,
                                             &first_bad, &bad_sectors);
                        if (is_bad < 0) {
                                /* Mustn't write here until the bad block
@@ -1405,8 +1412,7 @@ retry_write:
                        r10_bio->devs[i].bio = mbio;
 
                        mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
-                                          choose_data_offset(r10_bio,
-                                                             rdev));
+                                          choose_data_offset(r10_bio, rdev));
                        mbio->bi_bdev = rdev->bdev;
                        mbio->bi_end_io = raid10_end_write_request;
                        bio_set_op_attrs(mbio, op, do_sync | do_fua);
@@ -1457,8 +1463,7 @@ retry_write:
                        r10_bio->devs[i].repl_bio = mbio;
 
                        mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
-                                          choose_data_offset(
-                                                  r10_bio, rdev));
+                                          choose_data_offset(r10_bio, rdev));
                        mbio->bi_bdev = rdev->bdev;
                        mbio->bi_end_io = raid10_end_write_request;
                        bio_set_op_attrs(mbio, op, do_sync | do_fua);
@@ -1503,6 +1508,36 @@ retry_write:
        one_write_done(r10_bio);
 }
 
+static void __make_request(struct mddev *mddev, struct bio *bio)
+{
+       struct r10conf *conf = mddev->private;
+       struct r10bio *r10_bio;
+
+       r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
+
+       r10_bio->master_bio = bio;
+       r10_bio->sectors = bio_sectors(bio);
+
+       r10_bio->mddev = mddev;
+       r10_bio->sector = bio->bi_iter.bi_sector;
+       r10_bio->state = 0;
+
+       /*
+        * We might need to issue multiple reads to different devices if there
+        * are bad blocks around, so we keep track of the number of reads in
+        * bio->bi_phys_segments.  If this is 0, there is only one r10_bio and
+        * no locking will be needed when the request completes.  If it is
+        * non-zero, then it is the number of not-completed requests.
+        */
+       bio->bi_phys_segments = 0;
+       bio_clear_flag(bio, BIO_SEG_VALID);
+
+       if (bio_data_dir(bio) == READ)
+               raid10_read_request(mddev, bio, r10_bio);
+       else
+               raid10_write_request(mddev, bio, r10_bio);
+}
+
 static void raid10_make_request(struct mddev *mddev, struct bio *bio)
 {
        struct r10conf *conf = mddev->private;
index d7bfb6fc8aef8808b143c024f823bab4e6bf640b..0e8ed2c327b07fd849c1720d7b272dd860b949b9 100644 (file)
@@ -1682,8 +1682,7 @@ out:
 
 static struct stripe_head *
 r5c_recovery_alloc_stripe(struct r5conf *conf,
-                         sector_t stripe_sect,
-                         sector_t log_start)
+                         sector_t stripe_sect)
 {
        struct stripe_head *sh;
 
@@ -1692,7 +1691,6 @@ r5c_recovery_alloc_stripe(struct r5conf *conf,
                return NULL;  /* no more stripe available */
 
        r5l_recovery_reset_stripe(sh);
-       sh->log_start = log_start;
 
        return sh;
 }
@@ -1862,7 +1860,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
                                                stripe_sect);
 
                if (!sh) {
-                       sh = r5c_recovery_alloc_stripe(conf, stripe_sect, ctx->pos);
+                       sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
                        /*
                         * cannot get stripe from raid5_get_active_stripe
                         * try replay some stripes
@@ -1871,7 +1869,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
                                r5c_recovery_replay_stripes(
                                        cached_stripe_list, ctx);
                                sh = r5c_recovery_alloc_stripe(
-                                       conf, stripe_sect, ctx->pos);
+                                       conf, stripe_sect);
                        }
                        if (!sh) {
                                pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
@@ -1879,8 +1877,8 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
                                        conf->min_nr_stripes * 2);
                                raid5_set_cache_size(mddev,
                                                     conf->min_nr_stripes * 2);
-                               sh = r5c_recovery_alloc_stripe(
-                                       conf, stripe_sect, ctx->pos);
+                               sh = r5c_recovery_alloc_stripe(conf,
+                                                              stripe_sect);
                        }
                        if (!sh) {
                                pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
@@ -1894,7 +1892,6 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
                        if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
                            test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
                                r5l_recovery_replay_one_stripe(conf, sh, ctx);
-                               sh->log_start = ctx->pos;
                                list_move_tail(&sh->lru, cached_stripe_list);
                        }
                        r5l_recovery_load_data(log, sh, ctx, payload,
@@ -1933,8 +1930,6 @@ static void r5c_recovery_load_one_stripe(struct r5l_log *log,
                        set_bit(R5_UPTODATE, &dev->flags);
                }
        }
-       list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
-       atomic_inc(&log->stripe_in_journal_count);
 }
 
 /*
@@ -2070,6 +2065,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
        struct stripe_head *sh, *next;
        struct mddev *mddev = log->rdev->mddev;
        struct page *page;
+       sector_t next_checkpoint = MaxSector;
 
        page = alloc_page(GFP_KERNEL);
        if (!page) {
@@ -2078,6 +2074,8 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
                return -ENOMEM;
        }
 
+       WARN_ON(list_empty(&ctx->cached_list));
+
        list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
                struct r5l_meta_block *mb;
                int i;
@@ -2123,12 +2121,15 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
                sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
                             REQ_OP_WRITE, REQ_FUA, false);
                sh->log_start = ctx->pos;
+               list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
+               atomic_inc(&log->stripe_in_journal_count);
                ctx->pos = write_pos;
                ctx->seq += 1;
-
+               next_checkpoint = sh->log_start;
                list_del_init(&sh->lru);
                raid5_release_stripe(sh);
        }
+       log->next_checkpoint = next_checkpoint;
        __free_page(page);
        return 0;
 }
@@ -2139,7 +2140,6 @@ static int r5l_recovery_log(struct r5l_log *log)
        struct r5l_recovery_ctx ctx;
        int ret;
        sector_t pos;
-       struct stripe_head *sh;
 
        ctx.pos = log->last_checkpoint;
        ctx.seq = log->last_cp_seq;
@@ -2164,16 +2164,13 @@ static int r5l_recovery_log(struct r5l_log *log)
                log->next_checkpoint = ctx.pos;
                r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
                ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
-       } else {
-               sh = list_last_entry(&ctx.cached_list, struct stripe_head, lru);
-               log->next_checkpoint = sh->log_start;
        }
 
        if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0))
                pr_debug("md/raid:%s: starting from clean shutdown\n",
                         mdname(mddev));
        else {
-               pr_debug("md/raid:%s: recoverying %d data-only stripes and %d data-parity stripes\n",
+               pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
                         mdname(mddev), ctx.data_only_stripes,
                         ctx.data_parity_stripes);
 
@@ -2418,9 +2415,6 @@ void r5c_finish_stripe_write_out(struct r5conf *conf,
        if (do_wakeup)
                wake_up(&conf->wait_for_overlap);
 
-       if (conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
-               return;
-
        spin_lock_irq(&conf->log->stripe_in_journal_lock);
        list_del_init(&sh->r5c);
        spin_unlock_irq(&conf->log->stripe_in_journal_lock);
@@ -2639,14 +2633,16 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
        spin_lock_init(&log->stripe_in_journal_lock);
        atomic_set(&log->stripe_in_journal_count, 0);
 
+       rcu_assign_pointer(conf->log, log);
+
        if (r5l_load_log(log))
                goto error;
 
-       rcu_assign_pointer(conf->log, log);
        set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
        return 0;
 
 error:
+       rcu_assign_pointer(conf->log, NULL);
        md_unregister_thread(&log->reclaim_thread);
 reclaim_thread:
        mempool_destroy(log->meta_pool);
index 06d7279bdd048e66369961194bc1d21567a08446..36c13e4be9c9e5d0cedacb59910e7b4482eb6ddd 100644 (file)
@@ -62,6 +62,8 @@
 #include "raid0.h"
 #include "bitmap.h"
 
+#define UNSUPPORTED_MDDEV_FLAGS        (1L << MD_FAILFAST_SUPPORTED)
+
 #define cpu_to_group(cpu) cpu_to_node(cpu)
 #define ANY_GROUP NUMA_NO_NODE
 
@@ -7829,8 +7831,9 @@ static void *raid5_takeover_raid1(struct mddev *mddev)
        mddev->new_chunk_sectors = chunksect;
 
        ret = setup_conf(mddev);
-       if (!IS_ERR_VALUE(ret))
-               clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
+       if (!IS_ERR(ret))
+               mddev_clear_unsupported_flags(mddev,
+                       UNSUPPORTED_MDDEV_FLAGS);
        return ret;
 }
 
index 0037153c80a6ad27a7949be67d0f643e52b23edd..2d9c5dd06e423266680294e4d806e2e736d32156 100644 (file)
@@ -450,7 +450,7 @@ bool mei_cldev_enabled(struct mei_cl_device *cldev)
 EXPORT_SYMBOL_GPL(mei_cldev_enabled);
 
 /**
- * mei_cldev_enable_device - enable me client device
+ * mei_cldev_enable - enable me client device
  *     create connection with me client
  *
  * @cldev: me client device
index 391936c1aa041c5bd925d284aa593e65b5ce1065..b0395601c6ae83340f62cd801531632134b85a7a 100644 (file)
@@ -1541,7 +1541,7 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
 
        rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
        if (rets < 0)
-               return rets;
+               goto err;
 
        if (rets == 0) {
                cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
@@ -1575,11 +1575,8 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
                        cb->buf.size, cb->buf_idx);
 
        rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
-       if (rets) {
-               cl->status = rets;
-               list_move_tail(&cb->list, &cmpl_list->list);
-               return rets;
-       }
+       if (rets)
+               goto err;
 
        cl->status = 0;
        cl->writing_state = MEI_WRITING;
@@ -1587,14 +1584,21 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
        cb->completed = mei_hdr.msg_complete == 1;
 
        if (first_chunk) {
-               if (mei_cl_tx_flow_ctrl_creds_reduce(cl))
-                       return -EIO;
+               if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
+                       rets = -EIO;
+                       goto err;
+               }
        }
 
        if (mei_hdr.msg_complete)
                list_move_tail(&cb->list, &dev->write_waiting_list.list);
 
        return 0;
+
+err:
+       cl->status = rets;
+       list_move_tail(&cb->list, &cmpl_list->list);
+       return rets;
 }
 
 /**
index b8c293373eccd8293ab50fea6a4ce25b1745076b..a306de4318d7d2301fdcb8f34c7045e884278ad4 100644 (file)
@@ -190,7 +190,7 @@ static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
  */
 static int ipddp_create(struct ipddp_route *new_rt)
 {
-        struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL);
+        struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL);
 
         if (rt == NULL)
                 return -ENOMEM;
index 9ec33b51a0edad879701bef79d6c8f250d778b91..2ce7ae97ac9148d39137eff66954dfc3b4cf239b 100644 (file)
@@ -393,7 +393,7 @@ static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
        if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
                return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
        else
-               return mdiobus_read(priv->master_mii_bus, addr, regnum);
+               return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
 }
 
 static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
@@ -407,7 +407,7 @@ static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
        if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
                bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
        else
-               mdiobus_write(priv->master_mii_bus, addr, regnum, val);
+               mdiobus_write_nested(priv->master_mii_bus, addr, regnum, val);
 
        return 0;
 }
@@ -982,6 +982,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
        const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
        struct device_node *dn = pdev->dev.of_node;
        struct b53_platform_data *pdata;
+       struct dsa_switch_ops *ops;
        struct bcm_sf2_priv *priv;
        struct b53_device *dev;
        struct dsa_switch *ds;
@@ -995,6 +996,10 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
        if (!priv)
                return -ENOMEM;
 
+       ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
+       if (!ops)
+               return -ENOMEM;
+
        dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
        if (!dev)
                return -ENOMEM;
@@ -1014,6 +1019,8 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
        ds = dev->ds;
 
        /* Override the parts that are non-standard wrt. normal b53 devices */
+       memcpy(ops, ds->ops, sizeof(*ops));
+       ds->ops = ops;
        ds->ops->get_tag_protocol = bcm_sf2_sw_get_tag_protocol;
        ds->ops->setup = bcm_sf2_sw_setup;
        ds->ops->get_phy_flags = bcm_sf2_sw_get_phy_flags;
index 155190db682d29a6a97b2267550954fb4eba639d..9943629fcbf9ae14a9683e0b2eb0da459f83c0c6 100644 (file)
@@ -539,6 +539,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
                }
        }
 
+isr_done:
        /* If there is not a separate AN irq, handle it here */
        if (pdata->dev_irq == pdata->an_irq)
                pdata->phy_if.an_isr(irq, pdata);
@@ -551,7 +552,6 @@ static irqreturn_t xgbe_isr(int irq, void *data)
        if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
                pdata->i2c_if.i2c_isr(irq, pdata);
 
-isr_done:
        return IRQ_HANDLED;
 }
 
index 25d1eb4933d0b8b28dc53d07344eb6a47a263893..7e8cf213fd813d8530f65c8439a77fd16ffeff9b 100644 (file)
@@ -1012,15 +1012,6 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
                goto out;
        }
 
-       /* Insert TSB and checksum infos */
-       if (priv->tsb_en) {
-               skb = bcm_sysport_insert_tsb(skb, dev);
-               if (!skb) {
-                       ret = NETDEV_TX_OK;
-                       goto out;
-               }
-       }
-
        /* The Ethernet switch we are interfaced with needs packets to be at
         * least 64 bytes (including FCS) otherwise they will be discarded when
         * they enter the switch port logic. When Broadcom tags are enabled, we
@@ -1028,13 +1019,21 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
         * (including FCS and tag) because the length verification is done after
         * the Broadcom tag is stripped off the ingress packet.
         */
-       if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
+       if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
                ret = NETDEV_TX_OK;
                goto out;
        }
 
-       skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
-                       ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
+       /* Insert TSB and checksum infos */
+       if (priv->tsb_en) {
+               skb = bcm_sysport_insert_tsb(skb, dev);
+               if (!skb) {
+                       ret = NETDEV_TX_OK;
+                       goto out;
+               }
+       }
+
+       skb_len = skb->len;
 
        mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
        if (dma_mapping_error(kdev, mapping)) {
index 185e9e047aa9adda61ac602caea5f59c3efe2d7e..ae42de4fdddf6b77d2c2cf1606795e139d4b472b 100644 (file)
@@ -8720,11 +8720,14 @@ static void tg3_free_consistent(struct tg3 *tp)
        tg3_mem_rx_release(tp);
        tg3_mem_tx_release(tp);
 
+       /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
+       tg3_full_lock(tp, 0);
        if (tp->hw_stats) {
                dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
                                  tp->hw_stats, tp->stats_mapping);
                tp->hw_stats = NULL;
        }
+       tg3_full_unlock(tp);
 }
 
 /*
index 92be2cd8f817caef709fce5de320663bd44c9dc8..9906fda76087c013da8f1b2ca73a3cf5709480d3 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * macb_pci.c - Cadence GEM PCI wrapper.
+ * Cadence GEM PCI wrapper.
  *
  * Copyright (C) 2016 Cadence Design Systems - http://www.cadence.com
  *
@@ -45,32 +45,27 @@ static int macb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        struct macb_platform_data plat_data;
        struct resource res[2];
 
-       /* sanity check */
-       if (!id)
-               return -EINVAL;
-
        /* enable pci device */
-       err = pci_enable_device(pdev);
+       err = pcim_enable_device(pdev);
        if (err < 0) {
-               dev_err(&pdev->dev, "Enabling PCI device has failed: 0x%04X",
-                       err);
-               return -EACCES;
+               dev_err(&pdev->dev, "Enabling PCI device has failed: %d", err);
+               return err;
        }
 
        pci_set_master(pdev);
 
        /* set up resources */
        memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
-       res[0].start = pdev->resource[0].start;
-       res[0].end = pdev->resource[0].end;
+       res[0].start = pci_resource_start(pdev, 0);
+       res[0].end = pci_resource_end(pdev, 0);
        res[0].name = PCI_DRIVER_NAME;
        res[0].flags = IORESOURCE_MEM;
-       res[1].start = pdev->irq;
+       res[1].start = pci_irq_vector(pdev, 0);
        res[1].name = PCI_DRIVER_NAME;
        res[1].flags = IORESOURCE_IRQ;
 
-       dev_info(&pdev->dev, "EMAC physical base addr = 0x%p\n",
-                (void *)(uintptr_t)pci_resource_start(pdev, 0));
+       dev_info(&pdev->dev, "EMAC physical base addr: %pa\n",
+                &res[0].start);
 
        /* set up macb platform data */
        memset(&plat_data, 0, sizeof(plat_data));
@@ -100,7 +95,7 @@ static int macb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        plat_info.num_res = ARRAY_SIZE(res);
        plat_info.data = &plat_data;
        plat_info.size_data = sizeof(plat_data);
-       plat_info.dma_mask = DMA_BIT_MASK(32);
+       plat_info.dma_mask = pdev->dma_mask;
 
        /* register platform device */
        plat_dev = platform_device_register_full(&plat_info);
@@ -120,7 +115,6 @@ err_hclk_register:
        clk_unregister(plat_data.pclk);
 
 err_pclk_register:
-       pci_disable_device(pdev);
        return err;
 }
 
@@ -130,7 +124,6 @@ static void macb_remove(struct pci_dev *pdev)
        struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev);
 
        platform_device_unregister(plat_dev);
-       pci_disable_device(pdev);
        clk_unregister(plat_data->pclk);
        clk_unregister(plat_data->hclk);
 }
index bbc8bd16cb971d6e2df3035d20ccd4151791a660..dcbce6cac63e2db7a936a7feea312b25ba3f2421 100644 (file)
@@ -77,7 +77,7 @@ config OCTEON_MGMT_ETHERNET
 config LIQUIDIO_VF
        tristate "Cavium LiquidIO VF support"
        depends on 64BIT && PCI_MSI
-       select PTP_1588_CLOCK
+       imply PTP_1588_CLOCK
        ---help---
          This driver supports Cavium LiquidIO Intelligent Server Adapter
          based on CN23XX chips.
index 0f0de5b63622d8ebfb98c124d5768e806a3689e8..d04a6c1634452034217e9dc4ab8771bf02899b86 100644 (file)
@@ -133,17 +133,15 @@ cxgb_find_route6(struct cxgb4_lld_info *lldi,
                if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
                        fl6.flowi6_oif = sin6_scope_id;
                dst = ip6_route_output(&init_net, NULL, &fl6);
-               if (!dst)
-                       goto out;
-               if (!cxgb_our_interface(lldi, get_real_dev,
-                                       ip6_dst_idev(dst)->dev) &&
-                   !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
+               if (dst->error ||
+                   (!cxgb_our_interface(lldi, get_real_dev,
+                                        ip6_dst_idev(dst)->dev) &&
+                    !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK))) {
                        dst_release(dst);
-                       dst = NULL;
+                       return NULL;
                }
        }
 
-out:
        return dst;
 }
 EXPORT_SYMBOL(cxgb_find_route6);
index 7e1633bf5a22ccf1c9c123ba541349b5dfea1064..ec010ced6c99626c610609efc7933f7d54ae2619 100644 (file)
@@ -275,8 +275,7 @@ static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
 
        /* Check if mac has already been added as part of uc-list */
        for (i = 0; i < adapter->uc_macs; i++) {
-               if (ether_addr_equal((u8 *)&adapter->uc_list[i * ETH_ALEN],
-                                    mac)) {
+               if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
                        /* mac already added, skip addition */
                        adapter->pmac_id[0] = adapter->pmac_id[i + 1];
                        return 0;
@@ -1655,14 +1654,12 @@ static void be_clear_mc_list(struct be_adapter *adapter)
 
 static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
 {
-       if (ether_addr_equal((u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
-                            adapter->dev_mac)) {
+       if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
                adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
                return 0;
        }
 
-       return be_cmd_pmac_add(adapter,
-                              (u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
+       return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
                               adapter->if_handle,
                               &adapter->pmac_id[uc_idx + 1], 0);
 }
@@ -1698,9 +1695,8 @@ static void be_set_uc_list(struct be_adapter *adapter)
        }
 
        if (adapter->update_uc_list) {
-               i = 1; /* First slot is claimed by the Primary MAC */
-
                /* cache the uc-list in adapter array */
+               i = 0;
                netdev_for_each_uc_addr(ha, netdev) {
                        ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
                        i++;
@@ -5155,7 +5151,9 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
            skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
            skb->inner_protocol != htons(ETH_P_TEB) ||
            skb_inner_mac_header(skb) - skb_transport_header(skb) !=
-           sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+               sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
+           !adapter->vxlan_port ||
+           udp_hdr(skb)->dest != adapter->vxlan_port)
                return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 
        return features;
index 624ba9058dc46bd369f7be872b040aee1e814696..c9b7ad65e5633bc2a5147706f5c728f4d1cffa3c 100644 (file)
@@ -733,6 +733,7 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
        priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
 
        /* Enable Congestion State Change Notifications and CS taildrop */
+       memset(&initcgr, 0, sizeof(initcgr));
        initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
        initcgr.cgr.cscn_en = QM_CGR_EN;
 
@@ -2291,7 +2292,8 @@ static int dpaa_open(struct net_device *net_dev)
        net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev);
        if (!net_dev->phydev) {
                netif_err(priv, ifup, net_dev, "init_phy() failed\n");
-               return -ENODEV;
+               err = -ENODEV;
+               goto phy_init_failed;
        }
 
        for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
@@ -2314,6 +2316,7 @@ mac_start_failed:
        for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
                fman_port_disable(mac_dev->port[i]);
 
+phy_init_failed:
        dpaa_eth_napi_disable(priv);
 
        return err;
@@ -2420,6 +2423,7 @@ static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
        }
 
        /* Enable CS TD, but disable Congestion State Change Notifications. */
+       memset(&initcgr, 0, sizeof(initcgr));
        initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
        initcgr.cgr.cscn_en = QM_CGR_EN;
        cs_th = DPAA_INGRESS_CS_THRESHOLD;
index a761001308dcc190d8c9bad57dad40f09dc49caa..1515abaa5ac9cab53ef4aab0ee05104c5bd2a61f 100644 (file)
@@ -3962,8 +3962,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
                                     PAGE_SIZE,
                                     DMA_FROM_DEVICE,
                                     DMA_ATTR_SKIP_CPU_SYNC);
-               __page_frag_drain(buffer_info->page, 0,
-                                 buffer_info->pagecnt_bias);
+               __page_frag_cache_drain(buffer_info->page,
+                                       buffer_info->pagecnt_bias);
 
                buffer_info->page = NULL;
        }
@@ -6991,7 +6991,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
                dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
                                     PAGE_SIZE, DMA_FROM_DEVICE,
                                     DMA_ATTR_SKIP_CPU_SYNC);
-               __page_frag_drain(page, 0, rx_buffer->pagecnt_bias);
+               __page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
        }
 
        /* clear contents of rx_buffer */
index 015198c14fa8531589f4443cb770989d629f51e6..504461a464c581bf77b5cca127680f2622221cde 100644 (file)
@@ -245,13 +245,9 @@ static u32 freq_to_shift(u16 freq)
 {
        u32 freq_khz = freq * 1000;
        u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
-       u64 tmp_rounded =
-               roundup_pow_of_two(max_val_cycles) > max_val_cycles ?
-               roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX;
-       u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
-               max_val_cycles : tmp_rounded;
+       u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
        /* calculate max possible multiplier in order to fit in 64bit */
-       u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
+       u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);
 
        /* This comes from the reverse of clocksource_khz2mult */
        return ilog2(div_u64(max_mul * freq_khz, 1000000));
index edbe200ac2fa4a11ad30bb8f08d8f7e3cb910708..4910d9af19335d4b97d39760c163b41eecc26242 100644 (file)
@@ -2277,7 +2277,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
 
        if (priv->tx_ring_num[TX_XDP] &&
            !mlx4_en_check_xdp_mtu(dev, new_mtu))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        dev->mtu = new_mtu;
 
index 3c37e216bbf324d527a27bee53418f2ecd62e724..eac527e25ec902c2a586e9952272b9e8e599e2c8 100644 (file)
@@ -445,8 +445,14 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
                ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
 
                ring->stride = stride;
-               if (ring->stride <= TXBB_SIZE)
+               if (ring->stride <= TXBB_SIZE) {
+                       /* Stamp first unused send wqe */
+                       __be32 *ptr = (__be32 *)ring->buf;
+                       __be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
+                       *ptr = stamp;
+                       /* Move pointer to start of rx section */
                        ring->buf += TXBB_SIZE;
+               }
 
                ring->log_stride = ffs(ring->stride) - 1;
                ring->buf_size = ring->size * ring->stride;
index 2a9dd460a95f8149d884af048dc651eaaec4904f..e1f9e7cebf8f7adcf0a095513086b949c246aab6 100644 (file)
@@ -118,8 +118,13 @@ static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
        if (!buf)
                return -ENOMEM;
 
+       if (offset_in_page(buf)) {
+               dma_free_coherent(dev, PAGE_SIZE << order,
+                                 buf, sg_dma_address(mem));
+               return -ENOMEM;
+       }
+
        sg_set_buf(mem, buf, PAGE_SIZE << order);
-       BUG_ON(mem->offset);
        sg_dma_len(mem) = PAGE_SIZE << order;
        return 0;
 }
index 5e7840a7a33b55e9dc89efa43b349823d5b2e003..bffa6f345f2f40e35ebab8e546237da4fbe6b6a8 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/io-mapping.h>
 #include <linux/delay.h>
 #include <linux/kmod.h>
+#include <linux/etherdevice.h>
 #include <net/devlink.h>
 
 #include <linux/mlx4/device.h>
@@ -782,6 +783,23 @@ int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
 }
 EXPORT_SYMBOL(mlx4_is_slave_active);
 
+void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
+                                      struct _rule_hw *eth_header)
+{
+       if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
+           is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
+               struct mlx4_net_trans_rule_hw_eth *eth =
+                       (struct mlx4_net_trans_rule_hw_eth *)eth_header;
+               struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
+               bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
+                       next_rule->rsvd == 0;
+
+               if (last_rule)
+                       ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
+       }
+}
+EXPORT_SYMBOL(mlx4_handle_eth_header_mcast_prio);
+
 static void slave_adjust_steering_mode(struct mlx4_dev *dev,
                                       struct mlx4_dev_cap *dev_cap,
                                       struct mlx4_init_hca_param *hca_param)
index c548beaaf9109e376933660dffb39632622dbfc9..56185a0b827df6394a1edba70cf6925683ffb403 100644 (file)
@@ -4164,22 +4164,6 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
        return 0;
 }
 
-static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
-                                        struct _rule_hw *eth_header)
-{
-       if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
-           is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
-               struct mlx4_net_trans_rule_hw_eth *eth =
-                       (struct mlx4_net_trans_rule_hw_eth *)eth_header;
-               struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
-               bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
-                       next_rule->rsvd == 0;
-
-               if (last_rule)
-                       ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
-       }
-}
-
 /*
  * In case of missing eth header, append eth header with a MAC address
  * assigned to the VF.
@@ -4363,10 +4347,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
        header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
 
        if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
-               handle_eth_header_mcast_prio(ctrl, rule_header);
-
-       if (slave == dev->caps.function)
-               goto execute;
+               mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
 
        switch (header_id) {
        case MLX4_NET_TRANS_RULE_ID_ETH:
@@ -4394,7 +4375,6 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                goto err_put_qp;
        }
 
-execute:
        err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
                           vhcr->in_modifier, 0,
                           MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
@@ -4473,6 +4453,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
        struct res_qp *rqp;
        struct res_fs_rule *rrule;
        u64 mirr_reg_id;
+       int qpn;
 
        if (dev->caps.steering_mode !=
            MLX4_STEERING_MODE_DEVICE_MANAGED)
@@ -4489,10 +4470,11 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
        }
        mirr_reg_id = rrule->mirr_rule_id;
        kfree(rrule->mirr_mbox);
+       qpn = rrule->qpn;
 
        /* Release the rule form busy state before removal */
        put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
-       err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
+       err = get_res(dev, slave, qpn, RES_QP, &rqp);
        if (err)
                return err;
 
@@ -4517,7 +4499,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
        if (!err)
                atomic_dec(&rqp->ref_count);
 out:
-       put_res(dev, slave, rrule->qpn, RES_QP);
+       put_res(dev, slave, qpn, RES_QP);
        return err;
 }
 
index 7f6c225666c18b193f642ba74cb8637e471ab3fc..f0b460f47f2992caad4eec7ea0d655296a46e99c 100644 (file)
@@ -723,6 +723,9 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
        int i;
        struct ieee_ets ets;
 
+       if (!MLX5_CAP_GEN(priv->mdev, ets))
+               return;
+
        memset(&ets, 0, sizeof(ets));
        ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
        for (i = 0; i < ets.ets_cap; i++) {
index 352462af8d51aced0ba2c2a0daa729e7aa05f68f..33a399a8b5d52297379ade73f37e61df266905fa 100644 (file)
@@ -171,7 +171,6 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
                return NUM_SW_COUNTERS +
                       MLX5E_NUM_Q_CNTRS(priv) +
                       NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
-                      NUM_PCIE_COUNTERS +
                       MLX5E_NUM_RQ_STATS(priv) +
                       MLX5E_NUM_SQ_STATS(priv) +
                       MLX5E_NUM_PFC_COUNTERS(priv) +
@@ -219,14 +218,6 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       pport_2819_stats_desc[i].format);
 
-       for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
-               strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                      pcie_perf_stats_desc[i].format);
-
-       for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++)
-               strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                      pcie_tas_stats_desc[i].format);
-
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
@@ -339,14 +330,6 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
                                                  pport_2819_stats_desc, i);
 
-       for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
-               data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
-                                                 pcie_perf_stats_desc, i);
-
-       for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++)
-               data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_tas_counters,
-                                                 pcie_tas_stats_desc, i);
-
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
                        data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
index 3691451c728c0c4731e4fbc7946a8549490edc4d..d088effd7160355849faacead1326f2198d12e8d 100644 (file)
@@ -247,6 +247,7 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v,
        }
        if (fs->flow_type & FLOW_MAC_EXT &&
            !is_zero_ether_addr(fs->m_ext.h_dest)) {
+               mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
                                             outer_headers_c, dmac_47_16),
                                fs->m_ext.h_dest);
index cbfa38fc72c0875d6754595e1335c5d1a5af26ff..2b7dd315020cd9e1a21b28643621122695cd06dd 100644 (file)
@@ -291,36 +291,12 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
                                      &qcnt->rx_out_of_buffer);
 }
 
-static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
-{
-       struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
-       struct mlx5_core_dev *mdev = priv->mdev;
-       int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
-       void *out;
-       u32 *in;
-
-       in = mlx5_vzalloc(sz);
-       if (!in)
-               return;
-
-       out = pcie_stats->pcie_perf_counters;
-       MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
-       mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-
-       out = pcie_stats->pcie_tas_counters;
-       MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP);
-       mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-
-       kvfree(in);
-}
-
 void mlx5e_update_stats(struct mlx5e_priv *priv)
 {
        mlx5e_update_q_counter(priv);
        mlx5e_update_vport_counters(priv);
        mlx5e_update_pport_counters(priv);
        mlx5e_update_sw_counters(priv);
-       mlx5e_update_pcie_counters(priv);
 }
 
 void mlx5e_update_stats_work(struct work_struct *work)
@@ -3699,14 +3675,8 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
 
 static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
 {
-       struct mlx5_core_dev *mdev = priv->mdev;
-       struct mlx5_eswitch *esw = mdev->priv.eswitch;
-
        mlx5e_vxlan_cleanup(priv);
 
-       if (MLX5_CAP_GEN(mdev, vport_group_manager))
-               mlx5_eswitch_unregister_vport_rep(esw, 0);
-
        if (priv->xdp_prog)
                bpf_prog_put(priv->xdp_prog);
 }
@@ -3805,14 +3775,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 
        mlx5_lag_add(mdev, netdev);
 
-       if (mlx5e_vxlan_allowed(mdev)) {
-               rtnl_lock();
-               udp_tunnel_get_rx_info(netdev);
-               rtnl_unlock();
-       }
-
        mlx5e_enable_async_events(priv);
-       queue_work(priv->wq, &priv->set_rx_mode_work);
 
        if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
                mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
@@ -3822,13 +3785,30 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
                rep.netdev = netdev;
                mlx5_eswitch_register_vport_rep(esw, 0, &rep);
        }
+
+       if (netdev->reg_state != NETREG_REGISTERED)
+               return;
+
+       /* Device already registered: sync netdev system state */
+       if (mlx5e_vxlan_allowed(mdev)) {
+               rtnl_lock();
+               udp_tunnel_get_rx_info(netdev);
+               rtnl_unlock();
+       }
+
+       queue_work(priv->wq, &priv->set_rx_mode_work);
 }
 
 static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 {
+       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
        queue_work(priv->wq, &priv->set_rx_mode_work);
+       if (MLX5_CAP_GEN(mdev, vport_group_manager))
+               mlx5_eswitch_unregister_vport_rep(esw, 0);
        mlx5e_disable_async_events(priv);
-       mlx5_lag_remove(priv->mdev);
+       mlx5_lag_remove(mdev);
 }
 
 static const struct mlx5e_profile mlx5e_nic_profile = {
@@ -3966,10 +3946,6 @@ void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
        const struct mlx5e_profile *profile = priv->profile;
 
        set_bit(MLX5E_STATE_DESTROYING, &priv->state);
-       if (profile->disable)
-               profile->disable(priv);
-
-       flush_workqueue(priv->wq);
 
        rtnl_lock();
        if (netif_running(netdev))
@@ -3977,6 +3953,10 @@ void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
        netif_device_detach(netdev);
        rtnl_unlock();
 
+       if (profile->disable)
+               profile->disable(priv);
+       flush_workqueue(priv->wq);
+
        mlx5e_destroy_q_counter(priv);
        profile->cleanup_rx(priv);
        mlx5e_close_drop_rq(priv);
index 1fffe48a93cc35d55d03eba9ba352590924548d2..cbfac06b7ffd1d5140226ccb87331db57d4880d8 100644 (file)
@@ -109,7 +109,6 @@ static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
        switch (am->tune_state) {
        case MLX5E_AM_PARKING_ON_TOP:
        case MLX5E_AM_PARKING_TIRED:
-               WARN_ONCE(true, "mlx5e_am_on_top: PARKING\n");
                return true;
        case MLX5E_AM_GOING_RIGHT:
                return (am->steps_left > 1) && (am->steps_right == 1);
@@ -123,7 +122,6 @@ static void mlx5e_am_turn(struct mlx5e_rx_am *am)
        switch (am->tune_state) {
        case MLX5E_AM_PARKING_ON_TOP:
        case MLX5E_AM_PARKING_TIRED:
-               WARN_ONCE(true, "mlx5e_am_turn: PARKING\n");
                break;
        case MLX5E_AM_GOING_RIGHT:
                am->tune_state = MLX5E_AM_GOING_LEFT;
@@ -144,7 +142,6 @@ static int mlx5e_am_step(struct mlx5e_rx_am *am)
        switch (am->tune_state) {
        case MLX5E_AM_PARKING_ON_TOP:
        case MLX5E_AM_PARKING_TIRED:
-               WARN_ONCE(true, "mlx5e_am_step: PARKING\n");
                break;
        case MLX5E_AM_GOING_RIGHT:
                if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
@@ -282,10 +279,8 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
        u32 delta_us = ktime_us_delta(end->time, start->time);
        unsigned int npkts = end->pkt_ctr - start->pkt_ctr;
 
-       if (!delta_us) {
-               WARN_ONCE(true, "mlx5e_am_calc_stats: delta_us=0\n");
+       if (!delta_us)
                return;
-       }
 
        curr_stats->ppms =            (npkts * USEC_PER_MSEC) / delta_us;
        curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
index f202f872f57f6ee0c3e4aad428ffafa7be1a71a2..ba5db1dd23a97a7d378ec3ea8e36fa099d0a462a 100644 (file)
@@ -39,7 +39,7 @@
 #define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
        (*(u32 *)((char *)ptr + dsc[i].offset))
 #define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
-       be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
+       be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
 
 #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
 #define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
@@ -276,32 +276,6 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
        { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
 };
 
-#define PCIE_PERF_OFF(c) \
-       MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
-#define PCIE_PERF_GET(pcie_stats, c) \
-       MLX5_GET(mpcnt_reg, pcie_stats->pcie_perf_counters, \
-                counter_set.pcie_perf_cntrs_grp_data_layout.c)
-#define PCIE_TAS_OFF(c) \
-       MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_tas_cntrs_grp_data_layout.c)
-#define PCIE_TAS_GET(pcie_stats, c) \
-       MLX5_GET(mpcnt_reg, pcie_stats->pcie_tas_counters, \
-                counter_set.pcie_tas_cntrs_grp_data_layout.c)
-
-struct mlx5e_pcie_stats {
-       __be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
-       __be64 pcie_tas_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
-};
-
-static const struct counter_desc pcie_perf_stats_desc[] = {
-       { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
-       { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
-};
-
-static const struct counter_desc pcie_tas_stats_desc[] = {
-       { "tx_pci_transport_nonfatal_msg", PCIE_TAS_OFF(non_fatal_err_msg_sent) },
-       { "tx_pci_transport_fatal_msg", PCIE_TAS_OFF(fatal_err_msg_sent) },
-};
-
 struct mlx5e_rq_stats {
        u64 packets;
        u64 bytes;
@@ -386,8 +360,6 @@ static const struct counter_desc sq_stats_desc[] = {
 #define NUM_PPORT_802_3_COUNTERS       ARRAY_SIZE(pport_802_3_stats_desc)
 #define NUM_PPORT_2863_COUNTERS                ARRAY_SIZE(pport_2863_stats_desc)
 #define NUM_PPORT_2819_COUNTERS                ARRAY_SIZE(pport_2819_stats_desc)
-#define NUM_PCIE_PERF_COUNTERS         ARRAY_SIZE(pcie_perf_stats_desc)
-#define NUM_PCIE_TAS_COUNTERS          ARRAY_SIZE(pcie_tas_stats_desc)
 #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
        ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
 #define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
@@ -397,7 +369,6 @@ static const struct counter_desc sq_stats_desc[] = {
                                         NUM_PPORT_2819_COUNTERS  + \
                                         NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
                                         NUM_PPORT_PRIO)
-#define NUM_PCIE_COUNTERS              (NUM_PCIE_PERF_COUNTERS + NUM_PCIE_TAS_COUNTERS)
 #define NUM_RQ_STATS                   ARRAY_SIZE(rq_stats_desc)
 #define NUM_SQ_STATS                   ARRAY_SIZE(sq_stats_desc)
 
@@ -406,7 +377,6 @@ struct mlx5e_stats {
        struct mlx5e_qcounter_stats qcnt;
        struct mlx5e_vport_stats vport;
        struct mlx5e_pport_stats pport;
-       struct mlx5e_pcie_stats pcie;
        struct rtnl_link_stats64 vf_vport;
 };
 
index f8829b5171560ed2f51cd05b8c82d2a076ce8cbb..118cea5e5489e69f7496f6772b7f189d66b3ac10 100644 (file)
@@ -161,15 +161,21 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
        }
 }
 
+/* we get here also when setting rule to the FW failed, etc. It means that the
+ * flow rule itself might not exist, but some offloading related to the actions
+ * should be cleaned.
+ */
 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_fc *counter = NULL;
 
-       counter = mlx5_flow_rule_counter(flow->rule);
-
-       mlx5_del_flow_rules(flow->rule);
+       if (!IS_ERR(flow->rule)) {
+               counter = mlx5_flow_rule_counter(flow->rule);
+               mlx5_del_flow_rules(flow->rule);
+               mlx5_fc_destroy(priv->mdev, counter);
+       }
 
        if (esw && esw->mode == SRIOV_OFFLOADS) {
                mlx5_eswitch_del_vlan_action(esw, flow->attr);
@@ -177,8 +183,6 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                        mlx5e_detach_encap(priv, flow);
        }
 
-       mlx5_fc_destroy(priv->mdev, counter);
-
        if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
@@ -225,6 +229,11 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);
 
+       struct flow_dissector_key_control *enc_control =
+               skb_flow_dissector_target(f->dissector,
+                                         FLOW_DISSECTOR_KEY_ENC_CONTROL,
+                                         f->key);
+
        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
                struct flow_dissector_key_ports *key =
                        skb_flow_dissector_target(f->dissector,
@@ -237,28 +246,34 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 
                /* Full udp dst port must be given */
                if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
-                       return -EOPNOTSUPP;
-
-               /* udp src port isn't supported */
-               if (memchr_inv(&mask->src, 0, sizeof(mask->src)))
-                       return -EOPNOTSUPP;
+                       goto vxlan_match_offload_err;
 
                if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
                    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
                        parse_vxlan_attr(spec, f);
-               else
+               else {
+                       netdev_warn(priv->netdev,
+                                   "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
                        return -EOPNOTSUPP;
+               }
 
                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         udp_dport, ntohs(mask->dst));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         udp_dport, ntohs(key->dst));
 
+               MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+                        udp_sport, ntohs(mask->src));
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+                        udp_sport, ntohs(key->src));
        } else { /* udp dst port must be given */
-                       return -EOPNOTSUPP;
+vxlan_match_offload_err:
+               netdev_warn(priv->netdev,
+                           "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
+               return -EOPNOTSUPP;
        }
 
-       if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+       if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_dissector_key_ipv4_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
@@ -280,10 +295,10 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
                         ntohl(key->dst));
-       }
 
-       MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
-       MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
+       }
 
        /* Enforce DMAC when offloading incoming tunneled flows.
         * Flow counters require a match on the DMAC.
@@ -346,6 +361,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                        if (parse_tunnel_attr(priv, spec, f))
                                return -EOPNOTSUPP;
                        break;
+               case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+                       netdev_warn(priv->netdev,
+                                   "IPv6 tunnel decap offload isn't supported\n");
                default:
                        return -EOPNOTSUPP;
                }
@@ -375,6 +393,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
                                 key->flags & FLOW_DIS_IS_FRAGMENT);
+
+                       /* the HW doesn't need L3 inline to match on frag=no */
+                       if (key->flags & FLOW_DIS_IS_FRAGMENT)
+                               *min_inline = MLX5_INLINE_MODE_IP;
                }
        }
 
@@ -647,17 +669,14 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 
 #if IS_ENABLED(CONFIG_INET)
        rt = ip_route_output_key(dev_net(mirred_dev), fl4);
-       if (IS_ERR(rt)) {
-               pr_warn("%s: no route to %pI4\n", __func__, &fl4->daddr);
-               return -EOPNOTSUPP;
-       }
+       if (IS_ERR(rt))
+               return PTR_ERR(rt);
 #else
        return -EOPNOTSUPP;
 #endif
 
        if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
-               pr_warn("%s: Can't offload the flow, netdevices aren't on the same HW e-switch\n",
-                       __func__);
+               pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
                ip_rt_put(rt);
                return -EOPNOTSUPP;
        }
@@ -718,12 +737,12 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
                                          struct net_device **out_dev)
 {
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+       struct neighbour *n = NULL;
        struct flowi4 fl4 = {};
-       struct neighbour *n;
        char *encap_header;
        int encap_size;
-       __be32 saddr;
-       int ttl;
+       __be32 saddr = 0;
+       int ttl = 0;
        int err;
 
        encap_header = kzalloc(max_encap_size, GFP_KERNEL);
@@ -750,7 +769,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
        e->out_dev = *out_dev;
 
        if (!(n->nud_state & NUD_VALID)) {
-               err = -ENOTSUPP;
+               pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
+               err = -EOPNOTSUPP;
                goto out;
        }
 
@@ -772,6 +792,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
        err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
                               encap_size, encap_header, &e->encap_id);
 out:
+       if (err && n)
+               neigh_release(n);
        kfree(encap_header);
        return err;
 }
@@ -792,9 +814,17 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
        int tunnel_type;
        int err;
 
-       /* udp dst port must be given */
+       /* udp dst port must be set */
        if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
+               goto vxlan_encap_offload_err;
+
+       /* setting udp src port isn't supported */
+       if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
+vxlan_encap_offload_err:
+               netdev_warn(priv->netdev,
+                           "must set udp dst port and not set udp src port\n");
                return -EOPNOTSUPP;
+       }
 
        if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
            MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
@@ -802,6 +832,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                info.tun_id = tunnel_id_to_key32(key->tun_id);
                tunnel_type = MLX5_HEADER_TYPE_VXLAN;
        } else {
+               netdev_warn(priv->netdev,
+                           "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
                return -EOPNOTSUPP;
        }
 
@@ -809,6 +841,9 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
        case AF_INET:
                info.daddr = key->u.ipv4.dst;
                break;
+       case AF_INET6:
+               netdev_warn(priv->netdev,
+                           "IPv6 tunnel encap offload isn't supported\n");
        default:
                return -EOPNOTSUPP;
        }
@@ -986,7 +1021,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
 
        if (IS_ERR(flow->rule)) {
                err = PTR_ERR(flow->rule);
-               goto err_free;
+               goto err_del_rule;
        }
 
        err = rhashtable_insert_fast(&tc->ht, &flow->node,
@@ -997,7 +1032,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
        goto out;
 
 err_del_rule:
-       mlx5_del_flow_rules(flow->rule);
+       mlx5e_tc_del_flow(priv, flow);
 
 err_free:
        kfree(flow);
index d6807c3cc461f001aa01072e395adf2e93095bfd..f14d9c9ba77394b83aea50564afd3c762613467a 100644 (file)
@@ -1860,7 +1860,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 
        if (!ESW_ALLOWED(esw))
                return -EPERM;
-       if (!LEGAL_VPORT(esw, vport))
+       if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
                return -EINVAL;
 
        mutex_lock(&esw->state_lock);
index 466e161010f759e08ab2253326da4f2a0192aa1d..03293ed1cc22d2716ff5708dc2312b7291cc1899 100644 (file)
@@ -695,6 +695,12 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
                if (err)
                        goto err_reps;
        }
+
+       /* disable PF RoCE so missed packets don't go through RoCE steering */
+       mlx5_dev_list_lock();
+       mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+       mlx5_dev_list_unlock();
+
        return 0;
 
 err_reps:
@@ -718,6 +724,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 {
        int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
+       /* enable back PF RoCE */
+       mlx5_dev_list_lock();
+       mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+       mlx5_dev_list_unlock();
+
        mlx5_eswitch_disable_sriov(esw);
        err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
        if (err) {
index a263d8904a4cf84de718ad9dd4b2d8ddca9a5194..0ac7a2fc916c438bc535b20d45964009747f0b33 100644 (file)
@@ -1263,6 +1263,7 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
        nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
        handle = add_rule_fte(fte, fg, dest, dest_num, false);
        if (IS_ERR(handle)) {
+               unlock_ref_node(&fte->node);
                kfree(fte);
                goto unlock_fg;
        }
index 54e5a786f1915deadf3eb181beeef0bf325a178e..d01e9f21d4691ea497aa7ea0666c83e330c078bb 100644 (file)
@@ -503,6 +503,13 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
        MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
                 to_fw_pkey_sz(dev, 128));
 
+       /* Check log_max_qp from HCA caps to set in current profile */
+       if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
+               mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
+                              profile[prof_sel].log_max_qp,
+                              MLX5_CAP_GEN_MAX(dev, log_max_qp));
+               profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+       }
        if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
                MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
                         prof->log_max_qp);
@@ -575,7 +582,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
        struct mlx5_priv *priv  = &mdev->priv;
        struct msix_entry *msix = priv->msix_arr;
        int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
-       int numa_node           = priv->numa_node;
        int err;
 
        if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
@@ -583,7 +589,7 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
                return -ENOMEM;
        }
 
-       cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+       cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
                        priv->irq_info[i].mask);
 
        err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
@@ -1189,6 +1195,9 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 {
        int err = 0;
 
+       if (cleanup)
+               mlx5_drain_health_wq(dev);
+
        mutex_lock(&dev->intf_state_mutex);
        if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
                dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
@@ -1351,7 +1360,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
 
        mlx5_enter_error_state(dev);
        mlx5_unload_one(dev, priv, false);
-       /* In case of kernel call save the pci state and drain health wq */
+       /* In case of kernel call save the pci state and drain the health wq */
        if (state) {
                pci_save_state(pdev);
                mlx5_drain_health_wq(dev);
index 44389c90056a0f197a97f5d36478ec597f266004..8f1623bf2134700498198a98cb6aca9dddd2a6cd 100644 (file)
@@ -696,7 +696,7 @@ enum rtl_tx_desc_bit_1 {
 enum rtl_rx_desc_bit {
        /* Rx private */
        PID1            = (1 << 18), /* Protocol ID bit 1/2 */
-       PID0            = (1 << 17), /* Protocol ID bit 2/2 */
+       PID0            = (1 << 17), /* Protocol ID bit 0/2 */
 
 #define RxProtoUDP     (PID1)
 #define RxProtoTCP     (PID0)
index f341c1bc7001678326985746bd0ce65d1572994a..f729a6b43958cc82a1b2d38293cb50baf767f39a 100644 (file)
@@ -574,6 +574,7 @@ static struct sh_eth_cpu_data r8a7740_data = {
        .rpadir_value   = 2 << 16,
        .no_trimd       = 1,
        .no_ade         = 1,
+       .hw_crc         = 1,
        .tsu            = 1,
        .select_mii     = 1,
        .shift_rd0      = 1,
@@ -802,7 +803,7 @@ static struct sh_eth_cpu_data sh7734_data = {
 
        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
-       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
 
        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
@@ -819,6 +820,7 @@ static struct sh_eth_cpu_data sh7734_data = {
        .tsu            = 1,
        .hw_crc         = 1,
        .select_mii     = 1,
+       .shift_rd0      = 1,
 };
 
 /* SH7763 */
@@ -831,7 +833,7 @@ static struct sh_eth_cpu_data sh7763_data = {
 
        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
-       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
 
        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
@@ -1656,7 +1658,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
        else
                goto out;
 
-       if (!likely(mdp->irq_enabled)) {
+       if (unlikely(!mdp->irq_enabled)) {
                sh_eth_write(ndev, 0, EESIPR);
                goto out;
        }
index de2947ccc5ad7d30d9cb7bc68483154cb9d119cd..5eb0e684fd76a3de1f46210f9a9b6118d98041e3 100644 (file)
@@ -1323,7 +1323,8 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
        }
 
        /* don't fail init if RSS setup doesn't work */
-       efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
+       rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
+       efx->rss_active = (rc == 0);
 
        return 0;
 }
index 87bdc56b4e3a636450e4f39e49b37606332d8268..18ebaea44e8257255c6a910958e4b00466600903 100644 (file)
@@ -975,6 +975,8 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
 
        case ETHTOOL_GRXFH: {
                info->data = 0;
+               if (!efx->rss_active) /* No RSS */
+                       return 0;
                switch (info->flow_type) {
                case UDP_V4_FLOW:
                        if (efx->rx_hash_udp_4tuple)
index 1a635ced62d0581634748d4b8bdcb15a2e0da69c..1c62c1a00fca49679cb8a69a1ac1a5564be9868d 100644 (file)
@@ -860,6 +860,7 @@ struct vfdi_status;
  * @rx_hash_key: Toeplitz hash key for RSS
  * @rx_indir_table: Indirection table for RSS
  * @rx_scatter: Scatter mode enabled for receives
+ * @rss_active: RSS enabled on hardware
  * @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled
  * @int_error_count: Number of internal errors seen recently
  * @int_error_expire: Time at which error count will be expired
@@ -998,6 +999,7 @@ struct efx_nic {
        u8 rx_hash_key[40];
        u32 rx_indir_table[128];
        bool rx_scatter;
+       bool rss_active;
        bool rx_hash_udp_4tuple;
 
        unsigned int_error_count;
index a3901bc96586e5eff4f58105032ea2565c083769..4e54e5dc9fcb49bf03667843d47a4a2cf1aa978b 100644 (file)
@@ -403,6 +403,7 @@ static int siena_init_nic(struct efx_nic *efx)
        efx_writeo(efx, &temp, FR_AZ_RX_CFG);
 
        siena_rx_push_rss_config(efx, false, efx->rx_indir_table);
+       efx->rss_active = true;
 
        /* Enable event logging */
        rc = efx_mcdi_log_ctrl(efx, true, false, 0);
index c35597586121e5c3b41f6453711e3ea9920af463..3dc7d279f80513a948d2b855c5a09e16f63d5686 100644 (file)
@@ -60,8 +60,9 @@ struct oxnas_dwmac {
        struct regmap   *regmap;
 };
 
-static int oxnas_dwmac_init(struct oxnas_dwmac *dwmac)
+static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
 {
+       struct oxnas_dwmac *dwmac = priv;
        unsigned int value;
        int ret;
 
@@ -105,20 +106,20 @@ static int oxnas_dwmac_init(struct oxnas_dwmac *dwmac)
        return 0;
 }
 
+static void oxnas_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+       struct oxnas_dwmac *dwmac = priv;
+
+       clk_disable_unprepare(dwmac->clk);
+}
+
 static int oxnas_dwmac_probe(struct platform_device *pdev)
 {
        struct plat_stmmacenet_data *plat_dat;
        struct stmmac_resources stmmac_res;
-       struct device_node *sysctrl;
        struct oxnas_dwmac *dwmac;
        int ret;
 
-       sysctrl = of_parse_phandle(pdev->dev.of_node, "oxsemi,sys-ctrl", 0);
-       if (!sysctrl) {
-               dev_err(&pdev->dev, "failed to get sys-ctrl node\n");
-               return -EINVAL;
-       }
-
        ret = stmmac_get_platform_resources(pdev, &stmmac_res);
        if (ret)
                return ret;
@@ -128,72 +129,48 @@ static int oxnas_dwmac_probe(struct platform_device *pdev)
                return PTR_ERR(plat_dat);
 
        dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
-       if (!dwmac)
-               return -ENOMEM;
+       if (!dwmac) {
+               ret = -ENOMEM;
+               goto err_remove_config_dt;
+       }
 
        dwmac->dev = &pdev->dev;
        plat_dat->bsp_priv = dwmac;
+       plat_dat->init = oxnas_dwmac_init;
+       plat_dat->exit = oxnas_dwmac_exit;
 
-       dwmac->regmap = syscon_node_to_regmap(sysctrl);
+       dwmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+                                                       "oxsemi,sys-ctrl");
        if (IS_ERR(dwmac->regmap)) {
                dev_err(&pdev->dev, "failed to have sysctrl regmap\n");
-               return PTR_ERR(dwmac->regmap);
+               ret = PTR_ERR(dwmac->regmap);
+               goto err_remove_config_dt;
        }
 
        dwmac->clk = devm_clk_get(&pdev->dev, "gmac");
-       if (IS_ERR(dwmac->clk))
-               return PTR_ERR(dwmac->clk);
+       if (IS_ERR(dwmac->clk)) {
+               ret = PTR_ERR(dwmac->clk);
+               goto err_remove_config_dt;
+       }
 
-       ret = oxnas_dwmac_init(dwmac);
+       ret = oxnas_dwmac_init(pdev, plat_dat->bsp_priv);
        if (ret)
-               return ret;
+               goto err_remove_config_dt;
 
        ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
        if (ret)
-               clk_disable_unprepare(dwmac->clk);
+               goto err_dwmac_exit;
 
-       return ret;
-}
 
-static int oxnas_dwmac_remove(struct platform_device *pdev)
-{
-       struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
-       int ret = stmmac_dvr_remove(&pdev->dev);
-
-       clk_disable_unprepare(dwmac->clk);
-
-       return ret;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int oxnas_dwmac_suspend(struct device *dev)
-{
-       struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(dev);
-       int ret;
-
-       ret = stmmac_suspend(dev);
-       clk_disable_unprepare(dwmac->clk);
-
-       return ret;
-}
-
-static int oxnas_dwmac_resume(struct device *dev)
-{
-       struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(dev);
-       int ret;
-
-       ret = oxnas_dwmac_init(dwmac);
-       if (ret)
-               return ret;
+       return 0;
 
-       ret = stmmac_resume(dev);
+err_dwmac_exit:
+       oxnas_dwmac_exit(pdev, plat_dat->bsp_priv);
+err_remove_config_dt:
+       stmmac_remove_config_dt(pdev, plat_dat);
 
        return ret;
 }
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(oxnas_dwmac_pm_ops,
-       oxnas_dwmac_suspend, oxnas_dwmac_resume);
 
 static const struct of_device_id oxnas_dwmac_match[] = {
        { .compatible = "oxsemi,ox820-dwmac" },
@@ -203,10 +180,10 @@ MODULE_DEVICE_TABLE(of, oxnas_dwmac_match);
 
 static struct platform_driver oxnas_dwmac_driver = {
        .probe  = oxnas_dwmac_probe,
-       .remove = oxnas_dwmac_remove,
+       .remove = stmmac_pltfr_remove,
        .driver = {
                .name           = "oxnas-dwmac",
-               .pm             = &oxnas_dwmac_pm_ops,
+               .pm             = &stmmac_pltfr_pm_ops,
                .of_match_table = oxnas_dwmac_match,
        },
 };
index bb40382e205deffd9a3312099723b72f84da18a5..a276a32d57f24374444ec06c02b3707fec8fae0a 100644 (file)
@@ -3319,8 +3319,16 @@ int stmmac_dvr_probe(struct device *device,
                ndev->max_mtu = JUMBO_LEN;
        else
                ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
-       if (priv->plat->maxmtu < ndev->max_mtu)
+       /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
+        * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
+        */
+       if ((priv->plat->maxmtu < ndev->max_mtu) &&
+           (priv->plat->maxmtu >= ndev->min_mtu))
                ndev->max_mtu = priv->plat->maxmtu;
+       else if (priv->plat->maxmtu < ndev->min_mtu)
+               netdev_warn(priv->dev,
+                           "%s: warning: maxmtu having invalid value (%d)\n",
+                           __func__, priv->plat->maxmtu);
 
        if (flow_ctrl)
                priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
@@ -3339,13 +3347,6 @@ int stmmac_dvr_probe(struct device *device,
 
        spin_lock_init(&priv->lock);
 
-       ret = register_netdev(ndev);
-       if (ret) {
-               netdev_err(priv->dev, "%s: ERROR %i registering the device\n",
-                          __func__, ret);
-               goto error_netdev_register;
-       }
-
        /* If a specific clk_csr value is passed from the platform
         * this means that the CSR Clock Range selection cannot be
         * changed at run-time and it is fixed. Viceversa the driver'll try to
@@ -3372,11 +3373,21 @@ int stmmac_dvr_probe(struct device *device,
                }
        }
 
-       return 0;
+       ret = register_netdev(ndev);
+       if (ret) {
+               netdev_err(priv->dev, "%s: ERROR %i registering the device\n",
+                          __func__, ret);
+               goto error_netdev_register;
+       }
+
+       return ret;
 
-error_mdio_register:
-       unregister_netdev(ndev);
 error_netdev_register:
+       if (priv->hw->pcs != STMMAC_PCS_RGMII &&
+           priv->hw->pcs != STMMAC_PCS_TBI &&
+           priv->hw->pcs != STMMAC_PCS_RTBI)
+               stmmac_mdio_unregister(ndev);
+error_mdio_register:
        netif_napi_del(&priv->napi);
 error_hw_init:
        clk_disable_unprepare(priv->pclk);
index a2831773431a6c19d185b2e82e50897aaf7ef7d8..3da4737620cb3fbf50e16c2cf61de3dbfd3e2355 100644 (file)
@@ -89,6 +89,9 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
 
        /* Set default value for unicast filter entries */
        plat->unicast_filter_entries = 1;
+
+       /* Set the maxmtu to a default of JUMBO_LEN */
+       plat->maxmtu = JUMBO_LEN;
 }
 
 static int quark_default_data(struct plat_stmmacenet_data *plat,
@@ -126,6 +129,9 @@ static int quark_default_data(struct plat_stmmacenet_data *plat,
        /* Set default value for unicast filter entries */
        plat->unicast_filter_entries = 1;
 
+       /* Set the maxmtu to a default of JUMBO_LEN */
+       plat->maxmtu = JUMBO_LEN;
+
        return 0;
 }
 
index d361835b315dd6b9ed542a48515e85c8024192d3..8dbd59baa34d5ed9eda97396f38b5ab38e96416a 100644 (file)
@@ -279,6 +279,7 @@ config MARVELL_PHY
 
 config MESON_GXL_PHY
        tristate "Amlogic Meson GXL Internal PHY"
+       depends on ARCH_MESON || COMPILE_TEST
        ---help---
          Currently has a driver for the Amlogic Meson GXL Internal PHY
 
index 1b639242f9e23170e69f7b7669ba82d8d4263fb5..e84ae084e259c90649277532f64e563e1091bf28 100644 (file)
@@ -29,6 +29,7 @@
 #define MII_DP83867_MICR       0x12
 #define MII_DP83867_ISR                0x13
 #define DP83867_CTRL           0x1f
+#define DP83867_CFG3           0x1e
 
 /* Extended Registers */
 #define DP83867_RGMIICTL       0x0032
@@ -98,6 +99,8 @@ static int dp83867_config_intr(struct phy_device *phydev)
                micr_status |=
                        (MII_DP83867_MICR_AN_ERR_INT_EN |
                        MII_DP83867_MICR_SPEED_CHNG_INT_EN |
+                       MII_DP83867_MICR_AUTONEG_COMP_INT_EN |
+                       MII_DP83867_MICR_LINK_STS_CHNG_INT_EN |
                        MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN |
                        MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN);
 
@@ -214,6 +217,13 @@ static int dp83867_config_init(struct phy_device *phydev)
                }
        }
 
+       /* Enable Interrupt output INT_OE in CFG3 register */
+       if (phy_interrupt_is_valid(phydev)) {
+               val = phy_read(phydev, DP83867_CFG3);
+               val |= BIT(7);
+               phy_write(phydev, DP83867_CFG3, val);
+       }
+
        return 0;
 }
 
index e269262471a44fdcbfc9ef3c5833bf3ceb78624e..0b78210c0fa74e88b2ef5e27d4a7e6c78e9fa45c 100644 (file)
@@ -1192,7 +1192,8 @@ static int marvell_read_status(struct phy_device *phydev)
        int err;
 
        /* Check the fiber mode first */
-       if (phydev->supported & SUPPORTED_FIBRE) {
+       if (phydev->supported & SUPPORTED_FIBRE &&
+           phydev->interface != PHY_INTERFACE_MODE_SGMII) {
                err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
                if (err < 0)
                        goto error;
index 25f93a98863b79be76ac72b814d5dd4ff75c89a9..48da6e93c3f783e07f61ae24151e3114ac8dc1ae 100644 (file)
@@ -1065,6 +1065,15 @@ void phy_state_machine(struct work_struct *work)
                        if (old_link != phydev->link)
                                phydev->state = PHY_CHANGELINK;
                }
+               /*
+                * Failsafe: check that nobody set phydev->link=0 between two
+                * poll cycles, otherwise we won't leave RUNNING state as long
+                * as link remains down.
+                */
+               if (!phydev->link && phydev->state == PHY_RUNNING) {
+                       phydev->state = PHY_CHANGELINK;
+                       phydev_err(phydev, "no link in PHY_RUNNING\n");
+               }
                break;
        case PHY_CHANGELINK:
                err = phy_read_status(phydev);
index 6c646e228833c07b0369c28684f0a6b4aa27cf2c..6e98ede997d3f08d4ac3fa967382b55e4dfc287d 100644 (file)
@@ -1367,6 +1367,7 @@ static struct usb_driver asix_driver = {
        .probe =        usbnet_probe,
        .suspend =      asix_suspend,
        .resume =       asix_resume,
+       .reset_resume = asix_resume,
        .disconnect =   usbnet_disconnect,
        .supports_autosuspend = 1,
        .disable_hub_initiated_lpm = 1,
index 7dc61228c55b8af26f0623fa4cfb9dd512cd97ac..be418563cb18c6132cb29d4201b831072749d43b 100644 (file)
@@ -3576,39 +3576,87 @@ static bool delay_autosuspend(struct r8152 *tp)
                return false;
 }
 
-static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
+static int rtl8152_rumtime_suspend(struct r8152 *tp)
 {
-       struct r8152 *tp = usb_get_intfdata(intf);
        struct net_device *netdev = tp->netdev;
        int ret = 0;
 
-       mutex_lock(&tp->control);
+       if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
+               u32 rcr = 0;
 
-       if (PMSG_IS_AUTO(message)) {
-               if (netif_running(netdev) && delay_autosuspend(tp)) {
+               if (delay_autosuspend(tp)) {
                        ret = -EBUSY;
                        goto out1;
                }
 
-               set_bit(SELECTIVE_SUSPEND, &tp->flags);
-       } else {
-               netif_device_detach(netdev);
+               if (netif_carrier_ok(netdev)) {
+                       u32 ocp_data;
+
+                       rcr = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+                       ocp_data = rcr & ~RCR_ACPT_ALL;
+                       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+                       rxdy_gated_en(tp, true);
+                       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA,
+                                                PLA_OOB_CTRL);
+                       if (!(ocp_data & RXFIFO_EMPTY)) {
+                               rxdy_gated_en(tp, false);
+                               ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+                               ret = -EBUSY;
+                               goto out1;
+                       }
+               }
+
+               clear_bit(WORK_ENABLE, &tp->flags);
+               usb_kill_urb(tp->intr_urb);
+
+               tp->rtl_ops.autosuspend_en(tp, true);
+
+               if (netif_carrier_ok(netdev)) {
+                       napi_disable(&tp->napi);
+                       rtl_stop_rx(tp);
+                       rxdy_gated_en(tp, false);
+                       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+                       napi_enable(&tp->napi);
+               }
        }
 
+       set_bit(SELECTIVE_SUSPEND, &tp->flags);
+
+out1:
+       return ret;
+}
+
+static int rtl8152_system_suspend(struct r8152 *tp)
+{
+       struct net_device *netdev = tp->netdev;
+       int ret = 0;
+
+       netif_device_detach(netdev);
+
        if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
                clear_bit(WORK_ENABLE, &tp->flags);
                usb_kill_urb(tp->intr_urb);
                napi_disable(&tp->napi);
-               if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
-                       rtl_stop_rx(tp);
-                       tp->rtl_ops.autosuspend_en(tp, true);
-               } else {
-                       cancel_delayed_work_sync(&tp->schedule);
-                       tp->rtl_ops.down(tp);
-               }
+               cancel_delayed_work_sync(&tp->schedule);
+               tp->rtl_ops.down(tp);
                napi_enable(&tp->napi);
        }
-out1:
+
+       return ret;
+}
+
+static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
+{
+       struct r8152 *tp = usb_get_intfdata(intf);
+       int ret;
+
+       mutex_lock(&tp->control);
+
+       if (PMSG_IS_AUTO(message))
+               ret = rtl8152_rumtime_suspend(tp);
+       else
+               ret = rtl8152_system_suspend(tp);
+
        mutex_unlock(&tp->control);
 
        return ret;
index 7532646c3b7bedb89c2cbe95e84f09e32e0a8391..454f907d419a7f87cc0ae1813f40c054726be7e8 100644 (file)
@@ -263,7 +263,9 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
                .flowi4_iif = LOOPBACK_IFINDEX,
                .flowi4_tos = RT_TOS(ip4h->tos),
                .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
+               .flowi4_proto = ip4h->protocol,
                .daddr = ip4h->daddr,
+               .saddr = ip4h->saddr,
        };
        struct net *net = dev_net(vrf_dev);
        struct rtable *rt;
@@ -967,6 +969,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
         */
        need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
        if (!ipv6_ndisc_frame(skb) && !need_strict) {
+               vrf_rx_stats(vrf_dev, skb->len);
                skb->dev = vrf_dev;
                skb->skb_iif = vrf_dev->ifindex;
 
@@ -1011,6 +1014,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
                goto out;
        }
 
+       vrf_rx_stats(vrf_dev, skb->len);
+
        skb_push(skb, skb->mac_len);
        dev_queue_xmit_nit(skb, vrf_dev);
        skb_pull(skb, skb->mac_len);
@@ -1247,6 +1252,8 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
                return -EINVAL;
 
        vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
+       if (vrf->tb_id == RT_TABLE_UNSPEC)
+               return -EINVAL;
 
        dev->priv_flags |= IFF_L3MDEV_MASTER;
 
index b776a0ab106c0d55b1b7cdf14b79d91621d27aa1..9d9b4e0def2a531344c8fd3d78258e1690340350 100644 (file)
@@ -218,7 +218,7 @@ static int slic_ds26522_probe(struct spi_device *spi)
 
        ret = slic_ds26522_init_configure(spi);
        if (ret == 0)
-               pr_info("DS26522 cs%d configurated\n", spi->chip_select);
+               pr_info("DS26522 cs%d configured\n", spi->chip_select);
 
        return ret;
 }
index bc7397d709d3ac5ff85b9a1057e43a93500587fc..08bc7822f8209d0a9136357edb18683092c86c18 100644 (file)
@@ -16,7 +16,7 @@
 /********************************************************************/
 int orinoco_mic_init(struct orinoco_private *priv)
 {
-       priv->tx_tfm_mic = crypto_alloc_ahash("michael_mic", 0,
+       priv->tx_tfm_mic = crypto_alloc_shash("michael_mic", 0,
                                              CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm_mic)) {
                printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
@@ -25,7 +25,7 @@ int orinoco_mic_init(struct orinoco_private *priv)
                return -ENOMEM;
        }
 
-       priv->rx_tfm_mic = crypto_alloc_ahash("michael_mic", 0,
+       priv->rx_tfm_mic = crypto_alloc_shash("michael_mic", 0,
                                              CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm_mic)) {
                printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
@@ -40,17 +40,16 @@ int orinoco_mic_init(struct orinoco_private *priv)
 void orinoco_mic_free(struct orinoco_private *priv)
 {
        if (priv->tx_tfm_mic)
-               crypto_free_ahash(priv->tx_tfm_mic);
+               crypto_free_shash(priv->tx_tfm_mic);
        if (priv->rx_tfm_mic)
-               crypto_free_ahash(priv->rx_tfm_mic);
+               crypto_free_shash(priv->rx_tfm_mic);
 }
 
-int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
+int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
                u8 *da, u8 *sa, u8 priority,
                u8 *data, size_t data_len, u8 *mic)
 {
-       AHASH_REQUEST_ON_STACK(req, tfm_michael);
-       struct scatterlist sg[2];
+       SHASH_DESC_ON_STACK(desc, tfm_michael);
        u8 hdr[ETH_HLEN + 2]; /* size of header + padding */
        int err;
 
@@ -67,18 +66,27 @@ int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
        hdr[ETH_ALEN * 2 + 2] = 0;
        hdr[ETH_ALEN * 2 + 3] = 0;
 
-       /* Use scatter gather to MIC header and data in one go */
-       sg_init_table(sg, 2);
-       sg_set_buf(&sg[0], hdr, sizeof(hdr));
-       sg_set_buf(&sg[1], data, data_len);
+       desc->tfm = tfm_michael;
+       desc->flags = 0;
 
-       if (crypto_ahash_setkey(tfm_michael, key, MIC_KEYLEN))
-               return -1;
+       err = crypto_shash_setkey(tfm_michael, key, MIC_KEYLEN);
+       if (err)
+               return err;
+
+       err = crypto_shash_init(desc);
+       if (err)
+               return err;
+
+       err = crypto_shash_update(desc, hdr, sizeof(hdr));
+       if (err)
+               return err;
+
+       err = crypto_shash_update(desc, data, data_len);
+       if (err)
+               return err;
+
+       err = crypto_shash_final(desc, mic);
+       shash_desc_zero(desc);
 
-       ahash_request_set_tfm(req, tfm_michael);
-       ahash_request_set_callback(req, 0, NULL, NULL);
-       ahash_request_set_crypt(req, sg, mic, data_len + sizeof(hdr));
-       err = crypto_ahash_digest(req);
-       ahash_request_zero(req);
        return err;
 }
index ce731d05cc98cd2d0b6415f66868d3ad517b4753..e8724e8892194bb3e732078b3561ab031071a061 100644 (file)
@@ -6,6 +6,7 @@
 #define _ORINOCO_MIC_H_
 
 #include <linux/types.h>
+#include <crypto/hash.h>
 
 #define MICHAEL_MIC_LEN 8
 
@@ -15,7 +16,7 @@ struct crypto_ahash;
 
 int orinoco_mic_init(struct orinoco_private *priv);
 void orinoco_mic_free(struct orinoco_private *priv);
-int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
+int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
                u8 *da, u8 *sa, u8 priority,
                u8 *data, size_t data_len, u8 *mic);
 
index 2f0c84b1c440cd1160bdebc3cabcb13be110a73e..5fa1c3e3713f835387353ba781cbb24ce95df6dc 100644 (file)
@@ -152,8 +152,8 @@ struct orinoco_private {
        u8 *wpa_ie;
        int wpa_ie_len;
 
-       struct crypto_ahash *rx_tfm_mic;
-       struct crypto_ahash *tx_tfm_mic;
+       struct crypto_shash *rx_tfm_mic;
+       struct crypto_shash *tx_tfm_mic;
 
        unsigned int wpa_enabled:1;
        unsigned int tkip_cm_active:1;
index 0a508649903d73fe4b9a643a5e7e43577abc22c9..49015b05f3d1a048ee6e3ac00426ea76cd5bab44 100644 (file)
@@ -1063,6 +1063,7 @@ int rtl_usb_probe(struct usb_interface *intf,
                return -ENOMEM;
        }
        rtlpriv = hw->priv;
+       rtlpriv->hw = hw;
        rtlpriv->usb_data = kzalloc(RTL_USB_MAX_RX_COUNT * sizeof(u32),
                                    GFP_KERNEL);
        if (!rtlpriv->usb_data)
index b40cfb076f02446fda62aea907301e4a62dcc17a..8a3c3e32a704b3e359a78ed06e95c6e977f322bc 100644 (file)
@@ -1106,12 +1106,7 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
        if (ret)
                return ret;
 
-       /* Checking for ctrl->tagset is a trick to avoid sleeping on module
-        * load, since we only need the quirk on reset_controller. Notice
-        * that the HGST device needs this delay only in firmware activation
-        * procedure; unfortunately we have no (easy) way to verify this.
-        */
-       if ((ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) && ctrl->tagset)
+       if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
                msleep(NVME_QUIRK_DELAY_AMOUNT);
 
        return nvme_wait_ready(ctrl, cap, false);
@@ -1193,8 +1188,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
                blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
                blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
        }
-       if (ctrl->stripe_size)
-               blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
+       if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
+               blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
        blk_queue_virt_boundary(q, ctrl->page_size - 1);
        if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
                vwc = true;
@@ -1250,19 +1245,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
        ctrl->max_hw_sectors =
                min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
 
-       if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
-               unsigned int max_hw_sectors;
-
-               ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
-               max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
-               if (ctrl->max_hw_sectors) {
-                       ctrl->max_hw_sectors = min(max_hw_sectors,
-                                                       ctrl->max_hw_sectors);
-               } else {
-                       ctrl->max_hw_sectors = max_hw_sectors;
-               }
-       }
-
        nvme_set_queue_limits(ctrl, ctrl->admin_q);
        ctrl->sgls = le32_to_cpu(id->sgls);
        ctrl->kas = le16_to_cpu(id->kas);
index 771e2e76187222dfb71616f5665c7b2b22802c74..fcc9dcfdf67517d1352bef388a50d2dbf6c9a129 100644 (file)
@@ -1491,19 +1491,20 @@ static int
 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
 {
        struct nvme_fc_queue *queue = &ctrl->queues[1];
-       int i, j, ret;
+       int i, ret;
 
        for (i = 1; i < ctrl->queue_count; i++, queue++) {
                ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
-               if (ret) {
-                       for (j = i-1; j >= 0; j--)
-                               __nvme_fc_delete_hw_queue(ctrl,
-                                               &ctrl->queues[j], j);
-                       return ret;
-               }
+               if (ret)
+                       goto delete_queues;
        }
 
        return 0;
+
+delete_queues:
+       for (; i >= 0; i--)
+               __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
+       return ret;
 }
 
 static int
@@ -1653,13 +1654,12 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
                struct nvme_fc_fcp_op *op)
 {
        struct nvmefc_fcp_req *freq = &op->fcp_req;
-       u32 map_len = nvme_map_len(rq);
        enum dma_data_direction dir;
        int ret;
 
        freq->sg_cnt = 0;
 
-       if (!map_len)
+       if (!blk_rq_payload_bytes(rq))
                return 0;
 
        freq->sg_table.sgl = freq->first_sgl;
@@ -1853,7 +1853,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (ret)
                return ret;
 
-       data_len = nvme_map_len(rq);
+       data_len = blk_rq_payload_bytes(rq);
        if (data_len)
                io_dir = ((rq_data_dir(rq) == WRITE) ?
                                        NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
@@ -2401,8 +2401,8 @@ __nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
        WARN_ON_ONCE(!changed);
 
        dev_info(ctrl->ctrl.device,
-               "NVME-FC{%d}: new ctrl: NQN \"%s\" (%p)\n",
-               ctrl->cnum, ctrl->ctrl.opts->subsysnqn, &ctrl);
+               "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
+               ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
 
        kref_get(&ctrl->ctrl.kref);
 
index bd5321441d127143b87563e5463d2944aa0c1b0e..aead6d08ed2c83b4f67e9087b0ca446247b57d18 100644 (file)
@@ -135,7 +135,6 @@ struct nvme_ctrl {
 
        u32 page_size;
        u32 max_hw_sectors;
-       u32 stripe_size;
        u16 oncs;
        u16 vid;
        atomic_t abort_limit;
@@ -226,14 +225,6 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
        return (sector >> (ns->lba_shift - 9));
 }
 
-static inline unsigned nvme_map_len(struct request *rq)
-{
-       if (req_op(rq) == REQ_OP_DISCARD)
-               return sizeof(struct nvme_dsm_range);
-       else
-               return blk_rq_bytes(rq);
-}
-
 static inline void nvme_cleanup_cmd(struct request *req)
 {
        if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
index 3d21a154dce79deceeff77cd16ef5c6bf2a71978..3faefabf339c98d221373437e411512543d29263 100644 (file)
@@ -306,11 +306,11 @@ static __le64 **iod_list(struct request *req)
        return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
 }
 
-static int nvme_init_iod(struct request *rq, unsigned size,
-               struct nvme_dev *dev)
+static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
        int nseg = blk_rq_nr_phys_segments(rq);
+       unsigned int size = blk_rq_payload_bytes(rq);
 
        if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
                iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
@@ -420,12 +420,11 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
 }
 #endif
 
-static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
-               int total_len)
+static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct dma_pool *pool;
-       int length = total_len;
+       int length = blk_rq_payload_bytes(req);
        struct scatterlist *sg = iod->sg;
        int dma_len = sg_dma_len(sg);
        u64 dma_addr = sg_dma_address(sg);
@@ -501,7 +500,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
 }
 
 static int nvme_map_data(struct nvme_dev *dev, struct request *req,
-               unsigned size, struct nvme_command *cmnd)
+               struct nvme_command *cmnd)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct request_queue *q = req->q;
@@ -519,7 +518,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
                                DMA_ATTR_NO_WARN))
                goto out;
 
-       if (!nvme_setup_prps(dev, req, size))
+       if (!nvme_setup_prps(dev, req))
                goto out_unmap;
 
        ret = BLK_MQ_RQ_QUEUE_ERROR;
@@ -580,7 +579,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_dev *dev = nvmeq->dev;
        struct request *req = bd->rq;
        struct nvme_command cmnd;
-       unsigned map_len;
        int ret = BLK_MQ_RQ_QUEUE_OK;
 
        /*
@@ -600,13 +598,12 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (ret != BLK_MQ_RQ_QUEUE_OK)
                return ret;
 
-       map_len = nvme_map_len(req);
-       ret = nvme_init_iod(req, map_len, dev);
+       ret = nvme_init_iod(req, dev);
        if (ret != BLK_MQ_RQ_QUEUE_OK)
                goto out_free_cmd;
 
        if (blk_rq_nr_phys_segments(req))
-               ret = nvme_map_data(dev, req, map_len, &cmnd);
+               ret = nvme_map_data(dev, req, &cmnd);
 
        if (ret != BLK_MQ_RQ_QUEUE_OK)
                goto out_cleanup_iod;
@@ -712,15 +709,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
                req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
                nvme_req(req)->result = cqe.result;
                blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
-
        }
 
-       /* If the controller ignores the cq head doorbell and continuously
-        * writes to the queue, it is theoretically possible to wrap around
-        * the queue twice and mistakenly return IRQ_NONE.  Linux only
-        * requires that 0.1% of your interrupts are handled, so this isn't
-        * a big problem.
-        */
        if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
                return;
 
@@ -1909,10 +1899,10 @@ static int nvme_dev_map(struct nvme_dev *dev)
        if (!dev->bar)
                goto release;
 
-       return 0;
+       return 0;
   release:
-       pci_release_mem_regions(pdev);
-       return -ENODEV;
+       pci_release_mem_regions(pdev);
+       return -ENODEV;
 }
 
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
index f587af345889eb1b32a1f4f87dbe0c9a3a30ae42..557f29b1f1bb23e4e5cded156db21b4f981b585a 100644 (file)
@@ -981,8 +981,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
 }
 
 static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
-               struct request *rq, unsigned int map_len,
-               struct nvme_command *c)
+               struct request *rq, struct nvme_command *c)
 {
        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
        struct nvme_rdma_device *dev = queue->device;
@@ -1014,9 +1013,9 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
        }
 
        if (count == 1) {
-               if (rq_data_dir(rq) == WRITE &&
-                   map_len <= nvme_rdma_inline_data_size(queue) &&
-                   nvme_rdma_queue_idx(queue))
+               if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
+                   blk_rq_payload_bytes(rq) <=
+                               nvme_rdma_inline_data_size(queue))
                        return nvme_rdma_map_sg_inline(queue, req, c);
 
                if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
@@ -1422,7 +1421,7 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
                struct request *rq)
 {
        if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
-               struct nvme_command *cmd = (struct nvme_command *)rq->cmd;
+               struct nvme_command *cmd = nvme_req(rq)->cmd;
 
                if (rq->cmd_type != REQ_TYPE_DRV_PRIV ||
                    cmd->common.opcode != nvme_fabrics_command ||
@@ -1444,7 +1443,6 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_command *c = sqe->data;
        bool flush = false;
        struct ib_device *dev;
-       unsigned int map_len;
        int ret;
 
        WARN_ON_ONCE(rq->tag < 0);
@@ -1462,8 +1460,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        blk_mq_start_request(rq);
 
-       map_len = nvme_map_len(rq);
-       ret = nvme_rdma_map_data(queue, rq, map_len, c);
+       ret = nvme_rdma_map_data(queue, rq, c);
        if (ret < 0) {
                dev_err(queue->ctrl->ctrl.device,
                             "Failed to map data (%d)\n", ret);
index b71e95044b43e3e62a5d063047064a46d88a6d9f..a5c09e703bd8636d96c9c0c7d603226e46518f61 100644 (file)
@@ -2160,30 +2160,6 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
        return nvme_trans_status_code(hdr, nvme_sc);
 }
 
-static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-                                                       u8 *cmd)
-{
-       u8 immed, no_flush;
-
-       immed = cmd[1] & 0x01;
-       no_flush = cmd[4] & 0x04;
-
-       if (immed != 0) {
-               return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-                                       ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-                                       SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-       } else {
-               if (no_flush == 0) {
-                       /* Issue NVME FLUSH command prior to START STOP UNIT */
-                       int res = nvme_trans_synchronize_cache(ns, hdr);
-                       if (res)
-                               return res;
-               }
-
-               return 0;
-       }
-}
-
 static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                                                        u8 *cmd)
 {
@@ -2439,9 +2415,6 @@ static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
        case SECURITY_PROTOCOL_OUT:
                retcode = nvme_trans_security_protocol(ns, hdr, cmd);
                break;
-       case START_STOP:
-               retcode = nvme_trans_start_stop(ns, hdr, cmd);
-               break;
        case SYNCHRONIZE_CACHE:
                retcode = nvme_trans_synchronize_cache(ns, hdr);
                break;
index ec1ad2aa0a4ca941e8fe51db94cc7ffa452c1693..95ae52390478fe62fdb59605ee2c7a6d0583a919 100644 (file)
@@ -382,7 +382,6 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
 {
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
-       u64 val;
        u32 val32;
        u16 status = 0;
 
@@ -392,8 +391,7 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        case NVME_FEAT_KATO:
-               val = le64_to_cpu(req->cmd->prop_set.value);
-               val32 = val & 0xffff;
+               val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
                req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
                nvmet_set_result(req, req->sq->ctrl->kato);
                break;
index bcb8ebeb01c5d26515c8047e4eed765dbdc4da45..4e8e6a22bce162a61eec428e9c435acb26b74046 100644 (file)
@@ -845,7 +845,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
        rport->lport = nport->lport;
        nport->rport = rport;
 
-       return ret ? ret : count;
+       return count;
 }
 
 
@@ -952,7 +952,7 @@ fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
        tport->lport = nport->lport;
        nport->tport = tport;
 
-       return ret ? ret : count;
+       return count;
 }
 
 
index 965911d9b36a77af3867ff6361169f977e71392b..398ea7f54826b6bef0db0aa35aa2b1bb8493f432 100644 (file)
@@ -981,8 +981,8 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem,
  * @cell: nvmem cell to be read.
  * @len: pointer to length of cell which will be populated on successful read.
  *
- * Return: ERR_PTR() on error or a valid pointer to a char * buffer on success.
- * The buffer should be freed by the consumer with a kfree().
+ * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
+ * buffer should be freed by the consumer with a kfree().
  */
 void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
 {
index ac27b9bac3b9a9abe219e7d60fab8ea946c0abd8..8e7b120696fa91da63a44bc7f4f8422b5d24bc7e 100644 (file)
@@ -71,7 +71,7 @@ static struct nvmem_config imx_ocotp_nvmem_config = {
 
 static const struct of_device_id imx_ocotp_dt_ids[] = {
        { .compatible = "fsl,imx6q-ocotp",  (void *)128 },
-       { .compatible = "fsl,imx6sl-ocotp", (void *)32 },
+       { .compatible = "fsl,imx6sl-ocotp", (void *)64 },
        { .compatible = "fsl,imx6sx-ocotp", (void *)128 },
        { },
 };
index b5305f08b1848bf123f3448fba123a2761e17286..2bdb6c3893281c65d12a3916c037c07acba8480d 100644 (file)
@@ -21,11 +21,11 @@ static int qfprom_reg_read(void *context,
                        unsigned int reg, void *_val, size_t bytes)
 {
        void __iomem *base = context;
-       u32 *val = _val;
-       int i = 0, words = bytes / 4;
+       u8 *val = _val;
+       int i = 0, words = bytes;
 
        while (words--)
-               *val++ = readl(base + reg + (i++ * 4));
+               *val++ = readb(base + reg + i++);
 
        return 0;
 }
@@ -34,11 +34,11 @@ static int qfprom_reg_write(void *context,
                         unsigned int reg, void *_val, size_t bytes)
 {
        void __iomem *base = context;
-       u32 *val = _val;
-       int i = 0, words = bytes / 4;
+       u8 *val = _val;
+       int i = 0, words = bytes;
 
        while (words--)
-               writel(*val++, base + reg + (i++ * 4));
+               writeb(*val++, base + reg + i++);
 
        return 0;
 }
@@ -53,7 +53,7 @@ static int qfprom_remove(struct platform_device *pdev)
 static struct nvmem_config econfig = {
        .name = "qfprom",
        .owner = THIS_MODULE,
-       .stride = 4,
+       .stride = 1,
        .word_size = 1,
        .reg_read = qfprom_reg_read,
        .reg_write = qfprom_reg_write,
index a579126832afde2d758d629f58338af2d9e9378f..620c231a2889ef6879269a80ccbfaa3e4493958f 100644 (file)
@@ -212,7 +212,7 @@ static int meson_pmx_request_gpio(struct pinctrl_dev *pcdev,
 {
        struct meson_pinctrl *pc = pinctrl_dev_get_drvdata(pcdev);
 
-       meson_pmx_disable_other_groups(pc, range->pin_base + offset, -1);
+       meson_pmx_disable_other_groups(pc, offset, -1);
 
        return 0;
 }
index aea310a918210ebc96bcb2739ac3c50d10329f8d..c9a146948192dba19ca5da1587791c25b315d628 100644 (file)
@@ -382,26 +382,21 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
        int ret = 0;
        u32 pin_reg;
-       unsigned long flags;
-       bool level_trig;
-       u32 active_level;
+       unsigned long flags, irq_flags;
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
 
        spin_lock_irqsave(&gpio_dev->lock, flags);
        pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
 
-       /*
-        * When level_trig is set EDGE and active_level is set HIGH in BIOS
-        * default settings, ignore incoming settings from client and use
-        * BIOS settings to configure GPIO register.
+       /* Ignore the settings coming from the client and
+        * read the values from the ACPI tables
+        * while setting the trigger type
         */
-       level_trig = !(pin_reg & (LEVEL_TRIGGER << LEVEL_TRIG_OFF));
-       active_level = pin_reg & (ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
 
-       if(level_trig &&
-          ((active_level >> ACTIVE_LEVEL_OFF) == ACTIVE_HIGH))
-               type = IRQ_TYPE_EDGE_FALLING;
+       irq_flags = irq_get_trigger_type(d->irq);
+       if (irq_flags != IRQ_TYPE_NONE)
+               type = irq_flags;
 
        switch (type & IRQ_TYPE_SENSE_MASK) {
        case IRQ_TYPE_EDGE_RISING:
index 12f7d1eb65bc71e891f3eb88a750953dbd1ce68b..07409fde02b23f85005c0403ba146fce50cde970 100644 (file)
@@ -56,6 +56,17 @@ static const struct samsung_pin_bank_type bank_type_alive = {
        .reg_offset = { 0x00, 0x04, 0x08, 0x0c, },
 };
 
+/* Exynos5433 has the 4bit widths for PINCFG_TYPE_DRV bitfields. */
+static const struct samsung_pin_bank_type exynos5433_bank_type_off = {
+       .fld_width = { 4, 1, 2, 4, 2, 2, },
+       .reg_offset = { 0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, },
+};
+
+static const struct samsung_pin_bank_type exynos5433_bank_type_alive = {
+       .fld_width = { 4, 1, 2, 4, },
+       .reg_offset = { 0x00, 0x04, 0x08, 0x0c, },
+};
+
 static void exynos_irq_mask(struct irq_data *irqd)
 {
        struct irq_chip *chip = irq_data_get_irq_chip(irqd);
@@ -1335,82 +1346,82 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
 
 /* pin banks of exynos5433 pin-controller - ALIVE */
 static const struct samsung_pin_bank_data exynos5433_pin_banks0[] = {
-       EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
-       EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
-       EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
-       EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
-       EXYNOS_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
-       EXYNOS_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
-       EXYNOS_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
-       EXYNOS_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
-       EXYNOS_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
+       EXYNOS5433_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
+       EXYNOS5433_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
+       EXYNOS5433_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
+       EXYNOS5433_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
+       EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
+       EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
+       EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
+       EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
+       EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
 };
 
 /* pin banks of exynos5433 pin-controller - AUD */
 static const struct samsung_pin_bank_data exynos5433_pin_banks1[] = {
-       EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
-       EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
+       EXYNOS5433_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
 };
 
 /* pin banks of exynos5433 pin-controller - CPIF */
 static const struct samsung_pin_bank_data exynos5433_pin_banks2[] = {
-       EXYNOS_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
 };
 
 /* pin banks of exynos5433 pin-controller - eSE */
 static const struct samsung_pin_bank_data exynos5433_pin_banks3[] = {
-       EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
 };
 
 /* pin banks of exynos5433 pin-controller - FINGER */
 static const struct samsung_pin_bank_data exynos5433_pin_banks4[] = {
-       EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
 };
 
 /* pin banks of exynos5433 pin-controller - FSYS */
 static const struct samsung_pin_bank_data exynos5433_pin_banks5[] = {
-       EXYNOS_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
-       EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
-       EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
-       EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
-       EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
-       EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
+       EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
+       EXYNOS5433_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
 };
 
 /* pin banks of exynos5433 pin-controller - IMEM */
 static const struct samsung_pin_bank_data exynos5433_pin_banks6[] = {
-       EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
 };
 
 /* pin banks of exynos5433 pin-controller - NFC */
 static const struct samsung_pin_bank_data exynos5433_pin_banks7[] = {
-       EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
 };
 
 /* pin banks of exynos5433 pin-controller - PERIC */
 static const struct samsung_pin_bank_data exynos5433_pin_banks8[] = {
-       EXYNOS_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
-       EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
-       EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
-       EXYNOS_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
-       EXYNOS_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
-       EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
-       EXYNOS_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
-       EXYNOS_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
-       EXYNOS_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
-       EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
-       EXYNOS_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
-       EXYNOS_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
-       EXYNOS_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
-       EXYNOS_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
-       EXYNOS_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
-       EXYNOS_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
-       EXYNOS_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
+       EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
+       EXYNOS5433_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
+       EXYNOS5433_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
+       EXYNOS5433_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
+       EXYNOS5433_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
+       EXYNOS5433_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
+       EXYNOS5433_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
+       EXYNOS5433_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
 };
 
 /* pin banks of exynos5433 pin-controller - TOUCH */
 static const struct samsung_pin_bank_data exynos5433_pin_banks9[] = {
-       EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
 };
 
 /*
index 5821525a2c8473fa026b3dc84cd6eb999c243b2a..a473092fb8d2362f11a1a48beb7f545b7c9f25ce 100644 (file)
                .pctl_res_idx   = pctl_idx,             \
        }                                               \
 
+#define EXYNOS5433_PIN_BANK_EINTG(pins, reg, id, offs)         \
+       {                                                       \
+               .type           = &exynos5433_bank_type_off,    \
+               .pctl_offset    = reg,                          \
+               .nr_pins        = pins,                         \
+               .eint_type      = EINT_TYPE_GPIO,               \
+               .eint_offset    = offs,                         \
+               .name           = id                            \
+       }
+
+#define EXYNOS5433_PIN_BANK_EINTW(pins, reg, id, offs)         \
+       {                                                       \
+               .type           = &exynos5433_bank_type_alive,  \
+               .pctl_offset    = reg,                          \
+               .nr_pins        = pins,                         \
+               .eint_type      = EINT_TYPE_WKUP,               \
+               .eint_offset    = offs,                         \
+               .name           = id                            \
+       }
+
+#define EXYNOS5433_PIN_BANK_EINTW_EXT(pins, reg, id, offs, pctl_idx) \
+       {                                                       \
+               .type           = &exynos5433_bank_type_alive,  \
+               .pctl_offset    = reg,                          \
+               .nr_pins        = pins,                         \
+               .eint_type      = EINT_TYPE_WKUP,               \
+               .eint_offset    = offs,                         \
+               .name           = id,                           \
+               .pctl_res_idx   = pctl_idx,                     \
+       }                                                       \
+
 /**
  * struct exynos_weint_data: irq specific data for all the wakeup interrupts
  * generated by the external wakeup interrupt controller.
index 5fe8be089b8b2dc51ced608e0790c7ce05b8dd18..59aa8e302bc3f9ab9dd26b43e6bf53ef812631fd 100644 (file)
@@ -1034,7 +1034,7 @@ config SURFACE_PRO3_BUTTON
 
 config SURFACE_3_BUTTON
        tristate "Power/home/volume buttons driver for Microsoft Surface 3 tablet"
-       depends on ACPI && KEYBOARD_GPIO
+       depends on ACPI && KEYBOARD_GPIO && I2C
        ---help---
          This driver handles the power/home/volume buttons on the Microsoft Surface 3 tablet.
 
index 61f39abf5dc8f24b738a2dafc5117b40a2521fd5..82d67715ce76d9ff0442ecdcdbd2dbb324380203 100644 (file)
@@ -177,43 +177,43 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event);
 
 #if IS_ENABLED(CONFIG_LEDS_CLASS)
 static enum led_brightness logolamp_get(struct led_classdev *cdev);
-static void logolamp_set(struct led_classdev *cdev,
+static int logolamp_set(struct led_classdev *cdev,
                               enum led_brightness brightness);
 
 static struct led_classdev logolamp_led = {
  .name = "fujitsu::logolamp",
  .brightness_get = logolamp_get,
- .brightness_set = logolamp_set
+ .brightness_set_blocking = logolamp_set
 };
 
 static enum led_brightness kblamps_get(struct led_classdev *cdev);
-static void kblamps_set(struct led_classdev *cdev,
+static int kblamps_set(struct led_classdev *cdev,
                               enum led_brightness brightness);
 
 static struct led_classdev kblamps_led = {
  .name = "fujitsu::kblamps",
  .brightness_get = kblamps_get,
- .brightness_set = kblamps_set
+ .brightness_set_blocking = kblamps_set
 };
 
 static enum led_brightness radio_led_get(struct led_classdev *cdev);
-static void radio_led_set(struct led_classdev *cdev,
+static int radio_led_set(struct led_classdev *cdev,
                               enum led_brightness brightness);
 
 static struct led_classdev radio_led = {
  .name = "fujitsu::radio_led",
  .brightness_get = radio_led_get,
- .brightness_set = radio_led_set
+ .brightness_set_blocking = radio_led_set
 };
 
 static enum led_brightness eco_led_get(struct led_classdev *cdev);
-static void eco_led_set(struct led_classdev *cdev,
+static int eco_led_set(struct led_classdev *cdev,
                               enum led_brightness brightness);
 
 static struct led_classdev eco_led = {
  .name = "fujitsu::eco_led",
  .brightness_get = eco_led_get,
- .brightness_set = eco_led_set
+ .brightness_set_blocking = eco_led_set
 };
 #endif
 
@@ -267,48 +267,48 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
 #if IS_ENABLED(CONFIG_LEDS_CLASS)
 /* LED class callbacks */
 
-static void logolamp_set(struct led_classdev *cdev,
+static int logolamp_set(struct led_classdev *cdev,
                               enum led_brightness brightness)
 {
        if (brightness >= LED_FULL) {
                call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_ON);
-               call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_ON);
+               return call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_ON);
        } else if (brightness >= LED_HALF) {
                call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_ON);
-               call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_OFF);
+               return call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, FUNC_LED_OFF);
        } else {
-               call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_OFF);
+               return call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, FUNC_LED_OFF);
        }
 }
 
-static void kblamps_set(struct led_classdev *cdev,
+static int kblamps_set(struct led_classdev *cdev,
                               enum led_brightness brightness)
 {
        if (brightness >= LED_FULL)
-               call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_ON);
+               return call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_ON);
        else
-               call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF);
+               return call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF);
 }
 
-static void radio_led_set(struct led_classdev *cdev,
+static int radio_led_set(struct led_classdev *cdev,
                                enum led_brightness brightness)
 {
        if (brightness >= LED_FULL)
-               call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON);
+               return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON);
        else
-               call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0);
+               return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0);
 }
 
-static void eco_led_set(struct led_classdev *cdev,
+static int eco_led_set(struct led_classdev *cdev,
                                enum led_brightness brightness)
 {
        int curr;
 
        curr = call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0);
        if (brightness >= LED_FULL)
-               call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr | ECO_LED_ON);
+               return call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr | ECO_LED_ON);
        else
-               call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr & ~ECO_LED_ON);
+               return call_fext_func(FUNC_LEDS, 0x1, ECO_LED, curr & ~ECO_LED_ON);
 }
 
 static enum led_brightness logolamp_get(struct led_classdev *cdev)
index 9a507e77eced18cc433792f651761dd137b75d62..90b05c72186c4f9e9c37a812ec469a738d253e95 100644 (file)
@@ -396,9 +396,6 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
                        goto unwind_vring_allocations;
        }
 
-       /* track the rvdevs list reference */
-       kref_get(&rvdev->refcount);
-
        list_add_tail(&rvdev->node, &rproc->rvdevs);
 
        rproc_add_subdev(rproc, &rvdev->subdev,
@@ -889,13 +886,15 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
        /*
         * Create a copy of the resource table. When a virtio device starts
         * and calls vring_new_virtqueue() the address of the allocated vring
-        * will be stored in the table_ptr. Before the device is started,
-        * table_ptr will be copied into device memory.
+        * will be stored in the cached_table. Before the device is started,
+        * cached_table will be copied into device memory.
         */
-       rproc->table_ptr = kmemdup(table, tablesz, GFP_KERNEL);
-       if (!rproc->table_ptr)
+       rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL);
+       if (!rproc->cached_table)
                goto clean_up;
 
+       rproc->table_ptr = rproc->cached_table;
+
        /* reset max_notifyid */
        rproc->max_notifyid = -1;
 
@@ -914,16 +913,18 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
        }
 
        /*
-        * The starting device has been given the rproc->table_ptr as the
+        * The starting device has been given the rproc->cached_table as the
         * resource table. The address of the vring along with the other
-        * allocated resources (carveouts etc) is stored in table_ptr.
+        * allocated resources (carveouts etc) is stored in cached_table.
         * In order to pass this information to the remote device we must copy
         * this information to device memory. We also update the table_ptr so
         * that any subsequent changes will be applied to the loaded version.
         */
        loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
-       if (loaded_table)
-               memcpy(loaded_table, rproc->table_ptr, tablesz);
+       if (loaded_table) {
+               memcpy(loaded_table, rproc->cached_table, tablesz);
+               rproc->table_ptr = loaded_table;
+       }
 
        /* power up the remote processor */
        ret = rproc->ops->start(rproc);
@@ -951,7 +952,8 @@ stop_rproc:
 clean_up_resources:
        rproc_resource_cleanup(rproc);
 clean_up:
-       kfree(rproc->table_ptr);
+       kfree(rproc->cached_table);
+       rproc->cached_table = NULL;
        rproc->table_ptr = NULL;
 
        rproc_disable_iommu(rproc);
@@ -1185,7 +1187,8 @@ void rproc_shutdown(struct rproc *rproc)
        rproc_disable_iommu(rproc);
 
        /* Free the copy of the resource table */
-       kfree(rproc->table_ptr);
+       kfree(rproc->cached_table);
+       rproc->cached_table = NULL;
        rproc->table_ptr = NULL;
 
        /* if in crash state, unlock crash handler */
index a79cb5a9e5f22963ed56214421abc099c5949c37..1cfb775e8e82b8b391108c700abba57f92867066 100644 (file)
@@ -453,8 +453,8 @@ int rpmsg_register_device(struct rpmsg_device *rpdev)
        struct device *dev = &rpdev->dev;
        int ret;
 
-       dev_set_name(&rpdev->dev, "%s:%s",
-                    dev_name(dev->parent), rpdev->id.name);
+       dev_set_name(&rpdev->dev, "%s.%s.%d.%d", dev_name(dev->parent),
+                    rpdev->id.name, rpdev->src, rpdev->dst);
 
        rpdev->dev.bus = &rpmsg_bus;
        rpdev->dev.release = rpmsg_release_device;
index d9e15210b110efcd3147d5a02f0e06c0673d4ad3..5caf5f3ff642282ee13776e9df9ca9a18f494536 100644 (file)
@@ -64,9 +64,9 @@ int           max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
 u32    bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
 u32    *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
 
-#define BFAD_FW_FILE_CB                "cbfw-3.2.3.0.bin"
-#define BFAD_FW_FILE_CT                "ctfw-3.2.3.0.bin"
-#define BFAD_FW_FILE_CT2       "ct2fw-3.2.3.0.bin"
+#define BFAD_FW_FILE_CB                "cbfw-3.2.5.1.bin"
+#define BFAD_FW_FILE_CT                "ctfw-3.2.5.1.bin"
+#define BFAD_FW_FILE_CT2       "ct2fw-3.2.5.1.bin"
 
 static u32 *bfad_load_fwimg(struct pci_dev *pdev);
 static void bfad_free_fwimg(void);
index f9e862093a25935e601c45b4854035e98e035f03..cfcfff48e8e16e3fb2b66c0d491c0d612a8cdd38 100644 (file)
@@ -58,7 +58,7 @@
 #ifdef BFA_DRIVER_VERSION
 #define BFAD_DRIVER_VERSION    BFA_DRIVER_VERSION
 #else
-#define BFAD_DRIVER_VERSION    "3.2.25.0"
+#define BFAD_DRIVER_VERSION    "3.2.25.1"
 #endif
 
 #define BFAD_PROTO_NAME FCPI_NAME
index 9ddc9200e0a48b00632bbc441c492679786ec570..9e4b7709043e0c49620755cafce2f80b3e99df89 100644 (file)
@@ -248,6 +248,7 @@ struct fnic {
        struct completion *remove_wait; /* device remove thread blocks */
 
        atomic_t in_flight;             /* io counter */
+       bool internal_reset_inprogress;
        u32 _reserved;                  /* fill hole */
        unsigned long state_flags;      /* protected by host lock */
        enum fnic_state state;
index 2544a37ece0afdc19742d01954487f87dd4c3c5c..adb3d5871e743442e04014cd2f86eae15ab27912 100644 (file)
@@ -2581,6 +2581,19 @@ int fnic_host_reset(struct scsi_cmnd *sc)
        unsigned long wait_host_tmo;
        struct Scsi_Host *shost = sc->device->host;
        struct fc_lport *lp = shost_priv(shost);
+       struct fnic *fnic = lport_priv(lp);
+       unsigned long flags;
+
+       spin_lock_irqsave(&fnic->fnic_lock, flags);
+       if (fnic->internal_reset_inprogress == 0) {
+               fnic->internal_reset_inprogress = 1;
+       } else {
+               spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                       "host reset in progress skipping another host reset\n");
+               return SUCCESS;
+       }
+       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
        /*
         * If fnic_reset is successful, wait for fabric login to complete
@@ -2601,6 +2614,9 @@ int fnic_host_reset(struct scsi_cmnd *sc)
                }
        }
 
+       spin_lock_irqsave(&fnic->fnic_lock, flags);
+       fnic->internal_reset_inprogress = 0;
+       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
        return ret;
 }
 
index 3d3768aaab4f2bb79ccbaf40f1f569114e4f9b30..8fb5c54c7dd3752084cf1063078422b34e65624c 100644 (file)
@@ -3585,7 +3585,7 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
                               1, 1);
        if (rc) {
                pr_err("srp_transfer_data() failed: %d\n", rc);
-               return -EAGAIN;
+               return -EIO;
        }
        /*
         * We now tell TCM to add this WRITE CDB directly into the TCM storage
index 23ca8a274586746e19204f291a844e6cbc43a07f..21331453db7bd29a0c2bc8cac430b8c8bb60b9c2 100644 (file)
@@ -1,6 +1,6 @@
 config QEDI
        tristate "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver Support"
-       depends on PCI && SCSI
+       depends on PCI && SCSI && UIO
        depends on QED
        select SCSI_ISCSI_ATTRS
        select QED_LL2
index c35b6de4ca643297d1908341421c865c2cb93e84..e9e1e141af9cd287bcca730d05a7a62d58fb644a 100644 (file)
@@ -1018,7 +1018,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
        count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
        BUG_ON(count > sdb->table.nents);
        sdb->table.nents = count;
-       sdb->length = blk_rq_bytes(req);
+       sdb->length = blk_rq_payload_bytes(req);
        return BLKPREP_OK;
 }
 
@@ -2893,7 +2893,7 @@ scsi_internal_device_block(struct scsi_device *sdev)
         * request queue. 
         */
        if (q->mq_ops) {
-               blk_mq_stop_hw_queues(q);
+               blk_mq_quiesce_queue(q);
        } else {
                spin_lock_irqsave(q->queue_lock, flags);
                blk_stop_queue(q);
index b1933041da39d414f9c6e127052ba8cbaa25e65e..1fbb1ecf49f2ec8639e30457e2ec3aee68015bb4 100644 (file)
@@ -836,7 +836,6 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
        struct bio *bio = rq->bio;
        sector_t sector = blk_rq_pos(rq);
        unsigned int nr_sectors = blk_rq_sectors(rq);
-       unsigned int nr_bytes = blk_rq_bytes(rq);
        int ret;
 
        if (sdkp->device->no_write_same)
@@ -869,21 +868,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
 
        cmd->transfersize = sdp->sector_size;
        cmd->allowed = SD_MAX_RETRIES;
-
-       /*
-        * For WRITE_SAME the data transferred in the DATA IN buffer is
-        * different from the amount of data actually written to the target.
-        *
-        * We set up __data_len to the amount of data transferred from the
-        * DATA IN buffer so that blk_rq_map_sg set up the proper S/G list
-        * to transfer a single sector of data first, but then reset it to
-        * the amount of data to be written right after so that the I/O path
-        * knows how much to actually write.
-        */
-       rq->__data_len = sdp->sector_size;
-       ret = scsi_init_io(cmd);
-       rq->__data_len = nr_bytes;
-       return ret;
+       return scsi_init_io(cmd);
 }
 
 static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
index 396b32dca07464dddbf575a37fd13ad1e3f92579..7cf70aaec0ba32ce9c506a84f21fb9be39bf9e64 100644 (file)
@@ -591,6 +591,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!pool) {
                SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");
 
+               ret = -ENOMEM;
                goto err_free_res;
        }
 
@@ -601,6 +602,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!pool) {
                SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");
 
+               ret = -ENOMEM;
                goto err_free_dflt_sgl_pool;
        }
 
@@ -611,6 +613,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!pool) {
                SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");
 
+               ret = -ENOMEM;
                goto err_free_max_sgl_pool;
        }
 
index 8130dfe897451268adcea02c420d3aa61c9809aa..4971aa54756a965ccc99edc63946fee11c13d3e9 100644 (file)
@@ -770,6 +770,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
                        /* Initialize the device private structure. */
                        struct octeon_ethernet *priv = netdev_priv(dev);
 
+                       SET_NETDEV_DEV(dev, &pdev->dev);
                        dev->netdev_ops = &cvm_oct_pow_netdev_ops;
                        priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
                        priv->port = CVMX_PIP_NUM_INPUT_PORTS;
@@ -816,6 +817,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
                        }
 
                        /* Initialize the device private structure. */
+                       SET_NETDEV_DEV(dev, &pdev->dev);
                        priv = netdev_priv(dev);
                        priv->netdev = dev;
                        priv->of_node = cvm_oct_node_for_port(pip, interface,
index 7dfefd66df93874b1359824890b4b760275ff2c6..1cadc9eefa21a47e783160b874dbd2ce02f8f05f 100644 (file)
@@ -1693,6 +1693,10 @@ void transport_generic_request_failure(struct se_cmd *cmd,
        case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
        case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
        case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
+       case TCM_TOO_MANY_TARGET_DESCS:
+       case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
+       case TCM_TOO_MANY_SEGMENT_DESCS:
+       case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
                break;
        case TCM_OUT_OF_RESOURCES:
                sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -2808,6 +2812,26 @@ static const struct sense_info sense_info_table[] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
        },
+       [TCM_TOO_MANY_TARGET_DESCS] = {
+               .key = ILLEGAL_REQUEST,
+               .asc = 0x26,
+               .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
+       },
+       [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
+               .key = ILLEGAL_REQUEST,
+               .asc = 0x26,
+               .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
+       },
+       [TCM_TOO_MANY_SEGMENT_DESCS] = {
+               .key = ILLEGAL_REQUEST,
+               .asc = 0x26,
+               .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
+       },
+       [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
+               .key = ILLEGAL_REQUEST,
+               .asc = 0x26,
+               .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
+       },
        [TCM_PARAMETER_LIST_LENGTH_ERROR] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
index 37d5caebffa6b593025a28b703a54a71e7d940d3..d828b3b5000bf421826b9823efcbac8d6b2d58a3 100644 (file)
@@ -53,18 +53,13 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
        return 0;
 }
 
-static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
-                                       bool src)
+static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
+                                       struct se_device **found_dev)
 {
        struct se_device *se_dev;
-       unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
+       unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
        int rc;
 
-       if (src)
-               dev_wwn = &xop->dst_tid_wwn[0];
-       else
-               dev_wwn = &xop->src_tid_wwn[0];
-
        mutex_lock(&g_device_mutex);
        list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
 
@@ -78,15 +73,8 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
                if (rc != 0)
                        continue;
 
-               if (src) {
-                       xop->dst_dev = se_dev;
-                       pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
-                               " se_dev\n", xop->dst_dev);
-               } else {
-                       xop->src_dev = se_dev;
-                       pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located"
-                               " se_dev\n", xop->src_dev);
-               }
+               *found_dev = se_dev;
+               pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
 
                rc = target_depend_item(&se_dev->dev_group.cg_item);
                if (rc != 0) {
@@ -110,7 +98,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
 }
 
 static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
-                               unsigned char *p, bool src)
+                               unsigned char *p, unsigned short cscd_index)
 {
        unsigned char *desc = p;
        unsigned short ript;
@@ -155,7 +143,13 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
                return -EINVAL;
        }
 
-       if (src) {
+       if (cscd_index != xop->stdi && cscd_index != xop->dtdi) {
+               pr_debug("XCOPY 0xe4: ignoring CSCD entry %d - neither src nor "
+                        "dest\n", cscd_index);
+               return 0;
+       }
+
+       if (cscd_index == xop->stdi) {
                memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
                /*
                 * Determine if the source designator matches the local device
@@ -167,10 +161,15 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
                        pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
                                        " received xop\n", xop->src_dev);
                }
-       } else {
+       }
+
+       if (cscd_index == xop->dtdi) {
                memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
                /*
-                * Determine if the destination designator matches the local device
+                * Determine if the destination designator matches the local
+                * device. If @cscd_index corresponds to both source (stdi) and
+                * destination (dtdi), or dtdi comes after stdi, then
+                * XCOL_DEST_RECV_OP wins.
                 */
                if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
                                XCOPY_NAA_IEEE_REGEX_LEN)) {
@@ -190,20 +189,23 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
 {
        struct se_device *local_dev = se_cmd->se_dev;
        unsigned char *desc = p;
-       int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0;
+       int offset = tdll % XCOPY_TARGET_DESC_LEN, rc;
+       unsigned short cscd_index = 0;
        unsigned short start = 0;
-       bool src = true;
 
        *sense_ret = TCM_INVALID_PARAMETER_LIST;
 
        if (offset != 0) {
                pr_err("XCOPY target descriptor list length is not"
                        " multiple of %d\n", XCOPY_TARGET_DESC_LEN);
+               *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE;
                return -EINVAL;
        }
-       if (tdll > 64) {
+       if (tdll > RCR_OP_MAX_TARGET_DESC_COUNT * XCOPY_TARGET_DESC_LEN) {
                pr_err("XCOPY target descriptor supports a maximum"
                        " two src/dest descriptors, tdll: %hu too large..\n", tdll);
+               /* spc4r37 6.4.3.4 CSCD DESCRIPTOR LIST LENGTH field */
+               *sense_ret = TCM_TOO_MANY_TARGET_DESCS;
                return -EINVAL;
        }
        /*
@@ -215,37 +217,43 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
 
        while (start < tdll) {
                /*
-                * Check target descriptor identification with 0xE4 type with
-                * use VPD 0x83 WWPN matching ..
+                * Check target descriptor identification with 0xE4 type, and
+                * compare the current index with the CSCD descriptor IDs in
+                * the segment descriptor. Use VPD 0x83 WWPN matching ..
                 */
                switch (desc[0]) {
                case 0xe4:
                        rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
-                                                       &desc[0], src);
+                                                       &desc[0], cscd_index);
                        if (rc != 0)
                                goto out;
-                       /*
-                        * Assume target descriptors are in source -> destination order..
-                        */
-                       if (src)
-                               src = false;
-                       else
-                               src = true;
                        start += XCOPY_TARGET_DESC_LEN;
                        desc += XCOPY_TARGET_DESC_LEN;
-                       ret++;
+                       cscd_index++;
                        break;
                default:
                        pr_err("XCOPY unsupported descriptor type code:"
                                        " 0x%02x\n", desc[0]);
+                       *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE;
                        goto out;
                }
        }
 
-       if (xop->op_origin == XCOL_SOURCE_RECV_OP)
-               rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
-       else
-               rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
+       switch (xop->op_origin) {
+       case XCOL_SOURCE_RECV_OP:
+               rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn,
+                                               &xop->dst_dev);
+               break;
+       case XCOL_DEST_RECV_OP:
+               rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn,
+                                               &xop->src_dev);
+               break;
+       default:
+               pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
+                       "stdi: %hu dtdi: %hu\n", xop->stdi, xop->dtdi);
+               rc = -EINVAL;
+               break;
+       }
        /*
         * If a matching IEEE NAA 0x83 descriptor for the requested device
         * is not located on this node, return COPY_ABORTED with ASQ/ASQC
@@ -262,7 +270,7 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
        pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
                 xop->dst_dev, &xop->dst_tid_wwn[0]);
 
-       return ret;
+       return cscd_index;
 
 out:
        return -EINVAL;
@@ -284,6 +292,14 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
 
        xop->stdi = get_unaligned_be16(&desc[4]);
        xop->dtdi = get_unaligned_be16(&desc[6]);
+
+       if (xop->stdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX ||
+           xop->dtdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX) {
+               pr_err("XCOPY segment desc 0x02: unsupported CSCD ID > 0x%x; stdi: %hu dtdi: %hu\n",
+                       XCOPY_CSCD_DESC_ID_LIST_OFF_MAX, xop->stdi, xop->dtdi);
+               return -EINVAL;
+       }
+
        pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
                desc_len, xop->stdi, xop->dtdi, dc);
 
@@ -306,15 +322,25 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
 
 static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
                                struct xcopy_op *xop, unsigned char *p,
-                               unsigned int sdll)
+                               unsigned int sdll, sense_reason_t *sense_ret)
 {
        unsigned char *desc = p;
        unsigned int start = 0;
        int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;
 
+       *sense_ret = TCM_INVALID_PARAMETER_LIST;
+
        if (offset != 0) {
                pr_err("XCOPY segment descriptor list length is not"
                        " multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
+               *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE;
+               return -EINVAL;
+       }
+       if (sdll > RCR_OP_MAX_SG_DESC_COUNT * XCOPY_SEGMENT_DESC_LEN) {
+               pr_err("XCOPY supports %u segment descriptor(s), sdll: %u too"
+                       " large..\n", RCR_OP_MAX_SG_DESC_COUNT, sdll);
+               /* spc4r37 6.4.3.5 SEGMENT DESCRIPTOR LIST LENGTH field */
+               *sense_ret = TCM_TOO_MANY_SEGMENT_DESCS;
                return -EINVAL;
        }
 
@@ -335,6 +361,7 @@ static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
                default:
                        pr_err("XCOPY unsupported segment descriptor"
                                "type: 0x%02x\n", desc[0]);
+                       *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE;
                        goto out;
                }
        }
@@ -861,6 +888,16 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
                return TCM_UNSUPPORTED_SCSI_OPCODE;
        }
 
+       if (se_cmd->data_length == 0) {
+               target_complete_cmd(se_cmd, SAM_STAT_GOOD);
+               return TCM_NO_SENSE;
+       }
+       if (se_cmd->data_length < XCOPY_HDR_LEN) {
+               pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n",
+                               se_cmd->data_length, XCOPY_HDR_LEN);
+               return TCM_PARAMETER_LIST_LENGTH_ERROR;
+       }
+
        xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
        if (!xop) {
                pr_err("Unable to allocate xcopy_op\n");
@@ -883,6 +920,12 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
         */
        tdll = get_unaligned_be16(&p[2]);
        sdll = get_unaligned_be32(&p[8]);
+       if (tdll + sdll > RCR_OP_MAX_DESC_LIST_LEN) {
+               pr_err("XCOPY descriptor list length %u exceeds maximum %u\n",
+                      tdll + sdll, RCR_OP_MAX_DESC_LIST_LEN);
+               ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
+               goto out;
+       }
 
        inline_dl = get_unaligned_be32(&p[12]);
        if (inline_dl != 0) {
@@ -890,10 +933,32 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
                goto out;
        }
 
+       if (se_cmd->data_length < (XCOPY_HDR_LEN + tdll + sdll + inline_dl)) {
+               pr_err("XCOPY parameter truncation: data length %u too small "
+                       "for tdll: %hu sdll: %u inline_dl: %u\n",
+                       se_cmd->data_length, tdll, sdll, inline_dl);
+               ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
+               goto out;
+       }
+
        pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
                " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
                tdll, sdll, inline_dl);
 
+       /*
+        * skip over the target descriptors until segment descriptors
+        * have been passed - CSCD ids are needed to determine src and dest.
+        */
+       seg_desc = &p[16] + tdll;
+
+       rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc,
+                                                   sdll, &ret);
+       if (rc <= 0)
+               goto out;
+
+       pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
+                               rc * XCOPY_SEGMENT_DESC_LEN);
+
        rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
        if (rc <= 0)
                goto out;
@@ -911,18 +976,8 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
 
        pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
                                rc * XCOPY_TARGET_DESC_LEN);
-       seg_desc = &p[16];
-       seg_desc += (rc * XCOPY_TARGET_DESC_LEN);
-
-       rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll);
-       if (rc <= 0) {
-               xcopy_pt_undepend_remotedev(xop);
-               goto out;
-       }
        transport_kunmap_data_sg(se_cmd);
 
-       pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
-                               rc * XCOPY_SEGMENT_DESC_LEN);
        INIT_WORK(&xop->xop_work, target_xcopy_do_work);
        queue_work(xcopy_wq, &xop->xop_work);
        return TCM_NO_SENSE;
index 4d3d4dd060f28366ebd069abb603472ae0275d5b..7c0b105cbe1b486062c40d97280059a286f59bf8 100644 (file)
@@ -1,10 +1,17 @@
 #include <target/target_core_base.h>
 
+#define XCOPY_HDR_LEN                  16
 #define XCOPY_TARGET_DESC_LEN          32
 #define XCOPY_SEGMENT_DESC_LEN         28
 #define XCOPY_NAA_IEEE_REGEX_LEN       16
 #define XCOPY_MAX_SECTORS              1024
 
+/*
+ * SPC4r37 6.4.6.1
+ * Table 150 â€” CSCD descriptor ID values
+ */
+#define XCOPY_CSCD_DESC_ID_LIST_OFF_MAX        0x07FF
+
 enum xcopy_origin_list {
        XCOL_SOURCE_RECV_OP = 0x01,
        XCOL_DEST_RECV_OP = 0x02,
index 0aa9e7d697a5df8a8d25194c377498ddce049d87..25dbd8c7aec73345d357c2a75ff0cde26c918217 100644 (file)
@@ -239,6 +239,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
        if (ifp->desc.bNumEndpoints >= num_ep)
                goto skip_to_next_endpoint_or_interface_descriptor;
 
+       /* Check for duplicate endpoint addresses */
+       for (i = 0; i < ifp->desc.bNumEndpoints; ++i) {
+               if (ifp->endpoint[i].desc.bEndpointAddress ==
+                   d->bEndpointAddress) {
+                       dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
+                           cfgno, inum, asnum, d->bEndpointAddress);
+                       goto skip_to_next_endpoint_or_interface_descriptor;
+               }
+       }
+
        endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
        ++ifp->desc.bNumEndpoints;
 
index 1fa5c0f29c647ab38367c00817518e122cf3b65a..a56c75e09786d5fa1e064a8e56e58e4095e4d716 100644 (file)
@@ -103,8 +103,7 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
 
 static void hub_release(struct kref *kref);
 static int usb_reset_and_verify_device(struct usb_device *udev);
-static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
-                                         struct usb_port *port_dev);
+static int hub_port_disable(struct usb_hub *hub, int port1, int set_state);
 
 static inline char *portspeed(struct usb_hub *hub, int portstatus)
 {
@@ -902,34 +901,6 @@ static int hub_set_port_link_state(struct usb_hub *hub, int port1,
                        USB_PORT_FEAT_LINK_STATE);
 }
 
-/*
- * USB-3 does not have a similar link state as USB-2 that will avoid negotiating
- * a connection with a plugged-in cable but will signal the host when the cable
- * is unplugged. Disable remote wake and set link state to U3 for USB-3 devices
- */
-static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
-{
-       struct usb_port *port_dev = hub->ports[port1 - 1];
-       struct usb_device *hdev = hub->hdev;
-       int ret = 0;
-
-       if (!hub->error) {
-               if (hub_is_superspeed(hub->hdev)) {
-                       hub_usb3_port_prepare_disable(hub, port_dev);
-                       ret = hub_set_port_link_state(hub, port_dev->portnum,
-                                                     USB_SS_PORT_LS_U3);
-               } else {
-                       ret = usb_clear_port_feature(hdev, port1,
-                                       USB_PORT_FEAT_ENABLE);
-               }
-       }
-       if (port_dev->child && set_state)
-               usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
-       if (ret && ret != -ENODEV)
-               dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret);
-       return ret;
-}
-
 /*
  * Disable a port and mark a logical connect-change event, so that some
  * time later hub_wq will disconnect() any existing usb_device on the port
@@ -4162,6 +4133,34 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
 
 #endif /* CONFIG_PM */
 
+/*
+ * USB-3 does not have a similar link state as USB-2 that will avoid negotiating
+ * a connection with a plugged-in cable but will signal the host when the cable
+ * is unplugged. Disable remote wake and set link state to U3 for USB-3 devices
+ */
+static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
+{
+       struct usb_port *port_dev = hub->ports[port1 - 1];
+       struct usb_device *hdev = hub->hdev;
+       int ret = 0;
+
+       if (!hub->error) {
+               if (hub_is_superspeed(hub->hdev)) {
+                       hub_usb3_port_prepare_disable(hub, port_dev);
+                       ret = hub_set_port_link_state(hub, port_dev->portnum,
+                                                     USB_SS_PORT_LS_U3);
+               } else {
+                       ret = usb_clear_port_feature(hdev, port1,
+                                       USB_PORT_FEAT_ENABLE);
+               }
+       }
+       if (port_dev->child && set_state)
+               usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
+       if (ret && ret != -ENODEV)
+               dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret);
+       return ret;
+}
+
 
 /* USB 2.0 spec, 7.1.7.3 / fig 7-29:
  *
index b95930f20d906dfc02d4d6db7dbe954e475d11cc..c55db4aa54d677c77fb2a0c9bcff7d38ac7d1b9e 100644 (file)
@@ -3753,7 +3753,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
                hs_ep->desc_list = dma_alloc_coherent(hsotg->dev,
                        MAX_DMA_DESC_NUM_GENERIC *
                        sizeof(struct dwc2_dma_desc),
-                       &hs_ep->desc_list_dma, GFP_KERNEL);
+                       &hs_ep->desc_list_dma, GFP_ATOMIC);
                if (!hs_ep->desc_list) {
                        ret = -ENOMEM;
                        goto error2;
index a786256535b6a77392018aa93f78f785daf8a1b2..11fe68a4627bd315c8e82dbd3398b2dfde9bde8c 100644 (file)
@@ -247,8 +247,6 @@ MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
 static void dwc2_get_device_property(struct dwc2_hsotg *hsotg,
                                     char *property, u8 size, u64 *value)
 {
-       u8 val8;
-       u16 val16;
        u32 val32;
 
        switch (size) {
@@ -256,17 +254,7 @@ static void dwc2_get_device_property(struct dwc2_hsotg *hsotg,
                *value = device_property_read_bool(hsotg->dev, property);
                break;
        case 1:
-               if (device_property_read_u8(hsotg->dev, property, &val8))
-                       return;
-
-               *value = val8;
-               break;
        case 2:
-               if (device_property_read_u16(hsotg->dev, property, &val16))
-                       return;
-
-               *value = val16;
-               break;
        case 4:
                if (device_property_read_u32(hsotg->dev, property, &val32))
                        return;
@@ -1100,13 +1088,13 @@ static void dwc2_set_gadget_dma(struct dwc2_hsotg *hsotg)
        /* Buffer DMA */
        dwc2_set_param_bool(hsotg, &p->g_dma,
                            false, "gadget-dma",
-                           true, false,
+                           dma_capable, false,
                            dma_capable);
 
        /* DMA Descriptor */
        dwc2_set_param_bool(hsotg, &p->g_dma_desc, false,
                            "gadget-dma-desc",
-                           p->g_dma, false,
+                           !!hw->dma_desc_enable, false,
                            !!hw->dma_desc_enable);
 }
 
@@ -1130,8 +1118,14 @@ static void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
 
                dwc2_set_param_bool(hsotg, &p->host_dma,
                                    false, "host-dma",
-                                   true, false,
+                                   dma_capable, false,
                                    dma_capable);
+               dwc2_set_param_host_rx_fifo_size(hsotg,
+                               params->host_rx_fifo_size);
+               dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
+                               params->host_nperio_tx_fifo_size);
+               dwc2_set_param_host_perio_tx_fifo_size(hsotg,
+                               params->host_perio_tx_fifo_size);
        }
        dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
        dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable);
@@ -1140,12 +1134,6 @@ static void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
                        params->host_support_fs_ls_low_power);
        dwc2_set_param_enable_dynamic_fifo(hsotg,
                        params->enable_dynamic_fifo);
-       dwc2_set_param_host_rx_fifo_size(hsotg,
-                       params->host_rx_fifo_size);
-       dwc2_set_param_host_nperio_tx_fifo_size(hsotg,
-                       params->host_nperio_tx_fifo_size);
-       dwc2_set_param_host_perio_tx_fifo_size(hsotg,
-                       params->host_perio_tx_fifo_size);
        dwc2_set_param_max_transfer_size(hsotg,
                        params->max_transfer_size);
        dwc2_set_param_max_packet_count(hsotg,
index de5a8570be04213cd3fd39c774662c9173907e34..14b760209680920e65e92fbbb75a14c3bd4a3b18 100644 (file)
@@ -45,9 +45,7 @@
 #define DWC3_XHCI_RESOURCES_NUM        2
 
 #define DWC3_SCRATCHBUF_SIZE   4096    /* each buffer is assumed to be 4KiB */
-#define DWC3_EVENT_SIZE                4       /* bytes */
-#define DWC3_EVENT_MAX_NUM     64      /* 2 events/endpoint */
-#define DWC3_EVENT_BUFFERS_SIZE        (DWC3_EVENT_SIZE * DWC3_EVENT_MAX_NUM)
+#define DWC3_EVENT_BUFFERS_SIZE        4096
 #define DWC3_EVENT_TYPE_MASK   0xfe
 
 #define DWC3_EVENT_TYPE_DEV    0
 #define DWC3_DCFG_SUPERSPEED_PLUS (5 << 0)  /* DWC_usb31 only */
 #define DWC3_DCFG_SUPERSPEED   (4 << 0)
 #define DWC3_DCFG_HIGHSPEED    (0 << 0)
-#define DWC3_DCFG_FULLSPEED2   (1 << 0)
+#define DWC3_DCFG_FULLSPEED    (1 << 0)
 #define DWC3_DCFG_LOWSPEED     (2 << 0)
-#define DWC3_DCFG_FULLSPEED1   (3 << 0)
 
 #define DWC3_DCFG_NUMP_SHIFT   17
 #define DWC3_DCFG_NUMP(n)      (((n) >> DWC3_DCFG_NUMP_SHIFT) & 0x1f)
 #define DWC3_DSTS_SUPERSPEED_PLUS      (5 << 0) /* DWC_usb31 only */
 #define DWC3_DSTS_SUPERSPEED           (4 << 0)
 #define DWC3_DSTS_HIGHSPEED            (0 << 0)
-#define DWC3_DSTS_FULLSPEED2           (1 << 0)
+#define DWC3_DSTS_FULLSPEED            (1 << 0)
 #define DWC3_DSTS_LOWSPEED             (2 << 0)
-#define DWC3_DSTS_FULLSPEED1           (3 << 0)
 
 /* Device Generic Command Register */
 #define DWC3_DGCMD_SET_LMP             0x01
index 29e80cc9b634aab72f59e73c71667dfef9f86a45..eb1b9cb3f9d1df29bb4c6d8fe2b8d4b3ebf7897b 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/platform_data/dwc3-omap.h>
@@ -510,7 +511,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
 
        /* check the DMA Status */
        reg = dwc3_omap_readl(omap->base, USBOTGSS_SYSCONFIG);
-
+       irq_set_status_flags(omap->irq, IRQ_NOAUTOEN);
        ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt,
                                        dwc3_omap_interrupt_thread, IRQF_SHARED,
                                        "dwc3-omap", omap);
@@ -531,7 +532,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
        }
 
        dwc3_omap_enable_irqs(omap);
-
+       enable_irq(omap->irq);
        return 0;
 
 err2:
@@ -552,6 +553,7 @@ static int dwc3_omap_remove(struct platform_device *pdev)
        extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb);
        extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb);
        dwc3_omap_disable_irqs(omap);
+       disable_irq(omap->irq);
        of_platform_depopulate(omap->dev);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
index 2b73339f286ba109d74e465aa55189bf84463398..cce0a220b6b0437f23102da3fa56f4f7373f9082 100644 (file)
@@ -38,6 +38,7 @@
 #define PCI_DEVICE_ID_INTEL_BXT_M              0x1aaa
 #define PCI_DEVICE_ID_INTEL_APL                        0x5aaa
 #define PCI_DEVICE_ID_INTEL_KBP                        0xa2b0
+#define PCI_DEVICE_ID_INTEL_GLK                        0x31aa
 
 #define PCI_INTEL_BXT_DSM_UUID         "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
 #define PCI_INTEL_BXT_FUNC_PMU_PWR     4
@@ -73,16 +74,6 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
 {
        struct platform_device          *dwc3 = dwc->dwc3;
        struct pci_dev                  *pdev = dwc->pci;
-       int                             ret;
-
-       struct property_entry sysdev_property[] = {
-               PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
-               { },
-       };
-
-       ret = platform_device_add_properties(dwc3, sysdev_property);
-       if (ret)
-               return ret;
 
        if (pdev->vendor == PCI_VENDOR_ID_AMD &&
            pdev->device == PCI_DEVICE_ID_AMD_NL_USB) {
@@ -105,6 +96,7 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
                        PROPERTY_ENTRY_BOOL("snps,disable_scramble_quirk"),
                        PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
                        PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+                       PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
                        { },
                };
 
@@ -115,7 +107,8 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
                int ret;
 
                struct property_entry properties[] = {
-                       PROPERTY_ENTRY_STRING("dr-mode", "peripheral"),
+                       PROPERTY_ENTRY_STRING("dr_mode", "peripheral"),
+                       PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
                        { }
                };
 
@@ -167,6 +160,7 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
                        PROPERTY_ENTRY_BOOL("snps,usb3_lpm_capable"),
                        PROPERTY_ENTRY_BOOL("snps,has-lpm-erratum"),
                        PROPERTY_ENTRY_BOOL("snps,dis_enblslpm_quirk"),
+                       PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
                        { },
                };
 
@@ -274,6 +268,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
        {  }    /* Terminating Entry */
 };
index 4878d187c7d4a4b3b3851c29624ff04bb32df020..9bb1f8526f3ed4d5413e008b2ec24b4565f3f619 100644 (file)
@@ -39,18 +39,13 @@ static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);
 static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
                struct dwc3_ep *dep, struct dwc3_request *req);
 
-static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
-               u32 len, u32 type, bool chain)
+static void dwc3_ep0_prepare_one_trb(struct dwc3 *dwc, u8 epnum,
+               dma_addr_t buf_dma, u32 len, u32 type, bool chain)
 {
-       struct dwc3_gadget_ep_cmd_params params;
        struct dwc3_trb                 *trb;
        struct dwc3_ep                  *dep;
 
-       int                             ret;
-
        dep = dwc->eps[epnum];
-       if (dep->flags & DWC3_EP_BUSY)
-               return 0;
 
        trb = &dwc->ep0_trb[dep->trb_enqueue];
 
@@ -71,15 +66,23 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
                trb->ctrl |= (DWC3_TRB_CTRL_IOC
                                | DWC3_TRB_CTRL_LST);
 
-       if (chain)
+       trace_dwc3_prepare_trb(dep, trb);
+}
+
+static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum)
+{
+       struct dwc3_gadget_ep_cmd_params params;
+       struct dwc3_ep                  *dep;
+       int                             ret;
+
+       dep = dwc->eps[epnum];
+       if (dep->flags & DWC3_EP_BUSY)
                return 0;
 
        memset(&params, 0, sizeof(params));
        params.param0 = upper_32_bits(dwc->ep0_trb_addr);
        params.param1 = lower_32_bits(dwc->ep0_trb_addr);
 
-       trace_dwc3_prepare_trb(dep, trb);
-
        ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_STARTTRANSFER, &params);
        if (ret < 0)
                return ret;
@@ -280,8 +283,9 @@ void dwc3_ep0_out_start(struct dwc3 *dwc)
 
        complete(&dwc->ep0_in_setup);
 
-       ret = dwc3_ep0_start_trans(dwc, 0, dwc->ctrl_req_addr, 8,
+       dwc3_ep0_prepare_one_trb(dwc, 0, dwc->ctrl_req_addr, 8,
                        DWC3_TRBCTL_CONTROL_SETUP, false);
+       ret = dwc3_ep0_start_trans(dwc, 0);
        WARN_ON(ret < 0);
 }
 
@@ -912,9 +916,9 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
 
                        dwc->ep0_next_event = DWC3_EP0_COMPLETE;
 
-                       ret = dwc3_ep0_start_trans(dwc, epnum,
-                                       dwc->ctrl_req_addr, 0,
-                                       DWC3_TRBCTL_CONTROL_DATA, false);
+                       dwc3_ep0_prepare_one_trb(dwc, epnum, dwc->ctrl_req_addr,
+                                       0, DWC3_TRBCTL_CONTROL_DATA, false);
+                       ret = dwc3_ep0_start_trans(dwc, epnum);
                        WARN_ON(ret < 0);
                }
        }
@@ -993,9 +997,10 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
        req->direction = !!dep->number;
 
        if (req->request.length == 0) {
-               ret = dwc3_ep0_start_trans(dwc, dep->number,
+               dwc3_ep0_prepare_one_trb(dwc, dep->number,
                                dwc->ctrl_req_addr, 0,
                                DWC3_TRBCTL_CONTROL_DATA, false);
+               ret = dwc3_ep0_start_trans(dwc, dep->number);
        } else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket)
                        && (dep->number == 0)) {
                u32     transfer_size = 0;
@@ -1011,7 +1016,7 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
                if (req->request.length > DWC3_EP0_BOUNCE_SIZE) {
                        transfer_size = ALIGN(req->request.length - maxpacket,
                                              maxpacket);
-                       ret = dwc3_ep0_start_trans(dwc, dep->number,
+                       dwc3_ep0_prepare_one_trb(dwc, dep->number,
                                                   req->request.dma,
                                                   transfer_size,
                                                   DWC3_TRBCTL_CONTROL_DATA,
@@ -1023,18 +1028,20 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
 
                dwc->ep0_bounced = true;
 
-               ret = dwc3_ep0_start_trans(dwc, dep->number,
+               dwc3_ep0_prepare_one_trb(dwc, dep->number,
                                dwc->ep0_bounce_addr, transfer_size,
                                DWC3_TRBCTL_CONTROL_DATA, false);
+               ret = dwc3_ep0_start_trans(dwc, dep->number);
        } else {
                ret = usb_gadget_map_request_by_dev(dwc->sysdev,
                                &req->request, dep->number);
                if (ret)
                        return;
 
-               ret = dwc3_ep0_start_trans(dwc, dep->number, req->request.dma,
+               dwc3_ep0_prepare_one_trb(dwc, dep->number, req->request.dma,
                                req->request.length, DWC3_TRBCTL_CONTROL_DATA,
                                false);
+               ret = dwc3_ep0_start_trans(dwc, dep->number);
        }
 
        WARN_ON(ret < 0);
@@ -1048,8 +1055,9 @@ static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
        type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3
                : DWC3_TRBCTL_CONTROL_STATUS2;
 
-       return dwc3_ep0_start_trans(dwc, dep->number,
+       dwc3_ep0_prepare_one_trb(dwc, dep->number,
                        dwc->ctrl_req_addr, 0, type, false);
+       return dwc3_ep0_start_trans(dwc, dep->number);
 }
 
 static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
index efddaf5d11d12639867ce8cb7c9a24ae2c7333c9..204c754cc647072d4caffbe06a4c1a8b869ca51a 100644 (file)
@@ -180,11 +180,11 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
        if (req->request.status == -EINPROGRESS)
                req->request.status = status;
 
-       if (dwc->ep0_bounced && dep->number == 0)
+       if (dwc->ep0_bounced && dep->number <= 1)
                dwc->ep0_bounced = false;
-       else
-               usb_gadget_unmap_request_by_dev(dwc->sysdev,
-                               &req->request, req->direction);
+
+       usb_gadget_unmap_request_by_dev(dwc->sysdev,
+                       &req->request, req->direction);
 
        trace_dwc3_gadget_giveback(req);
 
@@ -1720,7 +1720,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
                        reg |= DWC3_DCFG_LOWSPEED;
                        break;
                case USB_SPEED_FULL:
-                       reg |= DWC3_DCFG_FULLSPEED1;
+                       reg |= DWC3_DCFG_FULLSPEED;
                        break;
                case USB_SPEED_HIGH:
                        reg |= DWC3_DCFG_HIGHSPEED;
@@ -2232,9 +2232,14 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
 
        dep = dwc->eps[epnum];
 
-       if (!(dep->flags & DWC3_EP_ENABLED) &&
-           !(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
-               return;
+       if (!(dep->flags & DWC3_EP_ENABLED)) {
+               if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
+                       return;
+
+               /* Handle only EPCMDCMPLT when EP disabled */
+               if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT)
+                       return;
+       }
 
        if (epnum == 0 || epnum == 1) {
                dwc3_ep0_interrupt(dwc, event);
@@ -2531,8 +2536,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
                dwc->gadget.ep0->maxpacket = 64;
                dwc->gadget.speed = USB_SPEED_HIGH;
                break;
-       case DWC3_DSTS_FULLSPEED2:
-       case DWC3_DSTS_FULLSPEED1:
+       case DWC3_DSTS_FULLSPEED:
                dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
                dwc->gadget.ep0->maxpacket = 64;
                dwc->gadget.speed = USB_SPEED_FULL;
index 41ab61f9b6e0b24be1dd950afdae9f4f3a179d50..002822d98fda207505581ca75cb47373db81fb78 100644 (file)
@@ -1694,9 +1694,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
                value = min(w_length, (u16) 1);
                break;
 
-       /* function drivers must handle get/set altsetting; if there's
-        * no get() method, we know only altsetting zero works.
-        */
+       /* function drivers must handle get/set altsetting */
        case USB_REQ_SET_INTERFACE:
                if (ctrl->bRequestType != USB_RECIP_INTERFACE)
                        goto unknown;
@@ -1705,7 +1703,13 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
                f = cdev->config->interface[intf];
                if (!f)
                        break;
-               if (w_value && !f->set_alt)
+
+               /*
+                * If there's no get_alt() method, we know only altsetting zero
+                * works. There is no need to check if set_alt() is not NULL
+                * as we check this in usb_add_function().
+                */
+               if (w_value && !f->get_alt)
                        break;
                value = f->set_alt(f, w_index, w_value);
                if (value == USB_GADGET_DELAYED_STATUS) {
index aab3fc1dbb94c44f0287990e89aa98fc8bf3da3a..5e746adc8a2d5416b7e1bcbeb8c41559716599b4 100644 (file)
@@ -2091,8 +2091,8 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
 
        case FFS_STRING:
                /*
-                * Strings are indexed from 1 (0 is magic ;) reserved
-                * for languages list or some such)
+                * Strings are indexed from 1 (0 is reserved
+                * for languages list)
                 */
                if (*valuep > helper->ffs->strings_count)
                        helper->ffs->strings_count = *valuep;
@@ -2252,7 +2252,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
 
                if (len < sizeof(*d) ||
                    d->bFirstInterfaceNumber >= ffs->interfaces_count ||
-                   !d->Reserved1)
+                   d->Reserved1)
                        return -EINVAL;
                for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
                        if (d->Reserved2[i])
@@ -3666,6 +3666,7 @@ static void ffs_closed(struct ffs_data *ffs)
 {
        struct ffs_dev *ffs_obj;
        struct f_fs_opts *opts;
+       struct config_item *ci;
 
        ENTER();
        ffs_dev_lock();
@@ -3689,8 +3690,11 @@ static void ffs_closed(struct ffs_data *ffs)
            || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
                goto done;
 
-       unregister_gadget_item(ffs_obj->opts->
-                              func_inst.group.cg_item.ci_parent->ci_parent);
+       ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
+       ffs_dev_unlock();
+
+       unregister_gadget_item(ci);
+       return;
 done:
        ffs_dev_unlock();
 }
index 3151d2a0fe594b516841ae8c653878ad45899e2e..5f8139b8e601808414407e88a94a53a60892ae55 100644 (file)
@@ -593,7 +593,7 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
                }
                status = usb_ep_enable(hidg->out_ep);
                if (status < 0) {
-                       ERROR(cdev, "Enable IN endpoint FAILED!\n");
+                       ERROR(cdev, "Enable OUT endpoint FAILED!\n");
                        goto fail;
                }
                hidg->out_ep->driver_data = hidg;
index e8f4102d19df54fa1983578f8f773067ee317d92..6bde4396927c997b686419cbc4fb15997b0e304f 100644 (file)
@@ -1126,7 +1126,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
        /* data and/or status stage for control request */
        } else if (dev->state == STATE_DEV_SETUP) {
 
-               /* IN DATA+STATUS caller makes len <= wLength */
+               len = min_t(size_t, len, dev->setup_wLength);
                if (dev->setup_in) {
                        retval = setup_req (dev->gadget->ep0, dev->req, len);
                        if (retval == 0) {
@@ -1734,10 +1734,12 @@ static struct usb_gadget_driver gadgetfs_driver = {
  * such as configuration notifications.
  */
 
-static int is_valid_config (struct usb_config_descriptor *config)
+static int is_valid_config(struct usb_config_descriptor *config,
+               unsigned int total)
 {
        return config->bDescriptorType == USB_DT_CONFIG
                && config->bLength == USB_DT_CONFIG_SIZE
+               && total >= USB_DT_CONFIG_SIZE
                && config->bConfigurationValue != 0
                && (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
                && (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
@@ -1762,7 +1764,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
        }
        spin_unlock_irq(&dev->lock);
 
-       if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
+       if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
+           (len > PAGE_SIZE * 4))
                return -EINVAL;
 
        /* we might need to change message format someday */
@@ -1786,7 +1789,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
        /* full or low speed config */
        dev->config = (void *) kbuf;
        total = le16_to_cpu(dev->config->wTotalLength);
-       if (!is_valid_config (dev->config) || total >= length)
+       if (!is_valid_config(dev->config, total) ||
+                       total > length - USB_DT_DEVICE_SIZE)
                goto fail;
        kbuf += total;
        length -= total;
@@ -1795,10 +1799,13 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
        if (kbuf [1] == USB_DT_CONFIG) {
                dev->hs_config = (void *) kbuf;
                total = le16_to_cpu(dev->hs_config->wTotalLength);
-               if (!is_valid_config (dev->hs_config) || total >= length)
+               if (!is_valid_config(dev->hs_config, total) ||
+                               total > length - USB_DT_DEVICE_SIZE)
                        goto fail;
                kbuf += total;
                length -= total;
+       } else {
+               dev->hs_config = NULL;
        }
 
        /* could support multiple configs, using another encoding! */
@@ -1811,7 +1818,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
                        || dev->dev->bDescriptorType != USB_DT_DEVICE
                        || dev->dev->bNumConfigurations != 1)
                goto fail;
-       dev->dev->bNumConfigurations = 1;
        dev->dev->bcdUSB = cpu_to_le16 (0x0200);
 
        /* triggers gadgetfs_bind(); then we can enumerate. */
index 9483489080f66230370db44a2326a152a3f3eb47..0402177f93cdde2346586ff95b392256e17f84ce 100644 (file)
@@ -1317,7 +1317,11 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
                        if (!ret)
                                break;
                }
-               if (!ret && !udc->driver)
+               if (ret)
+                       ret = -ENODEV;
+               else if (udc->driver)
+                       ret = -EBUSY;
+               else
                        goto found;
        } else {
                list_for_each_entry(udc, &udc_list, list) {
index 02b14e91ae6c5a586417dec05d558dbe1cace35a..c60abe3a68f9cf48c21831d40bc32c4e9390b892 100644 (file)
@@ -330,7 +330,7 @@ static void nuke(struct dummy *dum, struct dummy_ep *ep)
 /* caller must hold lock */
 static void stop_activity(struct dummy *dum)
 {
-       struct dummy_ep *ep;
+       int i;
 
        /* prevent any more requests */
        dum->address = 0;
@@ -338,8 +338,8 @@ static void stop_activity(struct dummy *dum)
        /* The timer is left running so that outstanding URBs can fail */
 
        /* nuke any pending requests first, so driver i/o is quiesced */
-       list_for_each_entry(ep, &dum->gadget.ep_list, ep.ep_list)
-               nuke(dum, ep);
+       for (i = 0; i < DUMMY_ENDPOINTS; ++i)
+               nuke(dum, &dum->ep[i]);
 
        /* driver now does any non-usb quiescing necessary */
 }
index be9e6383688111fe8d2c53d3e679758f36fd9873..414e3c376dbbd59587dc3398f4a90872a5aae19c 100644 (file)
@@ -43,7 +43,6 @@ struct at91_usbh_data {
        struct gpio_desc *overcurrent_pin[AT91_MAX_USBH_PORTS];
        u8 ports;                               /* number of ports on root hub */
        u8 overcurrent_supported;
-       u8 vbus_pin_active_low[AT91_MAX_USBH_PORTS];
        u8 overcurrent_status[AT91_MAX_USBH_PORTS];
        u8 overcurrent_changed[AT91_MAX_USBH_PORTS];
 };
@@ -266,8 +265,7 @@ static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int
        if (!valid_port(port))
                return;
 
-       gpiod_set_value(pdata->vbus_pin[port],
-                       pdata->vbus_pin_active_low[port] ^ enable);
+       gpiod_set_value(pdata->vbus_pin[port], enable);
 }
 
 static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
@@ -275,8 +273,7 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
        if (!valid_port(port))
                return -EINVAL;
 
-       return gpiod_get_value(pdata->vbus_pin[port]) ^
-              pdata->vbus_pin_active_low[port];
+       return gpiod_get_value(pdata->vbus_pin[port]);
 }
 
 /*
@@ -533,18 +530,17 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
                pdata->ports = ports;
 
        at91_for_each_port(i) {
-               pdata->vbus_pin[i] = devm_gpiod_get_optional(&pdev->dev,
-                                                            "atmel,vbus-gpio",
-                                                            GPIOD_IN);
+               if (i >= pdata->ports)
+                       break;
+
+               pdata->vbus_pin[i] =
+                       devm_gpiod_get_index_optional(&pdev->dev, "atmel,vbus",
+                                                     i, GPIOD_OUT_HIGH);
                if (IS_ERR(pdata->vbus_pin[i])) {
                        err = PTR_ERR(pdata->vbus_pin[i]);
                        dev_err(&pdev->dev, "unable to claim gpio \"vbus\": %d\n", err);
                        continue;
                }
-
-               pdata->vbus_pin_active_low[i] = gpiod_get_value(pdata->vbus_pin[i]);
-
-               ohci_at91_usb_set_power(pdata, i, 1);
        }
 
        at91_for_each_port(i) {
@@ -552,8 +548,8 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
                        break;
 
                pdata->overcurrent_pin[i] =
-                       devm_gpiod_get_optional(&pdev->dev,
-                                               "atmel,oc-gpio", GPIOD_IN);
+                       devm_gpiod_get_index_optional(&pdev->dev, "atmel,oc",
+                                                     i, GPIOD_IN);
                if (IS_ERR(pdata->overcurrent_pin[i])) {
                        err = PTR_ERR(pdata->overcurrent_pin[i]);
                        dev_err(&pdev->dev, "unable to claim gpio \"overcurrent\": %d\n", err);
index 321de2e0161b4a3ef825489ab59a96e6250b1cd9..8414ed2a02de98b08ccadbe375d75b93da59cd35 100644 (file)
@@ -979,6 +979,40 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
        xhci->devs[slot_id] = NULL;
 }
 
+/*
+ * Free a virt_device structure.
+ * If the virt_device added a tt_info (a hub) and has children pointing to
+ * that tt_info, then free the child first. Recursive.
+ * We can't rely on udev at this point to find child-parent relationships.
+ */
+void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
+{
+       struct xhci_virt_device *vdev;
+       struct list_head *tt_list_head;
+       struct xhci_tt_bw_info *tt_info, *next;
+       int i;
+
+       vdev = xhci->devs[slot_id];
+       if (!vdev)
+               return;
+
+       tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
+       list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
+               /* is this a hub device that added a tt_info to the tts list */
+               if (tt_info->slot_id == slot_id) {
+                       /* are any devices using this tt_info? */
+                       for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
+                               vdev = xhci->devs[i];
+                               if (vdev && (vdev->tt_info == tt_info))
+                                       xhci_free_virt_devices_depth_first(
+                                               xhci, i);
+                       }
+               }
+       }
+       /* we are now at a leaf device */
+       xhci_free_virt_device(xhci, slot_id);
+}
+
 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
                struct usb_device *udev, gfp_t flags)
 {
@@ -1795,7 +1829,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
        int size;
        int i, j, num_ports;
 
-       del_timer_sync(&xhci->cmd_timer);
+       cancel_delayed_work_sync(&xhci->cmd_timer);
 
        /* Free the Event Ring Segment Table and the actual Event Ring */
        size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
@@ -1828,8 +1862,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
                }
        }
 
-       for (i = 1; i < MAX_HC_SLOTS; ++i)
-               xhci_free_virt_device(xhci, i);
+       for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
+               xhci_free_virt_devices_depth_first(xhci, i);
 
        dma_pool_destroy(xhci->segment_pool);
        xhci->segment_pool = NULL;
@@ -2342,9 +2376,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 
        INIT_LIST_HEAD(&xhci->cmd_list);
 
-       /* init command timeout timer */
-       setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
-                   (unsigned long)xhci);
+       /* init command timeout work */
+       INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
+       init_completion(&xhci->cmd_ring_stop_completion);
 
        page_size = readl(&xhci->op_regs->page_size);
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
index 1094ebd2838ff95a5834f7a680e4a2c0aa13301c..bac961cd24ad62b649da486f62c519ab0b51661d 100644 (file)
@@ -579,8 +579,10 @@ static int xhci_mtk_probe(struct platform_device *pdev)
                goto disable_ldos;
 
        irq = platform_get_irq(pdev, 0);
-       if (irq < 0)
+       if (irq < 0) {
+               ret = irq;
                goto disable_clk;
+       }
 
        /* Initialize dma_mask and coherent_dma_mask to 32-bits */
        ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
index e96ae80d107e94fd8db8c33cae52ff9153f14730..954abfd5014d281d537e11353cb6093b9b7a0bdd 100644 (file)
@@ -165,7 +165,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
                 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
                 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
-                pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
+                pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
+                pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) {
                xhci->quirks |= XHCI_PME_STUCK_QUIRK;
        }
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
index bdf6b13d9b6755a0504a15ba10fcfe1d06b5bc73..25f522b09dd9746938168e6f675464db9f2e6740 100644 (file)
@@ -279,23 +279,76 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
        readl(&xhci->dba->doorbell[0]);
 }
 
-static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
+static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
+{
+       return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
+}
+
+static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
+{
+       return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
+                                       cmd_list);
+}
+
+/*
+ * Turn all commands on command ring with status set to "aborted" to no-op trbs.
+ * If there are other commands waiting then restart the ring and kick the timer.
+ * This must be called with command ring stopped and xhci->lock held.
+ */
+static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
+                                        struct xhci_command *cur_cmd)
+{
+       struct xhci_command *i_cmd;
+       u32 cycle_state;
+
+       /* Turn all aborted commands in list to no-ops, then restart */
+       list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {
+
+               if (i_cmd->status != COMP_CMD_ABORT)
+                       continue;
+
+               i_cmd->status = COMP_CMD_STOP;
+
+               xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
+                        i_cmd->command_trb);
+               /* get cycle state from the original cmd trb */
+               cycle_state = le32_to_cpu(
+                       i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
+               /* modify the command trb to no-op command */
+               i_cmd->command_trb->generic.field[0] = 0;
+               i_cmd->command_trb->generic.field[1] = 0;
+               i_cmd->command_trb->generic.field[2] = 0;
+               i_cmd->command_trb->generic.field[3] = cpu_to_le32(
+                       TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
+
+               /*
+                * caller waiting for completion is called when command
+                *  completion event is received for these no-op commands
+                */
+       }
+
+       xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
+
+       /* ring command ring doorbell to restart the command ring */
+       if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
+           !(xhci->xhc_state & XHCI_STATE_DYING)) {
+               xhci->current_cmd = cur_cmd;
+               xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
+               xhci_ring_cmd_db(xhci);
+       }
+}
+
+/* Must be called with xhci->lock held, releases and acquires lock back */
+static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
 {
        u64 temp_64;
        int ret;
 
        xhci_dbg(xhci, "Abort command ring\n");
 
-       temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
-       xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
+       reinit_completion(&xhci->cmd_ring_stop_completion);
 
-       /*
-        * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
-        * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
-        * but the completion event in never sent. Use the cmd timeout timer to
-        * handle those cases. Use twice the time to cover the bit polling retry
-        */
-       mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
+       temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
                        &xhci->op_regs->cmd_ring);
 
@@ -315,17 +368,30 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
                udelay(1000);
                ret = xhci_handshake(&xhci->op_regs->cmd_ring,
                                     CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
-               if (ret == 0)
-                       return 0;
-
-               xhci_err(xhci, "Stopped the command ring failed, "
-                               "maybe the host is dead\n");
-               del_timer(&xhci->cmd_timer);
-               xhci->xhc_state |= XHCI_STATE_DYING;
-               xhci_halt(xhci);
-               return -ESHUTDOWN;
+               if (ret < 0) {
+                       xhci_err(xhci, "Stopped the command ring failed, "
+                                "maybe the host is dead\n");
+                       xhci->xhc_state |= XHCI_STATE_DYING;
+                       xhci_halt(xhci);
+                       return -ESHUTDOWN;
+               }
+       }
+       /*
+        * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
+        * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
+        * but the completion event is never sent. Wait 2 secs (arbitrary
+        * number) to handle those cases after negation of CMD_RING_RUNNING.
+        */
+       spin_unlock_irqrestore(&xhci->lock, flags);
+       ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
+                                         msecs_to_jiffies(2000));
+       spin_lock_irqsave(&xhci->lock, flags);
+       if (!ret) {
+               xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
+               xhci_cleanup_command_queue(xhci);
+       } else {
+               xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
        }
-
        return 0;
 }
 
@@ -1207,101 +1273,62 @@ void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
                xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
 }
 
-/*
- * Turn all commands on command ring with status set to "aborted" to no-op trbs.
- * If there are other commands waiting then restart the ring and kick the timer.
- * This must be called with command ring stopped and xhci->lock held.
- */
-static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
-                                        struct xhci_command *cur_cmd)
-{
-       struct xhci_command *i_cmd, *tmp_cmd;
-       u32 cycle_state;
-
-       /* Turn all aborted commands in list to no-ops, then restart */
-       list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
-                                cmd_list) {
-
-               if (i_cmd->status != COMP_CMD_ABORT)
-                       continue;
-
-               i_cmd->status = COMP_CMD_STOP;
-
-               xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
-                        i_cmd->command_trb);
-               /* get cycle state from the original cmd trb */
-               cycle_state = le32_to_cpu(
-                       i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
-               /* modify the command trb to no-op command */
-               i_cmd->command_trb->generic.field[0] = 0;
-               i_cmd->command_trb->generic.field[1] = 0;
-               i_cmd->command_trb->generic.field[2] = 0;
-               i_cmd->command_trb->generic.field[3] = cpu_to_le32(
-                       TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
-
-               /*
-                * caller waiting for completion is called when command
-                *  completion event is received for these no-op commands
-                */
-       }
-
-       xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
-
-       /* ring command ring doorbell to restart the command ring */
-       if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
-           !(xhci->xhc_state & XHCI_STATE_DYING)) {
-               xhci->current_cmd = cur_cmd;
-               mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
-               xhci_ring_cmd_db(xhci);
-       }
-       return;
-}
-
-
-void xhci_handle_command_timeout(unsigned long data)
+void xhci_handle_command_timeout(struct work_struct *work)
 {
        struct xhci_hcd *xhci;
        int ret;
        unsigned long flags;
        u64 hw_ring_state;
-       bool second_timeout = false;
-       xhci = (struct xhci_hcd *) data;
 
-       /* mark this command to be cancelled */
+       xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);
+
        spin_lock_irqsave(&xhci->lock, flags);
-       if (xhci->current_cmd) {
-               if (xhci->current_cmd->status == COMP_CMD_ABORT)
-                       second_timeout = true;
-               xhci->current_cmd->status = COMP_CMD_ABORT;
+
+       /*
+        * If timeout work is pending, or current_cmd is NULL, it means we
+        * raced with command completion. Command is handled so just return.
+        */
+       if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               return;
        }
+       /* mark this command to be cancelled */
+       xhci->current_cmd->status = COMP_CMD_ABORT;
 
        /* Make sure command ring is running before aborting it */
        hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
            (hw_ring_state & CMD_RING_RUNNING))  {
-               spin_unlock_irqrestore(&xhci->lock, flags);
+               /* Prevent new doorbell, and start command abort */
+               xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
                xhci_dbg(xhci, "Command timeout\n");
-               ret = xhci_abort_cmd_ring(xhci);
+               ret = xhci_abort_cmd_ring(xhci, flags);
                if (unlikely(ret == -ESHUTDOWN)) {
                        xhci_err(xhci, "Abort command ring failed\n");
                        xhci_cleanup_command_queue(xhci);
+                       spin_unlock_irqrestore(&xhci->lock, flags);
                        usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
                        xhci_dbg(xhci, "xHCI host controller is dead.\n");
+
+                       return;
                }
-               return;
+
+               goto time_out_completed;
        }
 
-       /* command ring failed to restart, or host removed. Bail out */
-       if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
-               spin_unlock_irqrestore(&xhci->lock, flags);
-               xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
+       /* host removed. Bail out */
+       if (xhci->xhc_state & XHCI_STATE_REMOVING) {
+               xhci_dbg(xhci, "host removed, ring start fail?\n");
                xhci_cleanup_command_queue(xhci);
-               return;
+
+               goto time_out_completed;
        }
 
        /* command timeout on stopped ring, ring can't be aborted */
        xhci_dbg(xhci, "Command timeout on stopped ring\n");
        xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
+
+time_out_completed:
        spin_unlock_irqrestore(&xhci->lock, flags);
        return;
 }
@@ -1333,7 +1360,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 
        cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
 
-       del_timer(&xhci->cmd_timer);
+       cancel_delayed_work(&xhci->cmd_timer);
 
        trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
 
@@ -1341,7 +1368,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 
        /* If CMD ring stopped we own the trbs between enqueue and dequeue */
        if (cmd_comp_code == COMP_CMD_STOP) {
-               xhci_handle_stopped_cmd_ring(xhci, cmd);
+               complete_all(&xhci->cmd_ring_stop_completion);
                return;
        }
 
@@ -1359,8 +1386,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
         */
        if (cmd_comp_code == COMP_CMD_ABORT) {
                xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
-               if (cmd->status == COMP_CMD_ABORT)
+               if (cmd->status == COMP_CMD_ABORT) {
+                       if (xhci->current_cmd == cmd)
+                               xhci->current_cmd = NULL;
                        goto event_handled;
+               }
        }
 
        cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
@@ -1421,7 +1451,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
        if (cmd->cmd_list.next != &xhci->cmd_list) {
                xhci->current_cmd = list_entry(cmd->cmd_list.next,
                                               struct xhci_command, cmd_list);
-               mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+               xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
+       } else if (xhci->current_cmd == cmd) {
+               xhci->current_cmd = NULL;
        }
 
 event_handled:
@@ -1939,8 +1971,9 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
        struct xhci_ep_ctx *ep_ctx;
        u32 trb_comp_code;
        u32 remaining, requested;
-       bool on_data_stage;
+       u32 trb_type;
 
+       trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
        xdev = xhci->devs[slot_id];
        ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
@@ -1950,14 +1983,11 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
        requested = td->urb->transfer_buffer_length;
        remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
 
-       /* not setup (dequeue), or status stage means we are at data stage */
-       on_data_stage = (ep_trb != ep_ring->dequeue && ep_trb != td->last_trb);
-
        switch (trb_comp_code) {
        case COMP_SUCCESS:
-               if (ep_trb != td->last_trb) {
+               if (trb_type != TRB_STATUS) {
                        xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
-                                 on_data_stage ? "data" : "setup");
+                                 (trb_type == TRB_DATA) ? "data" : "setup");
                        *status = -ESHUTDOWN;
                        break;
                }
@@ -1967,15 +1997,25 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
                *status = 0;
                break;
        case COMP_STOP_SHORT:
-               if (on_data_stage)
+               if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
                        td->urb->actual_length = remaining;
                else
                        xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
                goto finish_td;
        case COMP_STOP:
-               if (on_data_stage)
+               switch (trb_type) {
+               case TRB_SETUP:
+                       td->urb->actual_length = 0;
+                       goto finish_td;
+               case TRB_DATA:
+               case TRB_NORMAL:
                        td->urb->actual_length = requested - remaining;
-               goto finish_td;
+                       goto finish_td;
+               default:
+                       xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
+                                 trb_type);
+                       goto finish_td;
+               }
        case COMP_STOP_INVAL:
                goto finish_td;
        default:
@@ -1987,7 +2027,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
                /* else fall through */
        case COMP_STALL:
                /* Did we transfer part of the data (middle) phase? */
-               if (on_data_stage)
+               if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
                        td->urb->actual_length = requested - remaining;
                else if (!td->urb_length_set)
                        td->urb->actual_length = 0;
@@ -1995,14 +2035,15 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
        }
 
        /* stopped at setup stage, no data transferred */
-       if (ep_trb == ep_ring->dequeue)
+       if (trb_type == TRB_SETUP)
                goto finish_td;
 
        /*
         * if on data stage then update the actual_length of the URB and flag it
         * as set, so it won't be overwritten in the event for the last TRB.
         */
-       if (on_data_stage) {
+       if (trb_type == TRB_DATA ||
+               trb_type == TRB_NORMAL) {
                td->urb_length_set = true;
                td->urb->actual_length = requested - remaining;
                xhci_dbg(xhci, "Waiting for status stage event\n");
@@ -3790,9 +3831,9 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
 
        /* if there are no other commands queued we start the timeout timer */
        if (xhci->cmd_list.next == &cmd->cmd_list &&
-           !timer_pending(&xhci->cmd_timer)) {
+           !delayed_work_pending(&xhci->cmd_timer)) {
                xhci->current_cmd = cmd;
-               mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+               xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
        }
 
        queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
index 1cd56417cbec455b6db7605d3b3e20aa20a0998d..0c8deb9ed42def112efc8ad1aaf95526315c164f 100644 (file)
@@ -3787,8 +3787,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 
        mutex_lock(&xhci->mutex);
 
-       if (xhci->xhc_state)    /* dying, removing or halted */
+       if (xhci->xhc_state) {  /* dying, removing or halted */
+               ret = -ESHUTDOWN;
                goto out;
+       }
 
        if (!udev->slot_id) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_address,
index 8ccc11a974b8a62ee0afe18808babb0603071efa..2d7b6374b58d0c092528dff89e761716552735d2 100644 (file)
@@ -1568,7 +1568,8 @@ struct xhci_hcd {
 #define CMD_RING_STATE_STOPPED         (1 << 2)
        struct list_head        cmd_list;
        unsigned int            cmd_ring_reserved_trbs;
-       struct timer_list       cmd_timer;
+       struct delayed_work     cmd_timer;
+       struct completion       cmd_ring_stop_completion;
        struct xhci_command     *current_cmd;
        struct xhci_ring        *event_ring;
        struct xhci_erst        erst;
@@ -1934,7 +1935,7 @@ void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
                unsigned int slot_id, unsigned int ep_index,
                struct xhci_dequeue_state *deq_state);
 void xhci_stop_endpoint_command_watchdog(unsigned long arg);
-void xhci_handle_command_timeout(unsigned long data);
+void xhci_handle_command_timeout(struct work_struct *work);
 
 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
                unsigned int ep_index, unsigned int stream_id);
index 310238c6b5cd699bcd89ef101b926feba5d60a8c..8967980718170d5119d6331ed8395dfa2f4fa970 100644 (file)
@@ -469,6 +469,7 @@ static const struct musb_platform_ops bfin_ops = {
        .init           = bfin_musb_init,
        .exit           = bfin_musb_exit,
 
+       .fifo_offset    = bfin_fifo_offset,
        .readb          = bfin_readb,
        .writeb         = bfin_writeb,
        .readw          = bfin_readw,
index 9e226468a13eb07af0bf5a6407d81cd304436369..fca288bbc8009580ba96198ce1a2a49330074d20 100644 (file)
@@ -2050,6 +2050,7 @@ struct musb_pending_work {
        struct list_head node;
 };
 
+#ifdef CONFIG_PM
 /*
  * Called from musb_runtime_resume(), musb_resume(), and
  * musb_queue_resume_work(). Callers must take musb->lock.
@@ -2077,6 +2078,7 @@ static int musb_run_resume_work(struct musb *musb)
 
        return error;
 }
+#endif
 
 /*
  * Called to run work if device is active or else queue the work to happen
index a611e2f67bdc9e3233e4d6efde27d8ce18fe0a67..ade902ea1221e18543de05a5188c715d86af7398 100644 (file)
@@ -216,6 +216,7 @@ struct musb_platform_ops {
        void    (*pre_root_reset_end)(struct musb *musb);
        void    (*post_root_reset_end)(struct musb *musb);
        int     (*phy_callback)(enum musb_vbus_id_status status);
+       void    (*clear_ep_rxintr)(struct musb *musb, int epnum);
 };
 
 /*
@@ -626,6 +627,12 @@ static inline void musb_platform_post_root_reset_end(struct musb *musb)
                musb->ops->post_root_reset_end(musb);
 }
 
+static inline void musb_platform_clear_ep_rxintr(struct musb *musb, int epnum)
+{
+       if (musb->ops->clear_ep_rxintr)
+               musb->ops->clear_ep_rxintr(musb, epnum);
+}
+
 /*
  * gets the "dr_mode" property from DT and converts it into musb_mode
  * if the property is not found or not recognized returns MUSB_OTG
index feae1561b9abb6924d2fe2fc6f1222dfac9d2be7..9f125e179acd444ca43c73a1a5d263b8e5c8893d 100644 (file)
@@ -267,6 +267,17 @@ static void otg_timer(unsigned long _musb)
        pm_runtime_put_autosuspend(dev);
 }
 
+void dsps_musb_clear_ep_rxintr(struct musb *musb, int epnum)
+{
+       u32 epintr;
+       struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent);
+       const struct dsps_musb_wrapper *wrp = glue->wrp;
+
+       /* musb->lock might already be held */
+       epintr = (1 << epnum) << wrp->rxep_shift;
+       musb_writel(musb->ctrl_base, wrp->epintr_status, epintr);
+}
+
 static irqreturn_t dsps_interrupt(int irq, void *hci)
 {
        struct musb  *musb = hci;
@@ -622,6 +633,7 @@ static struct musb_platform_ops dsps_ops = {
 
        .set_mode       = dsps_musb_set_mode,
        .recover        = dsps_musb_recover,
+       .clear_ep_rxintr = dsps_musb_clear_ep_rxintr,
 };
 
 static u64 musb_dmamask = DMA_BIT_MASK(32);
index f6cdbad00daceb5837a8cfcdec37438389aae800..ac3a4952abb4b290b019ac59fbddd7004b50d949 100644 (file)
@@ -2374,12 +2374,11 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
        int                     is_in = usb_pipein(urb->pipe);
        int                     status = 0;
        u16                     csr;
+       struct dma_channel      *dma = NULL;
 
        musb_ep_select(regs, hw_end);
 
        if (is_dma_capable()) {
-               struct dma_channel      *dma;
-
                dma = is_in ? ep->rx_channel : ep->tx_channel;
                if (dma) {
                        status = ep->musb->dma_controller->channel_abort(dma);
@@ -2395,10 +2394,9 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
                /* giveback saves bulk toggle */
                csr = musb_h_flush_rxfifo(ep, 0);
 
-               /* REVISIT we still get an irq; should likely clear the
-                * endpoint's irq status here to avoid bogus irqs.
-                * clearing that status is platform-specific...
-                */
+               /* clear the endpoint's irq status here to avoid bogus irqs */
+               if (is_dma_capable() && dma)
+                       musb_platform_clear_ep_rxintr(musb, ep->epnum);
        } else if (ep->epnum) {
                musb_h_tx_flush_fifo(ep);
                csr = musb_readw(epio, MUSB_TXCSR);
index f7b13fd252574f848e7984905cbaef4478c2e90a..a3dcbd55e43609b3140979d971a43426a1ce3db0 100644 (file)
@@ -157,5 +157,5 @@ struct musb_dma_controller {
        void __iomem                    *base;
        u8                              channel_count;
        u8                              used_channels;
-       u8                              irq;
+       int                             irq;
 };
index 5f17a3b9916d79ea28c82b04755e23d5680d4b2d..80260b08398b2e55833c65d21e0be70cc43ccf01 100644 (file)
@@ -50,6 +50,7 @@
 #define CYBERJACK_PRODUCT_ID   0x0100
 
 /* Function prototypes */
+static int cyberjack_attach(struct usb_serial *serial);
 static int cyberjack_port_probe(struct usb_serial_port *port);
 static int cyberjack_port_remove(struct usb_serial_port *port);
 static int  cyberjack_open(struct tty_struct *tty,
@@ -77,6 +78,7 @@ static struct usb_serial_driver cyberjack_device = {
        .description =          "Reiner SCT Cyberjack USB card reader",
        .id_table =             id_table,
        .num_ports =            1,
+       .attach =               cyberjack_attach,
        .port_probe =           cyberjack_port_probe,
        .port_remove =          cyberjack_port_remove,
        .open =                 cyberjack_open,
@@ -100,6 +102,14 @@ struct cyberjack_private {
        short           wrsent;         /* Data already sent */
 };
 
+static int cyberjack_attach(struct usb_serial *serial)
+{
+       if (serial->num_bulk_out < serial->num_ports)
+               return -ENODEV;
+
+       return 0;
+}
+
 static int cyberjack_port_probe(struct usb_serial_port *port)
 {
        struct cyberjack_private *priv;
index 8282a6a18fee83f6f1838b2f6f55b37a0ec4d409..22f23a429a95cb6a1cdeaca445a92035fd3e98e8 100644 (file)
@@ -1237,6 +1237,7 @@ static int f81534_attach(struct usb_serial *serial)
 static int f81534_port_probe(struct usb_serial_port *port)
 {
        struct f81534_port_private *port_priv;
+       int ret;
 
        port_priv = devm_kzalloc(&port->dev, sizeof(*port_priv), GFP_KERNEL);
        if (!port_priv)
@@ -1246,10 +1247,11 @@ static int f81534_port_probe(struct usb_serial_port *port)
        mutex_init(&port_priv->mcr_mutex);
 
        /* Assign logic-to-phy mapping */
-       port_priv->phy_num = f81534_logic_to_phy_port(port->serial, port);
-       if (port_priv->phy_num < 0 || port_priv->phy_num >= F81534_NUM_PORT)
-               return -ENODEV;
+       ret = f81534_logic_to_phy_port(port->serial, port);
+       if (ret < 0)
+               return ret;
 
+       port_priv->phy_num = ret;
        usb_set_serial_port_data(port, port_priv);
        dev_dbg(&port->dev, "%s: port_number: %d, phy_num: %d\n", __func__,
                        port->port_number, port_priv->phy_num);
index 97cabf803c2fa91a134c957dba74c4f77b97c712..b2f2e87aed945d7aceac24c99cf2f7c401beb5b2 100644 (file)
@@ -1043,6 +1043,7 @@ static int garmin_write_bulk(struct usb_serial_port *port,
                   "%s - usb_submit_urb(write bulk) failed with status = %d\n",
                                __func__, status);
                count = status;
+               kfree(buffer);
        }
 
        /* we are done with this urb, so let the host driver
index dcc0c58aaad5adaf89bdd535d4cd9699445de3ac..d50e5773483f5494be90ff42633cff70012079cc 100644 (file)
@@ -2751,6 +2751,11 @@ static int edge_startup(struct usb_serial *serial)
                                        EDGE_COMPATIBILITY_MASK1,
                                        EDGE_COMPATIBILITY_MASK2 };
 
+       if (serial->num_bulk_in < 1 || serial->num_interrupt_in < 1) {
+               dev_err(&serial->interface->dev, "missing endpoints\n");
+               return -ENODEV;
+       }
+
        dev = serial->dev;
 
        /* create our private serial structure */
index c339163698eb9960f8cc71735d8ed1cf24905e75..9a0db2965fbb45df5fd6b67f5eddb9d90969869c 100644 (file)
@@ -1499,8 +1499,7 @@ static int do_boot_mode(struct edgeport_serial *serial,
 
                dev_dbg(dev, "%s - Download successful -- Device rebooting...\n", __func__);
 
-               /* return an error on purpose */
-               return -ENODEV;
+               return 1;
        }
 
 stayinbootmode:
@@ -1508,7 +1507,7 @@ stayinbootmode:
        dev_dbg(dev, "%s - STAYING IN BOOT MODE\n", __func__);
        serial->product_info.TiMode = TI_MODE_BOOT;
 
-       return 0;
+       return 1;
 }
 
 static int ti_do_config(struct edgeport_port *port, int feature, int on)
@@ -2546,6 +2545,13 @@ static int edge_startup(struct usb_serial *serial)
        int status;
        u16 product_id;
 
+       /* Make sure we have the required endpoints when in download mode. */
+       if (serial->interface->cur_altsetting->desc.bNumEndpoints > 1) {
+               if (serial->num_bulk_in < serial->num_ports ||
+                               serial->num_bulk_out < serial->num_ports)
+                       return -ENODEV;
+       }
+
        /* create our private serial structure */
        edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL);
        if (!edge_serial)
@@ -2553,14 +2559,18 @@ static int edge_startup(struct usb_serial *serial)
 
        mutex_init(&edge_serial->es_lock);
        edge_serial->serial = serial;
+       INIT_DELAYED_WORK(&edge_serial->heartbeat_work, edge_heartbeat_work);
        usb_set_serial_data(serial, edge_serial);
 
        status = download_fw(edge_serial);
-       if (status) {
+       if (status < 0) {
                kfree(edge_serial);
                return status;
        }
 
+       if (status > 0)
+               return 1;       /* bind but do not register any ports */
+
        product_id = le16_to_cpu(
                        edge_serial->serial->dev->descriptor.idProduct);
 
@@ -2572,7 +2582,6 @@ static int edge_startup(struct usb_serial *serial)
                }
        }
 
-       INIT_DELAYED_WORK(&edge_serial->heartbeat_work, edge_heartbeat_work);
        edge_heartbeat_schedule(edge_serial);
 
        return 0;
@@ -2580,6 +2589,9 @@ static int edge_startup(struct usb_serial *serial)
 
 static void edge_disconnect(struct usb_serial *serial)
 {
+       struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
+
+       cancel_delayed_work_sync(&edge_serial->heartbeat_work);
 }
 
 static void edge_release(struct usb_serial *serial)
index 344b4eea4bd59c4da0590f538323f37b5ed74074..d57fb51992182afec68a28fce42a242a4cd46ff2 100644 (file)
@@ -68,6 +68,16 @@ struct iuu_private {
        u32 clk;
 };
 
+static int iuu_attach(struct usb_serial *serial)
+{
+       unsigned char num_ports = serial->num_ports;
+
+       if (serial->num_bulk_in < num_ports || serial->num_bulk_out < num_ports)
+               return -ENODEV;
+
+       return 0;
+}
+
 static int iuu_port_probe(struct usb_serial_port *port)
 {
        struct iuu_private *priv;
@@ -1196,6 +1206,7 @@ static struct usb_serial_driver iuu_device = {
        .tiocmset = iuu_tiocmset,
        .set_termios = iuu_set_termios,
        .init_termios = iuu_init_termios,
+       .attach = iuu_attach,
        .port_probe = iuu_port_probe,
        .port_remove = iuu_port_remove,
 };
index e49ad0c63ad8b54c61995965445054e34cda6869..83523fcf6fb9d6a75bfba7738362e1c8bbfa01c9 100644 (file)
@@ -699,6 +699,19 @@ MODULE_FIRMWARE("keyspan_pda/keyspan_pda.fw");
 MODULE_FIRMWARE("keyspan_pda/xircom_pgs.fw");
 #endif
 
+static int keyspan_pda_attach(struct usb_serial *serial)
+{
+       unsigned char num_ports = serial->num_ports;
+
+       if (serial->num_bulk_out < num_ports ||
+                       serial->num_interrupt_in < num_ports) {
+               dev_err(&serial->interface->dev, "missing endpoints\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
 static int keyspan_pda_port_probe(struct usb_serial_port *port)
 {
 
@@ -776,6 +789,7 @@ static struct usb_serial_driver keyspan_pda_device = {
        .break_ctl =            keyspan_pda_break_ctl,
        .tiocmget =             keyspan_pda_tiocmget,
        .tiocmset =             keyspan_pda_tiocmset,
+       .attach =               keyspan_pda_attach,
        .port_probe =           keyspan_pda_port_probe,
        .port_remove =          keyspan_pda_port_remove,
 };
index 2363654cafc9b79de61a30bcc00b7f875c470b60..813035f51fe73a4e0de69ce76e6c3329ba4d376c 100644 (file)
@@ -51,6 +51,7 @@
 
 
 /* Function prototypes */
+static int kobil_attach(struct usb_serial *serial);
 static int kobil_port_probe(struct usb_serial_port *probe);
 static int kobil_port_remove(struct usb_serial_port *probe);
 static int  kobil_open(struct tty_struct *tty, struct usb_serial_port *port);
@@ -86,6 +87,7 @@ static struct usb_serial_driver kobil_device = {
        .description =          "KOBIL USB smart card terminal",
        .id_table =             id_table,
        .num_ports =            1,
+       .attach =               kobil_attach,
        .port_probe =           kobil_port_probe,
        .port_remove =          kobil_port_remove,
        .ioctl =                kobil_ioctl,
@@ -113,6 +115,16 @@ struct kobil_private {
 };
 
 
+static int kobil_attach(struct usb_serial *serial)
+{
+       if (serial->num_interrupt_out < serial->num_ports) {
+               dev_err(&serial->interface->dev, "missing interrupt-out endpoint\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
 static int kobil_port_probe(struct usb_serial_port *port)
 {
        struct usb_serial *serial = port->serial;
index d52caa03679c6cbc5886316cb27d471d0c4b7bc4..91bc170b408a08a59c49786e2cb5753149ce2425 100644 (file)
@@ -65,8 +65,6 @@ struct moschip_port {
        struct urb              *write_urb_pool[NUM_URBS];
 };
 
-static struct usb_serial_driver moschip7720_2port_driver;
-
 #define USB_VENDOR_ID_MOSCHIP          0x9710
 #define MOSCHIP_DEVICE_ID_7720         0x7720
 #define MOSCHIP_DEVICE_ID_7715         0x7715
@@ -970,25 +968,6 @@ static void mos7720_bulk_out_data_callback(struct urb *urb)
                tty_port_tty_wakeup(&mos7720_port->port->port);
 }
 
-/*
- * mos77xx_probe
- *     this function installs the appropriate read interrupt endpoint callback
- *     depending on whether the device is a 7720 or 7715, thus avoiding costly
- *     run-time checks in the high-frequency callback routine itself.
- */
-static int mos77xx_probe(struct usb_serial *serial,
-                        const struct usb_device_id *id)
-{
-       if (id->idProduct == MOSCHIP_DEVICE_ID_7715)
-               moschip7720_2port_driver.read_int_callback =
-                       mos7715_interrupt_callback;
-       else
-               moschip7720_2port_driver.read_int_callback =
-                       mos7720_interrupt_callback;
-
-       return 0;
-}
-
 static int mos77xx_calc_num_ports(struct usb_serial *serial)
 {
        u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
@@ -1917,6 +1896,11 @@ static int mos7720_startup(struct usb_serial *serial)
        u16 product;
        int ret_val;
 
+       if (serial->num_bulk_in < 2 || serial->num_bulk_out < 2) {
+               dev_err(&serial->interface->dev, "missing bulk endpoints\n");
+               return -ENODEV;
+       }
+
        product = le16_to_cpu(serial->dev->descriptor.idProduct);
        dev = serial->dev;
 
@@ -1941,19 +1925,18 @@ static int mos7720_startup(struct usb_serial *serial)
                        tmp->interrupt_in_endpointAddress;
                serial->port[1]->interrupt_in_urb = NULL;
                serial->port[1]->interrupt_in_buffer = NULL;
+
+               if (serial->port[0]->interrupt_in_urb) {
+                       struct urb *urb = serial->port[0]->interrupt_in_urb;
+
+                       urb->complete = mos7715_interrupt_callback;
+               }
        }
 
        /* setting configuration feature to one */
        usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
                        (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
 
-       /* start the interrupt urb */
-       ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
-       if (ret_val)
-               dev_err(&dev->dev,
-                       "%s - Error %d submitting control urb\n",
-                       __func__, ret_val);
-
 #ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
        if (product == MOSCHIP_DEVICE_ID_7715) {
                ret_val = mos7715_parport_init(serial);
@@ -1961,6 +1944,13 @@ static int mos7720_startup(struct usb_serial *serial)
                        return ret_val;
        }
 #endif
+       /* start the interrupt urb */
+       ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
+       if (ret_val) {
+               dev_err(&dev->dev, "failed to submit interrupt urb: %d\n",
+                       ret_val);
+       }
+
        /* LSR For Port 1 */
        read_mos_reg(serial, 0, MOS7720_LSR, &data);
        dev_dbg(&dev->dev, "LSR:%x\n", data);
@@ -1970,6 +1960,8 @@ static int mos7720_startup(struct usb_serial *serial)
 
 static void mos7720_release(struct usb_serial *serial)
 {
+       usb_kill_urb(serial->port[0]->interrupt_in_urb);
+
 #ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
        /* close the parallel port */
 
@@ -2019,11 +2011,6 @@ static int mos7720_port_probe(struct usb_serial_port *port)
        if (!mos7720_port)
                return -ENOMEM;
 
-       /* Initialize all port interrupt end point to port 0 int endpoint.
-        * Our device has only one interrupt endpoint common to all ports.
-        */
-       port->interrupt_in_endpointAddress =
-               port->serial->port[0]->interrupt_in_endpointAddress;
        mos7720_port->port = port;
 
        usb_set_serial_port_data(port, mos7720_port);
@@ -2053,7 +2040,6 @@ static struct usb_serial_driver moschip7720_2port_driver = {
        .close                  = mos7720_close,
        .throttle               = mos7720_throttle,
        .unthrottle             = mos7720_unthrottle,
-       .probe                  = mos77xx_probe,
        .attach                 = mos7720_startup,
        .release                = mos7720_release,
        .port_probe             = mos7720_port_probe,
@@ -2067,7 +2053,7 @@ static struct usb_serial_driver moschip7720_2port_driver = {
        .chars_in_buffer        = mos7720_chars_in_buffer,
        .break_ctl              = mos7720_break,
        .read_bulk_callback     = mos7720_bulk_in_callback,
-       .read_int_callback      = NULL  /* dynamically assigned in probe() */
+       .read_int_callback      = mos7720_interrupt_callback,
 };
 
 static struct usb_serial_driver * const serial_drivers[] = {
index 9a220b8e810f161be985d9655ec6d9dee90147dc..ea27fb23967a13dd3e3c47d31fdaab52c38028eb 100644 (file)
@@ -214,7 +214,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
 
 struct moschip_port {
        int port_num;           /*Actual port number in the device(1,2,etc) */
-       struct urb *write_urb;  /* write URB for this port */
        struct urb *read_urb;   /* read URB for this port */
        __u8 shadowLCR;         /* last LCR value received */
        __u8 shadowMCR;         /* last MCR value received */
@@ -1037,9 +1036,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
                                serial,
                                serial->port[0]->interrupt_in_urb->interval);
 
-                       /* start interrupt read for mos7840               *
-                        * will continue as long as mos7840 is connected  */
-
+                       /* start interrupt read for mos7840 */
                        response =
                            usb_submit_urb(serial->port[0]->interrupt_in_urb,
                                           GFP_KERNEL);
@@ -1186,7 +1183,6 @@ static void mos7840_close(struct usb_serial_port *port)
                }
        }
 
-       usb_kill_urb(mos7840_port->write_urb);
        usb_kill_urb(mos7840_port->read_urb);
        mos7840_port->read_urb_busy = false;
 
@@ -1199,12 +1195,6 @@ static void mos7840_close(struct usb_serial_port *port)
                }
        }
 
-       if (mos7840_port->write_urb) {
-               /* if this urb had a transfer buffer already (old tx) free it */
-               kfree(mos7840_port->write_urb->transfer_buffer);
-               usb_free_urb(mos7840_port->write_urb);
-       }
-
        Data = 0x0;
        mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data);
 
@@ -2113,6 +2103,17 @@ static int mos7840_calc_num_ports(struct usb_serial *serial)
        return mos7840_num_ports;
 }
 
+static int mos7840_attach(struct usb_serial *serial)
+{
+       if (serial->num_bulk_in < serial->num_ports ||
+                       serial->num_bulk_out < serial->num_ports) {
+               dev_err(&serial->interface->dev, "missing endpoints\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
 static int mos7840_port_probe(struct usb_serial_port *port)
 {
        struct usb_serial *serial = port->serial;
@@ -2388,6 +2389,7 @@ static struct usb_serial_driver moschip7840_4port_device = {
        .tiocmset = mos7840_tiocmset,
        .tiocmiwait = usb_serial_generic_tiocmiwait,
        .get_icount = usb_serial_generic_get_icount,
+       .attach = mos7840_attach,
        .port_probe = mos7840_port_probe,
        .port_remove = mos7840_port_remove,
        .read_bulk_callback = mos7840_bulk_in_callback,
index f6c6900bccf01c87c88734b2a7417db9f0fade8d..a180b17d24323b074aee19e33bf0f497ad271d8a 100644 (file)
@@ -38,6 +38,7 @@ static int  omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
                                const unsigned char *buf, int count);
 static int  omninet_write_room(struct tty_struct *tty);
 static void omninet_disconnect(struct usb_serial *serial);
+static int omninet_attach(struct usb_serial *serial);
 static int omninet_port_probe(struct usb_serial_port *port);
 static int omninet_port_remove(struct usb_serial_port *port);
 
@@ -56,6 +57,7 @@ static struct usb_serial_driver zyxel_omninet_device = {
        .description =          "ZyXEL - omni.net lcd plus usb",
        .id_table =             id_table,
        .num_ports =            1,
+       .attach =               omninet_attach,
        .port_probe =           omninet_port_probe,
        .port_remove =          omninet_port_remove,
        .open =                 omninet_open,
@@ -104,6 +106,17 @@ struct omninet_data {
        __u8    od_outseq;      /* Sequence number for bulk_out URBs */
 };
 
+static int omninet_attach(struct usb_serial *serial)
+{
+       /* The second bulk-out endpoint is used for writing. */
+       if (serial->num_bulk_out < 2) {
+               dev_err(&serial->interface->dev, "missing endpoints\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
 static int omninet_port_probe(struct usb_serial_port *port)
 {
        struct omninet_data *od;
index a4b88bc038b6d0a943b76457714e65a9449881b2..b8bf52bf7a944d5fddd6976f90a63db3cd6807e5 100644 (file)
@@ -134,6 +134,7 @@ static int oti6858_chars_in_buffer(struct tty_struct *tty);
 static int oti6858_tiocmget(struct tty_struct *tty);
 static int oti6858_tiocmset(struct tty_struct *tty,
                                unsigned int set, unsigned int clear);
+static int oti6858_attach(struct usb_serial *serial);
 static int oti6858_port_probe(struct usb_serial_port *port);
 static int oti6858_port_remove(struct usb_serial_port *port);
 
@@ -158,6 +159,7 @@ static struct usb_serial_driver oti6858_device = {
        .write_bulk_callback =  oti6858_write_bulk_callback,
        .write_room =           oti6858_write_room,
        .chars_in_buffer =      oti6858_chars_in_buffer,
+       .attach =               oti6858_attach,
        .port_probe =           oti6858_port_probe,
        .port_remove =          oti6858_port_remove,
 };
@@ -324,6 +326,20 @@ static void send_data(struct work_struct *work)
        usb_serial_port_softint(port);
 }
 
+static int oti6858_attach(struct usb_serial *serial)
+{
+       unsigned char num_ports = serial->num_ports;
+
+       if (serial->num_bulk_in < num_ports ||
+                       serial->num_bulk_out < num_ports ||
+                       serial->num_interrupt_in < num_ports) {
+               dev_err(&serial->interface->dev, "missing endpoints\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
 static int oti6858_port_probe(struct usb_serial_port *port)
 {
        struct oti6858_private *priv;
index ae682e4eeaef575a23c90a8dda15407e862d42e1..46fca6b7584686744a9e79aae0bd78db08192813 100644 (file)
@@ -220,9 +220,17 @@ static int pl2303_probe(struct usb_serial *serial,
 static int pl2303_startup(struct usb_serial *serial)
 {
        struct pl2303_serial_private *spriv;
+       unsigned char num_ports = serial->num_ports;
        enum pl2303_type type = TYPE_01;
        unsigned char *buf;
 
+       if (serial->num_bulk_in < num_ports ||
+                       serial->num_bulk_out < num_ports ||
+                       serial->num_interrupt_in < num_ports) {
+               dev_err(&serial->interface->dev, "missing endpoints\n");
+               return -ENODEV;
+       }
+
        spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
        if (!spriv)
                return -ENOMEM;
index 659cb8606bd953a7585a013beeb3dc3989400a14..5709cc93b0837a0b73a8acec627caa05e27c9b64 100644 (file)
@@ -408,16 +408,12 @@ static void qt2_close(struct usb_serial_port *port)
 {
        struct usb_serial *serial;
        struct qt2_port_private *port_priv;
-       unsigned long flags;
        int i;
 
        serial = port->serial;
        port_priv = usb_get_serial_port_data(port);
 
-       spin_lock_irqsave(&port_priv->urb_lock, flags);
        usb_kill_urb(port_priv->write_urb);
-       port_priv->urb_in_use = false;
-       spin_unlock_irqrestore(&port_priv->urb_lock, flags);
 
        /* flush the port transmit buffer */
        i = usb_control_msg(serial->dev,
index ef0dbf0703c574eb62d1e2ce3be49e5186258aee..475e6c31b266b013ed4749b482c0477eda4a3904 100644 (file)
@@ -154,6 +154,19 @@ static int spcp8x5_probe(struct usb_serial *serial,
        return 0;
 }
 
+static int spcp8x5_attach(struct usb_serial *serial)
+{
+       unsigned char num_ports = serial->num_ports;
+
+       if (serial->num_bulk_in < num_ports ||
+                       serial->num_bulk_out < num_ports) {
+               dev_err(&serial->interface->dev, "missing endpoints\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
 static int spcp8x5_port_probe(struct usb_serial_port *port)
 {
        const struct usb_device_id *id = usb_get_serial_data(port->serial);
@@ -477,6 +490,7 @@ static struct usb_serial_driver spcp8x5_device = {
        .tiocmget               = spcp8x5_tiocmget,
        .tiocmset               = spcp8x5_tiocmset,
        .probe                  = spcp8x5_probe,
+       .attach                 = spcp8x5_attach,
        .port_probe             = spcp8x5_port_probe,
        .port_remove            = spcp8x5_port_remove,
 };
index 8db9d071d9409a9f6e85ce6b23a1515f90f11157..64b85b8dedf33c9af7ea5faf68b7ea1a97335903 100644 (file)
@@ -579,6 +579,13 @@ static int ti_startup(struct usb_serial *serial)
                goto free_tdev;
        }
 
+       if (serial->num_bulk_in < serial->num_ports ||
+                       serial->num_bulk_out < serial->num_ports) {
+               dev_err(&serial->interface->dev, "missing endpoints\n");
+               status = -ENODEV;
+               goto free_tdev;
+       }
+
        return 0;
 
 free_tdev:
index af3c7eecff91d4a49fb1b49cdfce7b7d4c9b11fe..16cc18369111d039ffededa7559075a869638708 100644 (file)
@@ -2109,6 +2109,13 @@ UNUSUAL_DEV(  0x152d, 0x2566, 0x0114, 0x0114,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_BROKEN_FUA ),
 
+/* Reported-by: George Cherian <george.cherian@cavium.com> */
+UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999,
+               "JMicron",
+               "JMS56x",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_REPORT_OPCODES),
+
 /*
  * Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
  * and Mac USB Dock USB-SCSI */
index be1ee89ee9172f4408eab231c9b1e6a52a253c88..36d75c367d2215c4761f72d40b6c34eaf54bec02 100644 (file)
@@ -27,6 +27,45 @@ static LIST_HEAD(parent_list);
 static DEFINE_MUTEX(parent_list_lock);
 static struct class_compat *mdev_bus_compat_class;
 
+static LIST_HEAD(mdev_list);
+static DEFINE_MUTEX(mdev_list_lock);
+
+struct device *mdev_parent_dev(struct mdev_device *mdev)
+{
+       return mdev->parent->dev;
+}
+EXPORT_SYMBOL(mdev_parent_dev);
+
+void *mdev_get_drvdata(struct mdev_device *mdev)
+{
+       return mdev->driver_data;
+}
+EXPORT_SYMBOL(mdev_get_drvdata);
+
+void mdev_set_drvdata(struct mdev_device *mdev, void *data)
+{
+       mdev->driver_data = data;
+}
+EXPORT_SYMBOL(mdev_set_drvdata);
+
+struct device *mdev_dev(struct mdev_device *mdev)
+{
+       return &mdev->dev;
+}
+EXPORT_SYMBOL(mdev_dev);
+
+struct mdev_device *mdev_from_dev(struct device *dev)
+{
+       return dev_is_mdev(dev) ? to_mdev_device(dev) : NULL;
+}
+EXPORT_SYMBOL(mdev_from_dev);
+
+uuid_le mdev_uuid(struct mdev_device *mdev)
+{
+       return mdev->uuid;
+}
+EXPORT_SYMBOL(mdev_uuid);
+
 static int _find_mdev_device(struct device *dev, void *data)
 {
        struct mdev_device *mdev;
@@ -42,7 +81,7 @@ static int _find_mdev_device(struct device *dev, void *data)
        return 0;
 }
 
-static bool mdev_device_exist(struct parent_device *parent, uuid_le uuid)
+static bool mdev_device_exist(struct mdev_parent *parent, uuid_le uuid)
 {
        struct device *dev;
 
@@ -56,9 +95,9 @@ static bool mdev_device_exist(struct parent_device *parent, uuid_le uuid)
 }
 
 /* Should be called holding parent_list_lock */
-static struct parent_device *__find_parent_device(struct device *dev)
+static struct mdev_parent *__find_parent_device(struct device *dev)
 {
-       struct parent_device *parent;
+       struct mdev_parent *parent;
 
        list_for_each_entry(parent, &parent_list, next) {
                if (parent->dev == dev)
@@ -69,8 +108,8 @@ static struct parent_device *__find_parent_device(struct device *dev)
 
 static void mdev_release_parent(struct kref *kref)
 {
-       struct parent_device *parent = container_of(kref, struct parent_device,
-                                                   ref);
+       struct mdev_parent *parent = container_of(kref, struct mdev_parent,
+                                                 ref);
        struct device *dev = parent->dev;
 
        kfree(parent);
@@ -78,7 +117,7 @@ static void mdev_release_parent(struct kref *kref)
 }
 
 static
-inline struct parent_device *mdev_get_parent(struct parent_device *parent)
+inline struct mdev_parent *mdev_get_parent(struct mdev_parent *parent)
 {
        if (parent)
                kref_get(&parent->ref);
@@ -86,7 +125,7 @@ inline struct parent_device *mdev_get_parent(struct parent_device *parent)
        return parent;
 }
 
-static inline void mdev_put_parent(struct parent_device *parent)
+static inline void mdev_put_parent(struct mdev_parent *parent)
 {
        if (parent)
                kref_put(&parent->ref, mdev_release_parent);
@@ -95,7 +134,7 @@ static inline void mdev_put_parent(struct parent_device *parent)
 static int mdev_device_create_ops(struct kobject *kobj,
                                  struct mdev_device *mdev)
 {
-       struct parent_device *parent = mdev->parent;
+       struct mdev_parent *parent = mdev->parent;
        int ret;
 
        ret = parent->ops->create(kobj, mdev);
@@ -122,7 +161,7 @@ static int mdev_device_create_ops(struct kobject *kobj,
  */
 static int mdev_device_remove_ops(struct mdev_device *mdev, bool force_remove)
 {
-       struct parent_device *parent = mdev->parent;
+       struct mdev_parent *parent = mdev->parent;
        int ret;
 
        /*
@@ -153,10 +192,10 @@ static int mdev_device_remove_cb(struct device *dev, void *data)
  * Add device to list of registered parent devices.
  * Returns a negative value on error, otherwise 0.
  */
-int mdev_register_device(struct device *dev, const struct parent_ops *ops)
+int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
 {
        int ret;
-       struct parent_device *parent;
+       struct mdev_parent *parent;
 
        /* check for mandatory ops */
        if (!ops || !ops->create || !ops->remove || !ops->supported_type_groups)
@@ -229,7 +268,7 @@ EXPORT_SYMBOL(mdev_register_device);
 
 void mdev_unregister_device(struct device *dev)
 {
-       struct parent_device *parent;
+       struct mdev_parent *parent;
        bool force_remove = true;
 
        mutex_lock(&parent_list_lock);
@@ -266,7 +305,7 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid)
 {
        int ret;
        struct mdev_device *mdev;
-       struct parent_device *parent;
+       struct mdev_parent *parent;
        struct mdev_type *type = to_mdev_type(kobj);
 
        parent = mdev_get_parent(type->parent);
@@ -316,6 +355,11 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid)
        dev_dbg(&mdev->dev, "MDEV: created\n");
 
        mutex_unlock(&parent->lock);
+
+       mutex_lock(&mdev_list_lock);
+       list_add(&mdev->next, &mdev_list);
+       mutex_unlock(&mdev_list_lock);
+
        return ret;
 
 create_failed:
@@ -329,12 +373,30 @@ create_err:
 
 int mdev_device_remove(struct device *dev, bool force_remove)
 {
-       struct mdev_device *mdev;
-       struct parent_device *parent;
+       struct mdev_device *mdev, *tmp;
+       struct mdev_parent *parent;
        struct mdev_type *type;
        int ret;
+       bool found = false;
 
        mdev = to_mdev_device(dev);
+
+       mutex_lock(&mdev_list_lock);
+       list_for_each_entry(tmp, &mdev_list, next) {
+               if (tmp == mdev) {
+                       found = true;
+                       break;
+               }
+       }
+
+       if (found)
+               list_del(&mdev->next);
+
+       mutex_unlock(&mdev_list_lock);
+
+       if (!found)
+               return -ENODEV;
+
        type = to_mdev_type(mdev->type_kobj);
        parent = mdev->parent;
        mutex_lock(&parent->lock);
@@ -342,6 +404,11 @@ int mdev_device_remove(struct device *dev, bool force_remove)
        ret = mdev_device_remove_ops(mdev, force_remove);
        if (ret) {
                mutex_unlock(&parent->lock);
+
+               mutex_lock(&mdev_list_lock);
+               list_add(&mdev->next, &mdev_list);
+               mutex_unlock(&mdev_list_lock);
+
                return ret;
        }
 
@@ -349,7 +416,8 @@ int mdev_device_remove(struct device *dev, bool force_remove)
        device_unregister(dev);
        mutex_unlock(&parent->lock);
        mdev_put_parent(parent);
-       return ret;
+
+       return 0;
 }
 
 static int __init mdev_init(void)
index d35097cbf3d755c027b7a98a8e6daed835754117..a9cefd70a7050a45d88b77dfd17efeffd5d3e95f 100644 (file)
 int  mdev_bus_register(void);
 void mdev_bus_unregister(void);
 
+struct mdev_parent {
+       struct device *dev;
+       const struct mdev_parent_ops *ops;
+       struct kref ref;
+       struct mutex lock;
+       struct list_head next;
+       struct kset *mdev_types_kset;
+       struct list_head type_list;
+};
+
+struct mdev_device {
+       struct device dev;
+       struct mdev_parent *parent;
+       uuid_le uuid;
+       void *driver_data;
+       struct kref ref;
+       struct list_head next;
+       struct kobject *type_kobj;
+};
+
+#define to_mdev_device(dev)    container_of(dev, struct mdev_device, dev)
+#define dev_is_mdev(d)         ((d)->bus == &mdev_bus_type)
+
 struct mdev_type {
        struct kobject kobj;
        struct kobject *devices_kobj;
-       struct parent_device *parent;
+       struct mdev_parent *parent;
        struct list_head next;
        struct attribute_group *group;
 };
@@ -29,8 +52,8 @@ struct mdev_type {
 #define to_mdev_type(_kobj)            \
        container_of(_kobj, struct mdev_type, kobj)
 
-int  parent_create_sysfs_files(struct parent_device *parent);
-void parent_remove_sysfs_files(struct parent_device *parent);
+int  parent_create_sysfs_files(struct mdev_parent *parent);
+void parent_remove_sysfs_files(struct mdev_parent *parent);
 
 int  mdev_create_sysfs_files(struct device *dev, struct mdev_type *type);
 void mdev_remove_sysfs_files(struct device *dev, struct mdev_type *type);
index 1a53deb2ee102b677f92d7388eb340a6e0204ffd..802df210929ba5d52924c102867cbe55ba365963 100644 (file)
@@ -92,7 +92,7 @@ static struct kobj_type mdev_type_ktype = {
        .release = mdev_type_release,
 };
 
-struct mdev_type *add_mdev_supported_type(struct parent_device *parent,
+struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
                                          struct attribute_group *group)
 {
        struct mdev_type *type;
@@ -158,7 +158,7 @@ static void remove_mdev_supported_type(struct mdev_type *type)
        kobject_put(&type->kobj);
 }
 
-static int add_mdev_supported_type_groups(struct parent_device *parent)
+static int add_mdev_supported_type_groups(struct mdev_parent *parent)
 {
        int i;
 
@@ -183,7 +183,7 @@ static int add_mdev_supported_type_groups(struct parent_device *parent)
 }
 
 /* mdev sysfs functions */
-void parent_remove_sysfs_files(struct parent_device *parent)
+void parent_remove_sysfs_files(struct mdev_parent *parent)
 {
        struct mdev_type *type, *tmp;
 
@@ -196,7 +196,7 @@ void parent_remove_sysfs_files(struct parent_device *parent)
        kset_unregister(parent->mdev_types_kset);
 }
 
-int parent_create_sysfs_files(struct parent_device *parent)
+int parent_create_sysfs_files(struct mdev_parent *parent)
 {
        int ret;
 
index ffc36758cb84c08ce1b2c4d516bd73b0c18d52d1..fa848a701b8b4ed921ae56248eac3ef0fdc7d5a3 100644 (file)
@@ -27,7 +27,7 @@
 static int vfio_mdev_open(void *device_data)
 {
        struct mdev_device *mdev = device_data;
-       struct parent_device *parent = mdev->parent;
+       struct mdev_parent *parent = mdev->parent;
        int ret;
 
        if (unlikely(!parent->ops->open))
@@ -46,7 +46,7 @@ static int vfio_mdev_open(void *device_data)
 static void vfio_mdev_release(void *device_data)
 {
        struct mdev_device *mdev = device_data;
-       struct parent_device *parent = mdev->parent;
+       struct mdev_parent *parent = mdev->parent;
 
        if (likely(parent->ops->release))
                parent->ops->release(mdev);
@@ -58,7 +58,7 @@ static long vfio_mdev_unlocked_ioctl(void *device_data,
                                     unsigned int cmd, unsigned long arg)
 {
        struct mdev_device *mdev = device_data;
-       struct parent_device *parent = mdev->parent;
+       struct mdev_parent *parent = mdev->parent;
 
        if (unlikely(!parent->ops->ioctl))
                return -EINVAL;
@@ -70,7 +70,7 @@ static ssize_t vfio_mdev_read(void *device_data, char __user *buf,
                              size_t count, loff_t *ppos)
 {
        struct mdev_device *mdev = device_data;
-       struct parent_device *parent = mdev->parent;
+       struct mdev_parent *parent = mdev->parent;
 
        if (unlikely(!parent->ops->read))
                return -EINVAL;
@@ -82,7 +82,7 @@ static ssize_t vfio_mdev_write(void *device_data, const char __user *buf,
                               size_t count, loff_t *ppos)
 {
        struct mdev_device *mdev = device_data;
-       struct parent_device *parent = mdev->parent;
+       struct mdev_parent *parent = mdev->parent;
 
        if (unlikely(!parent->ops->write))
                return -EINVAL;
@@ -93,7 +93,7 @@ static ssize_t vfio_mdev_write(void *device_data, const char __user *buf,
 static int vfio_mdev_mmap(void *device_data, struct vm_area_struct *vma)
 {
        struct mdev_device *mdev = device_data;
-       struct parent_device *parent = mdev->parent;
+       struct mdev_parent *parent = mdev->parent;
 
        if (unlikely(!parent->ops->mmap))
                return -EINVAL;
index dcd7c2a9961830b7fd1ff4155d5c302cd55fd809..324c52e3a1a47736a819826eea2d1974e47c7a87 100644 (file)
@@ -1142,6 +1142,10 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
                        return ret;
 
                vdev->barmap[index] = pci_iomap(pdev, index, 0);
+               if (!vdev->barmap[index]) {
+                       pci_release_selected_regions(pdev, 1 << index);
+                       return -ENOMEM;
+               }
        }
 
        vma->vm_private_data = vdev;
index 5ffd1d9ad4bdf8111a35a9b6b0c217109d238ebf..357243d76f108fe184d2079736726e89a0ad0846 100644 (file)
@@ -193,7 +193,10 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
        if (!vdev->has_vga)
                return -EINVAL;
 
-       switch (pos) {
+       if (pos > 0xbfffful)
+               return -EINVAL;
+
+       switch ((u32)pos) {
        case 0xa0000 ... 0xbffff:
                count = min(count, (size_t)(0xc0000 - pos));
                iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1);
index f3726ba12aa6ceccff8a0c96523aa683537be8f4..b3cc33fa6d26a69f44dc013b7bee55f6bd7682ab 100644 (file)
@@ -36,7 +36,6 @@
 #include <linux/uaccess.h>
 #include <linux/vfio.h>
 #include <linux/workqueue.h>
-#include <linux/pid_namespace.h>
 #include <linux/mdev.h>
 #include <linux/notifier.h>
 
@@ -268,28 +267,38 @@ static void vfio_lock_acct(struct task_struct *task, long npage)
 {
        struct vwork *vwork;
        struct mm_struct *mm;
+       bool is_current;
 
        if (!npage)
                return;
 
-       mm = get_task_mm(task);
+       is_current = (task->mm == current->mm);
+
+       mm = is_current ? task->mm : get_task_mm(task);
        if (!mm)
-               return; /* process exited or nothing to do */
+               return; /* process exited */
 
        if (down_write_trylock(&mm->mmap_sem)) {
                mm->locked_vm += npage;
                up_write(&mm->mmap_sem);
-               mmput(mm);
+               if (!is_current)
+                       mmput(mm);
                return;
        }
 
+       if (is_current) {
+               mm = get_task_mm(task);
+               if (!mm)
+                       return;
+       }
+
        /*
         * Couldn't get mmap_sem lock, so must setup to update
         * mm->locked_vm later. If locked_vm were atomic, we
         * wouldn't need this silliness
         */
        vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
-       if (!vwork) {
+       if (WARN_ON(!vwork)) {
                mmput(mm);
                return;
        }
@@ -393,77 +402,71 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
                                  long npage, unsigned long *pfn_base)
 {
-       unsigned long limit;
-       bool lock_cap = ns_capable(task_active_pid_ns(dma->task)->user_ns,
-                                  CAP_IPC_LOCK);
-       struct mm_struct *mm;
-       long ret, i = 0, lock_acct = 0;
+       unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+       bool lock_cap = capable(CAP_IPC_LOCK);
+       long ret, pinned = 0, lock_acct = 0;
        bool rsvd;
        dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
 
-       mm = get_task_mm(dma->task);
-       if (!mm)
+       /* This code path is only user initiated */
+       if (!current->mm)
                return -ENODEV;
 
-       ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
+       ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base);
        if (ret)
-               goto pin_pg_remote_exit;
+               return ret;
 
+       pinned++;
        rsvd = is_invalid_reserved_pfn(*pfn_base);
-       limit = task_rlimit(dma->task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 
        /*
         * Reserved pages aren't counted against the user, externally pinned
         * pages are already counted against the user.
         */
        if (!rsvd && !vfio_find_vpfn(dma, iova)) {
-               if (!lock_cap && mm->locked_vm + 1 > limit) {
+               if (!lock_cap && current->mm->locked_vm + 1 > limit) {
                        put_pfn(*pfn_base, dma->prot);
                        pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
                                        limit << PAGE_SHIFT);
-                       ret = -ENOMEM;
-                       goto pin_pg_remote_exit;
+                       return -ENOMEM;
                }
                lock_acct++;
        }
 
-       i++;
-       if (likely(!disable_hugepages)) {
-               /* Lock all the consecutive pages from pfn_base */
-               for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; i < npage;
-                    i++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
-                       unsigned long pfn = 0;
+       if (unlikely(disable_hugepages))
+               goto out;
 
-                       ret = vaddr_get_pfn(mm, vaddr, dma->prot, &pfn);
-                       if (ret)
-                               break;
+       /* Lock all the consecutive pages from pfn_base */
+       for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
+            pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
+               unsigned long pfn = 0;
 
-                       if (pfn != *pfn_base + i ||
-                           rsvd != is_invalid_reserved_pfn(pfn)) {
+               ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
+               if (ret)
+                       break;
+
+               if (pfn != *pfn_base + pinned ||
+                   rsvd != is_invalid_reserved_pfn(pfn)) {
+                       put_pfn(pfn, dma->prot);
+                       break;
+               }
+
+               if (!rsvd && !vfio_find_vpfn(dma, iova)) {
+                       if (!lock_cap &&
+                           current->mm->locked_vm + lock_acct + 1 > limit) {
                                put_pfn(pfn, dma->prot);
+                               pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
+                                       __func__, limit << PAGE_SHIFT);
                                break;
                        }
-
-                       if (!rsvd && !vfio_find_vpfn(dma, iova)) {
-                               if (!lock_cap &&
-                                   mm->locked_vm + lock_acct + 1 > limit) {
-                                       put_pfn(pfn, dma->prot);
-                                       pr_warn("%s: RLIMIT_MEMLOCK (%ld) "
-                                               "exceeded\n", __func__,
-                                               limit << PAGE_SHIFT);
-                                       break;
-                               }
-                               lock_acct++;
-                       }
+                       lock_acct++;
                }
        }
 
-       vfio_lock_acct(dma->task, lock_acct);
-       ret = i;
+out:
+       vfio_lock_acct(current, lock_acct);
 
-pin_pg_remote_exit:
-       mmput(mm);
-       return ret;
+       return pinned;
 }
 
 static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
@@ -473,10 +476,10 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
        long unlocked = 0, locked = 0;
        long i;
 
-       for (i = 0; i < npage; i++) {
+       for (i = 0; i < npage; i++, iova += PAGE_SIZE) {
                if (put_pfn(pfn++, dma->prot)) {
                        unlocked++;
-                       if (vfio_find_vpfn(dma, iova + (i << PAGE_SHIFT)))
+                       if (vfio_find_vpfn(dma, iova))
                                locked++;
                }
        }
@@ -491,8 +494,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
                                  unsigned long *pfn_base, bool do_accounting)
 {
        unsigned long limit;
-       bool lock_cap = ns_capable(task_active_pid_ns(dma->task)->user_ns,
-                                  CAP_IPC_LOCK);
+       bool lock_cap = has_capability(dma->task, CAP_IPC_LOCK);
        struct mm_struct *mm;
        int ret;
        bool rsvd;
index 2d3b691f3fc4885414ae9234950d8e9c798e5fe6..038ac6934fe9d7f865f9711f24fecadc27071b65 100644 (file)
@@ -308,6 +308,11 @@ static int cobalt_lcdfb_probe(struct platform_device *dev)
        info->screen_size = resource_size(res);
        info->screen_base = devm_ioremap(&dev->dev, res->start,
                                         info->screen_size);
+       if (!info->screen_base) {
+               framebuffer_release(info);
+               return -ENOMEM;
+       }
+
        info->fbops = &cobalt_lcd_fbops;
        info->fix = cobalt_lcdfb_fix;
        info->fix.smem_start = res->start;
index 778acf80aacbf81042a3debc4ca1e90d58037f6c..85dd20e0572678581ffa03a8ff6b734773230d1e 100644 (file)
@@ -58,9 +58,13 @@ static int xen_map_device_mmio(const struct resource *resources,
        xen_pfn_t *gpfns;
        xen_ulong_t *idxs;
        int *errs;
-       struct xen_add_to_physmap_range xatp;
 
        for (i = 0; i < count; i++) {
+               struct xen_add_to_physmap_range xatp = {
+                       .domid = DOMID_SELF,
+                       .space = XENMAPSPACE_dev_mmio
+               };
+
                r = &resources[i];
                nr = DIV_ROUND_UP(resource_size(r), XEN_PAGE_SIZE);
                if ((resource_type(r) != IORESOURCE_MEM) || (nr == 0))
@@ -87,9 +91,7 @@ static int xen_map_device_mmio(const struct resource *resources,
                        idxs[j] = XEN_PFN_DOWN(r->start) + j;
                }
 
-               xatp.domid = DOMID_SELF;
                xatp.size = nr;
-               xatp.space = XENMAPSPACE_dev_mmio;
 
                set_xen_guest_handle(xatp.gpfns, gpfns);
                set_xen_guest_handle(xatp.idxs, idxs);
index c03f9c86c7e37d580032d1553e829a6f14ef2014..3c41470c7fc4f123b845bc611bb6ea1d48aabc92 100644 (file)
@@ -369,8 +369,7 @@ static void evtchn_fifo_resume(void)
                }
 
                ret = init_control_block(cpu, control_block);
-               if (ret < 0)
-                       BUG();
+               BUG_ON(ret < 0);
        }
 
        /*
index e8c7f09d01be8a483df94db111faec580beb032a..6890897a6f30edab0c0d87926942463f9b481aa9 100644 (file)
@@ -125,7 +125,7 @@ static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
        while (*new) {
                struct user_evtchn *this;
 
-               this = container_of(*new, struct user_evtchn, node);
+               this = rb_entry(*new, struct user_evtchn, node);
 
                parent = *new;
                if (this->port < evtchn->port)
@@ -157,7 +157,7 @@ static struct user_evtchn *find_evtchn(struct per_user_data *u, unsigned port)
        while (node) {
                struct user_evtchn *evtchn;
 
-               evtchn = container_of(node, struct user_evtchn, node);
+               evtchn = rb_entry(node, struct user_evtchn, node);
 
                if (evtchn->port < port)
                        node = node->rb_left;
index 478fb91e3df2b8a9f8c75d8538668465feb59462..f905d6eeb0482ee481cb24d9714bc6081a852d1e 100644 (file)
@@ -275,6 +275,10 @@ retry:
                rc = 0;
        } else
                rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
+
+       if (!rc)
+               swiotlb_set_max_segment(PAGE_SIZE);
+
        return rc;
 error:
        if (repeat--) {
@@ -392,7 +396,7 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
        if (dma_capable(dev, dev_addr, size) &&
            !range_straddles_page_boundary(phys, size) &&
                !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
-               !swiotlb_force) {
+               (swiotlb_force != SWIOTLB_FORCE)) {
                /* we are not interested in the dma_addr returned by
                 * xen_dma_map_page, only in the potential cache flushes executed
                 * by the function. */
@@ -552,7 +556,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                phys_addr_t paddr = sg_phys(sg);
                dma_addr_t dev_addr = xen_phys_to_bus(paddr);
 
-               if (swiotlb_force ||
+               if (swiotlb_force == SWIOTLB_FORCE ||
                    xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
                    !dma_capable(hwdev, dev_addr, sg->length) ||
                    range_straddles_page_boundary(paddr, sg->length)) {
index e74f9c1fbd80a9fe4c6cfbba0176785b005c06fd..867a2e4252086531bcd682367838f4c4c169427d 100644 (file)
@@ -42,7 +42,6 @@ int xb_write(const void *data, unsigned len);
 int xb_read(void *data, unsigned len);
 int xb_data_to_read(void);
 int xb_wait_for_data_to_read(void);
-int xs_input_avail(void);
 extern struct xenstore_domain_interface *xen_store_interface;
 extern int xen_store_evtchn;
 extern enum xenstore_init xen_store_domain_type;
index 6c0ead4be784340019e69942a8d704ab088ea202..79130b31024754dc7560f60732545a8f28c103b1 100644 (file)
@@ -302,6 +302,29 @@ static void watch_fired(struct xenbus_watch *watch,
        mutex_unlock(&adap->dev_data->reply_mutex);
 }
 
+static int xenbus_command_reply(struct xenbus_file_priv *u,
+                               unsigned int msg_type, const char *reply)
+{
+       struct {
+               struct xsd_sockmsg hdr;
+               const char body[16];
+       } msg;
+       int rc;
+
+       msg.hdr = u->u.msg;
+       msg.hdr.type = msg_type;
+       msg.hdr.len = strlen(reply) + 1;
+       if (msg.hdr.len > sizeof(msg.body))
+               return -E2BIG;
+
+       mutex_lock(&u->reply_mutex);
+       rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len);
+       wake_up(&u->read_waitq);
+       mutex_unlock(&u->reply_mutex);
+
+       return rc;
+}
+
 static int xenbus_write_transaction(unsigned msg_type,
                                    struct xenbus_file_priv *u)
 {
@@ -316,12 +339,12 @@ static int xenbus_write_transaction(unsigned msg_type,
                        rc = -ENOMEM;
                        goto out;
                }
-       } else if (msg_type == XS_TRANSACTION_END) {
+       } else if (u->u.msg.tx_id != 0) {
                list_for_each_entry(trans, &u->transactions, list)
                        if (trans->handle.id == u->u.msg.tx_id)
                                break;
                if (&trans->list == &u->transactions)
-                       return -ESRCH;
+                       return xenbus_command_reply(u, XS_ERROR, "ENOENT");
        }
 
        reply = xenbus_dev_request_and_reply(&u->u.msg);
@@ -372,12 +395,12 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
        path = u->u.buffer + sizeof(u->u.msg);
        token = memchr(path, 0, u->u.msg.len);
        if (token == NULL) {
-               rc = -EILSEQ;
+               rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
                goto out;
        }
        token++;
        if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) {
-               rc = -EILSEQ;
+               rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
                goto out;
        }
 
@@ -411,23 +434,7 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
        }
 
        /* Success.  Synthesize a reply to say all is OK. */
-       {
-               struct {
-                       struct xsd_sockmsg hdr;
-                       char body[3];
-               } __packed reply = {
-                       {
-                               .type = msg_type,
-                               .len = sizeof(reply.body)
-                       },
-                       "OK"
-               };
-
-               mutex_lock(&u->reply_mutex);
-               rc = queue_reply(&u->read_buffers, &reply, sizeof(reply));
-               wake_up(&u->read_waitq);
-               mutex_unlock(&u->reply_mutex);
-       }
+       rc = xenbus_command_reply(u, msg_type, "OK");
 
 out:
        return rc;
index 4ab67e8cb776ac18fd12bfec2fa7816800f3f5fa..873b4ca82ccbcde4c108a2592fbd7f46b88273a0 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1085,7 +1085,8 @@ static void aio_complete(struct kiocb *kiocb, long res, long res2)
                 * Tell lockdep we inherited freeze protection from submission
                 * thread.
                 */
-               __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE);
+               if (S_ISREG(file_inode(file)->i_mode))
+                       __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE);
                file_end_write(file);
        }
 
@@ -1525,7 +1526,8 @@ static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored,
                 * by telling it the lock got released so that it doesn't
                 * complain about held lock when we return to userspace.
                 */
-               __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
+               if (S_ISREG(file_inode(file)->i_mode))
+                       __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
        }
        kfree(iovec);
        return ret;
index 29a02daf08a97896c600e88317e646dbf3ab3c9b..422370293cfd8cfc9b0f527f73498ce7c28244be 100644 (file)
@@ -2298,6 +2298,7 @@ static int elf_core_dump(struct coredump_params *cprm)
                                goto end_coredump;
                }
        }
+       dump_truncate(cprm);
 
        if (!elf_core_write_extra_data(cprm))
                goto end_coredump;
index 6254cee8f8f382bf8aa881426453bae189973d34..5db5d1340d69eccf475f0feac7f85665bd6aceb5 100644 (file)
@@ -328,6 +328,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
        struct file *file = iocb->ki_filp;
        struct inode *inode = bdev_file_inode(file);
        struct block_device *bdev = I_BDEV(inode);
+       struct blk_plug plug;
        struct blkdev_dio *dio;
        struct bio *bio;
        bool is_read = (iov_iter_rw(iter) == READ);
@@ -353,6 +354,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
        dio->multi_bio = false;
        dio->should_dirty = is_read && (iter->type == ITER_IOVEC);
 
+       blk_start_plug(&plug);
        for (;;) {
                bio->bi_bdev = bdev;
                bio->bi_iter.bi_sector = pos >> 9;
@@ -394,6 +396,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
                submit_bio(bio);
                bio = bio_alloc(GFP_KERNEL, nr_pages);
        }
+       blk_finish_plug(&plug);
 
        if (!dio->is_sync)
                return -EIOCBQUEUED;
index 63d197724519b30d8392546f9a8ba9855feaf5aa..ff0b0be92d6122402f02c039eed8e4599fdce4df 100644 (file)
@@ -273,6 +273,8 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
        unsigned long flags;
 
        while (1) {
+               void *wtag;
+
                spin_lock_irqsave(lock, flags);
                if (list_empty(list))
                        break;
@@ -299,11 +301,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
                spin_unlock_irqrestore(lock, flags);
 
                /*
-                * we don't want to call the ordered free functions
-                * with the lock held though
+                * We don't want to call the ordered free functions with the
+                * lock held though. Save the work as tag for the trace event,
+                * because the callback could free the structure.
                 */
+               wtag = work;
                work->ordered_free(work);
-               trace_btrfs_all_work_done(work);
+               trace_btrfs_all_work_done(wq->fs_info, wtag);
        }
        spin_unlock_irqrestore(lock, flags);
 }
@@ -311,6 +315,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
 static void normal_work_helper(struct btrfs_work *work)
 {
        struct __btrfs_workqueue *wq;
+       void *wtag;
        int need_order = 0;
 
        /*
@@ -324,6 +329,8 @@ static void normal_work_helper(struct btrfs_work *work)
        if (work->ordered_func)
                need_order = 1;
        wq = work->wq;
+       /* Safe for tracepoints in case work gets freed by the callback */
+       wtag = work;
 
        trace_btrfs_work_sched(work);
        thresh_exec_hook(wq);
@@ -333,7 +340,7 @@ static void normal_work_helper(struct btrfs_work *work)
                run_ordered_work(wq);
        }
        if (!need_order)
-               trace_btrfs_all_work_done(work);
+               trace_btrfs_all_work_done(wq->fs_info, wtag);
 }
 
 void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
index e97302f437a16db24db7d2aa6ae202cde3309fb6..dcd2e798767e57a80e7f50ff791a675e394a62c0 100644 (file)
@@ -2522,11 +2522,11 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                if (ref && ref->seq &&
                    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
                        spin_unlock(&locked_ref->lock);
-                       btrfs_delayed_ref_unlock(locked_ref);
                        spin_lock(&delayed_refs->lock);
                        locked_ref->processing = 0;
                        delayed_refs->num_heads_ready++;
                        spin_unlock(&delayed_refs->lock);
+                       btrfs_delayed_ref_unlock(locked_ref);
                        locked_ref = NULL;
                        cond_resched();
                        count++;
@@ -2572,7 +2572,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                                         */
                                        if (must_insert_reserved)
                                                locked_ref->must_insert_reserved = 1;
+                                       spin_lock(&delayed_refs->lock);
                                        locked_ref->processing = 0;
+                                       delayed_refs->num_heads_ready++;
+                                       spin_unlock(&delayed_refs->lock);
                                        btrfs_debug(fs_info,
                                                    "run_delayed_extent_op returned %d",
                                                    ret);
@@ -7384,7 +7387,8 @@ btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
 
                spin_unlock(&cluster->refill_lock);
 
-               down_read(&used_bg->data_rwsem);
+               /* We should only have one-level nested. */
+               down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);
 
                spin_lock(&cluster->refill_lock);
                if (used_bg == cluster->block_group)
index f2b281ad7af6b9db26b48c6d4f072a850c19d58a..4e024260ad713ffc583d8f2ffaeb7ba6a025014d 100644 (file)
@@ -7059,7 +7059,7 @@ insert:
        write_unlock(&em_tree->lock);
 out:
 
-       trace_btrfs_get_extent(root, em);
+       trace_btrfs_get_extent(root, inode, em);
 
        btrfs_free_path(path);
        if (trans) {
@@ -7623,11 +7623,18 @@ static void adjust_dio_outstanding_extents(struct inode *inode,
         * within our reservation, otherwise we need to adjust our inode
         * counter appropriately.
         */
-       if (dio_data->outstanding_extents) {
+       if (dio_data->outstanding_extents >= num_extents) {
                dio_data->outstanding_extents -= num_extents;
        } else {
+               /*
+                * If dio write length has been split due to no large enough
+                * contiguous space, we need to compensate our inode counter
+                * appropriately.
+                */
+               u64 num_needed = num_extents - dio_data->outstanding_extents;
+
                spin_lock(&BTRFS_I(inode)->lock);
-               BTRFS_I(inode)->outstanding_extents += num_extents;
+               BTRFS_I(inode)->outstanding_extents += num_needed;
                spin_unlock(&BTRFS_I(inode)->lock);
        }
 }
index f10bf5213ed8a48b95a8cf3cf51dc3c2c81b8463..eeffff84f280958cfd117e860055d7b9fc683377 100644 (file)
@@ -37,6 +37,7 @@
  */
 #define LOG_INODE_ALL 0
 #define LOG_INODE_EXISTS 1
+#define LOG_OTHER_INODE 2
 
 /*
  * directory trouble cases
@@ -4641,7 +4642,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
        if (S_ISDIR(inode->i_mode) ||
            (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
                       &BTRFS_I(inode)->runtime_flags) &&
-            inode_only == LOG_INODE_EXISTS))
+            inode_only >= LOG_INODE_EXISTS))
                max_key.type = BTRFS_XATTR_ITEM_KEY;
        else
                max_key.type = (u8)-1;
@@ -4665,7 +4666,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                return ret;
        }
 
-       mutex_lock(&BTRFS_I(inode)->log_mutex);
+       if (inode_only == LOG_OTHER_INODE) {
+               inode_only = LOG_INODE_EXISTS;
+               mutex_lock_nested(&BTRFS_I(inode)->log_mutex,
+                                 SINGLE_DEPTH_NESTING);
+       } else {
+               mutex_lock(&BTRFS_I(inode)->log_mutex);
+       }
 
        /*
         * a brute force approach to making sure we get the most uptodate
@@ -4817,7 +4824,7 @@ again:
                                 * unpin it.
                                 */
                                err = btrfs_log_inode(trans, root, other_inode,
-                                                     LOG_INODE_EXISTS,
+                                                     LOG_OTHER_INODE,
                                                      0, LLONG_MAX, ctx);
                                iput(other_inode);
                                if (err)
index 161342b73ce50256bd2cba63b0d1c5ece503f599..726f928238d0c86a931cd58613799e59cc35fdf8 100644 (file)
@@ -352,7 +352,5 @@ skip:
 
 out:
        btrfs_free_path(path);
-       if (ret)
-               btrfs_warn(fs_info, "btrfs_uuid_tree_iterate failed %d", ret);
-       return 0;
+       return ret;
 }
index d21771fcf7d345ab4299cb7fa25881ffcc61ef52..0e87401cf33535b03a1d2aa9da6e919d8a56a906 100644 (file)
@@ -1660,7 +1660,7 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
                        head = page_buffers(page);
                        bh = head;
                        do {
-                               if (!buffer_mapped(bh))
+                               if (!buffer_mapped(bh) || (bh->b_blocknr < block))
                                        goto next;
                                if (bh->b_blocknr >= block + len)
                                        break;
index 9cd0c0ea7cdbd5b683ab035927636f9f56b751d3..e4b066cd912ad9ea249c3c88f81026a68186095c 100644 (file)
@@ -502,9 +502,9 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
                dout(" head snapc %p has %d dirty pages\n",
                     snapc, ci->i_wrbuffer_ref_head);
                if (truncate_size)
-                       *truncate_size = capsnap->truncate_size;
+                       *truncate_size = ci->i_truncate_size;
                if (truncate_seq)
-                       *truncate_seq = capsnap->truncate_seq;
+                       *truncate_seq = ci->i_truncate_seq;
        }
        spin_unlock(&ci->i_ceph_lock);
        return snapc;
index 4f49253387a0a59d238a041b1ec8fc9fd7185c40..ec6b35e9f966bfae46e69dfcaffee2769d699bfd 100644 (file)
@@ -2106,6 +2106,11 @@ static int __do_request(struct ceph_mds_client *mdsc,
                        dout("do_request mdsmap err %d\n", err);
                        goto finish;
                }
+               if (mdsc->mdsmap->m_epoch == 0) {
+                       dout("do_request no mdsmap, waiting for map\n");
+                       list_add(&req->r_wait, &mdsc->waiting_for_map);
+                       goto finish;
+               }
                if (!(mdsc->fsc->mount_options->flags &
                      CEPH_MOUNT_OPT_MOUNTWAIT) &&
                    !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
index e525b6017cdf863fd811425fb603070be40420b2..ae6b05629ca174d714bbdb3db477bee8f0d8bff8 100644 (file)
@@ -833,3 +833,21 @@ int dump_align(struct coredump_params *cprm, int align)
        return mod ? dump_skip(cprm, align - mod) : 1;
 }
 EXPORT_SYMBOL(dump_align);
+
+/*
+ * Ensures that file size is big enough to contain the current file
+ * postion. This prevents gdb from complaining about a truncated file
+ * if the last "write" to the file was dump_skip.
+ */
+void dump_truncate(struct coredump_params *cprm)
+{
+       struct file *file = cprm->file;
+       loff_t offset;
+
+       if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
+               offset = file->f_op->llseek(file, 0, SEEK_CUR);
+               if (i_size_read(file->f_mapping->host) < offset)
+                       do_truncate(file->f_path.dentry, offset, 0, file);
+       }
+}
+EXPORT_SYMBOL(dump_truncate);
index 6eeea1dcba41c2fa75c479ce1a3fa16f7cd2e7a5..95cd4c3b06c326708a3315d3da86bdb9aafd5469 100644 (file)
@@ -248,7 +248,8 @@ retry:
                goto out;
 
        if (fscrypt_dummy_context_enabled(inode)) {
-               memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
+               memset(raw_key, 0x42, keysize/2);
+               memset(raw_key+keysize/2, 0x24, keysize - (keysize/2));
                goto got_key;
        }
 
index 6ed7c2eebeec53c7656054d05061dc83a23ef1c0..d6cd7ea4851da877b13c7af306fd07f8468a54f4 100644 (file)
@@ -179,6 +179,11 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
                BUG_ON(1);
        }
 
+       /* No restrictions on file types which are never encrypted */
+       if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) &&
+           !S_ISLNK(child->i_mode))
+               return 1;
+
        /* no restrictions if the parent directory is not encrypted */
        if (!parent->i_sb->s_cop->is_encrypted(parent))
                return 1;
index 5c74f60d0a5094dc0a27f27ae0acd41667414332..ddcddfeaa03bd942e83738d34c4abaed06fa2709 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -691,8 +691,8 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
                                      pgoff_t index, unsigned long pfn)
 {
        struct vm_area_struct *vma;
-       pte_t *ptep;
-       pte_t pte;
+       pte_t pte, *ptep = NULL;
+       pmd_t *pmdp = NULL;
        spinlock_t *ptl;
        bool changed;
 
@@ -707,21 +707,42 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
 
                address = pgoff_address(index, vma);
                changed = false;
-               if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
+               if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
                        continue;
-               if (pfn != pte_pfn(*ptep))
-                       goto unlock;
-               if (!pte_dirty(*ptep) && !pte_write(*ptep))
-                       goto unlock;
 
-               flush_cache_page(vma, address, pfn);
-               pte = ptep_clear_flush(vma, address, ptep);
-               pte = pte_wrprotect(pte);
-               pte = pte_mkclean(pte);
-               set_pte_at(vma->vm_mm, address, ptep, pte);
-               changed = true;
-unlock:
-               pte_unmap_unlock(ptep, ptl);
+               if (pmdp) {
+#ifdef CONFIG_FS_DAX_PMD
+                       pmd_t pmd;
+
+                       if (pfn != pmd_pfn(*pmdp))
+                               goto unlock_pmd;
+                       if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
+                               goto unlock_pmd;
+
+                       flush_cache_page(vma, address, pfn);
+                       pmd = pmdp_huge_clear_flush(vma, address, pmdp);
+                       pmd = pmd_wrprotect(pmd);
+                       pmd = pmd_mkclean(pmd);
+                       set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+                       changed = true;
+unlock_pmd:
+                       spin_unlock(ptl);
+#endif
+               } else {
+                       if (pfn != pte_pfn(*ptep))
+                               goto unlock_pte;
+                       if (!pte_dirty(*ptep) && !pte_write(*ptep))
+                               goto unlock_pte;
+
+                       flush_cache_page(vma, address, pfn);
+                       pte = ptep_clear_flush(vma, address, ptep);
+                       pte = pte_wrprotect(pte);
+                       pte = pte_mkclean(pte);
+                       set_pte_at(vma->vm_mm, address, ptep, pte);
+                       changed = true;
+unlock_pte:
+                       pte_unmap_unlock(ptep, ptl);
+               }
 
                if (changed)
                        mmu_notifier_invalidate_page(vma->vm_mm, address);
index aeae8c06345155e35e6f9d1567d004c6d073ea66..c87bae4376b848f4204c06016eeb223f675dc7db 100644 (file)
@@ -906,6 +906,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
                        struct buffer_head *map_bh)
 {
        const unsigned blkbits = sdio->blkbits;
+       const unsigned i_blkbits = blkbits + sdio->blkfactor;
        int ret = 0;
 
        while (sdio->block_in_file < sdio->final_block_in_request) {
@@ -949,7 +950,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
                                        clean_bdev_aliases(
                                                map_bh->b_bdev,
                                                map_bh->b_blocknr,
-                                               map_bh->b_size >> blkbits);
+                                               map_bh->b_size >> i_blkbits);
                                }
 
                                if (!sdio->blkfactor)
index 0738f48293ccbfe0fd528ededfd6a4a08fe4251d..0d880245375890058602437d3c709bfedd3945b8 100644 (file)
@@ -713,8 +713,8 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
        }
        sector = SECTOR_FROM_BLOCK(blkstart);
 
-       if (sector & (bdev_zone_size(bdev) - 1) ||
-                               nr_sects != bdev_zone_size(bdev)) {
+       if (sector & (bdev_zone_sectors(bdev) - 1) ||
+           nr_sects != bdev_zone_sectors(bdev)) {
                f2fs_msg(sbi->sb, KERN_INFO,
                        "(%d) %s: Unaligned discard attempted (block %x + %x)",
                        devi, sbi->s_ndevs ? FDEV(devi).path: "",
index 702638e21c7621a4804a8c84fefc7635785ff42d..46fd30d8af7763c93244092b6673e3cc2ff5f05f 100644 (file)
@@ -1553,16 +1553,16 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
                return 0;
 
        if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
-                               SECTOR_TO_BLOCK(bdev_zone_size(bdev)))
+                               SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
                return -EINVAL;
-       sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_size(bdev));
+       sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
        if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
                                __ilog2_u32(sbi->blocks_per_blkz))
                return -EINVAL;
        sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
        FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
                                        sbi->log_blocks_per_blkz;
-       if (nr_sectors & (bdev_zone_size(bdev) - 1))
+       if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
                FDEV(devi).nr_blkz++;
 
        FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
index d3fea0bd89e2cbedcea630ba3e966963720f725a..6043306e8e21518daeb945ebf2569d19fe9edbbf 100644 (file)
@@ -510,18 +510,6 @@ void fsnotify_detach_group_marks(struct fsnotify_group *group)
        }
 }
 
-void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old)
-{
-       assert_spin_locked(&old->lock);
-       new->inode = old->inode;
-       new->mnt = old->mnt;
-       if (old->group)
-               fsnotify_get_group(old->group);
-       new->group = old->group;
-       new->mask = old->mask;
-       new->free_mark = old->free_mark;
-}
-
 /*
  * Nothing fancy, just initialize lists and locks and counters.
  */
index 83d576f6a287b13939a7dd70622ae4d89b5031f8..77d1632e905d8b1ec249cf86c7ece183c38f2746 100644 (file)
@@ -3303,6 +3303,16 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
        mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
             lockres->l_level, new_level);
 
+       /*
+        * On DLM_LKF_VALBLK, fsdlm behaves differently with o2cb. It always
+        * expects DLM_LKF_VALBLK being set if the LKB has LVB, so that
+        * we can recover correctly from node failure. Otherwise, we may get
+        * invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set.
+        */
+       if (!ocfs2_is_o2cb_active() &&
+           lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
+               lvb = 1;
+
        if (lvb)
                dlm_flags |= DLM_LKF_VALBLK;
 
index 52c07346bea3f8960399184c0e6aead7545a25f2..820359096c7aa8df5b76e2a0c1404ce7f9d0337d 100644 (file)
@@ -48,6 +48,12 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
  */
 static struct ocfs2_stack_plugin *active_stack;
 
+inline int ocfs2_is_o2cb_active(void)
+{
+       return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB);
+}
+EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active);
+
 static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name)
 {
        struct ocfs2_stack_plugin *p;
index f2dce10fae543c254dcb4e6628d357b60a3ac16c..e3036e1790e86da7b4e13dcb0b8c88e3f19b6d50 100644 (file)
@@ -298,6 +298,9 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p
 int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin);
 void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin);
 
+/* In ocfs2_downconvert_lock(), we need to know which stack we are using */
+int ocfs2_is_o2cb_active(void);
+
 extern struct kset *ocfs2_kset;
 
 #endif  /* STACKGLUE_H */
index 595522022aca04beeeabd3d2ed4e73992f268296..c9d48dc784953fa4af62f9af58dfbb56ed2faa70 100644 (file)
@@ -922,11 +922,10 @@ int simple_set_acl(struct inode *inode, struct posix_acl *acl, int type)
        int error;
 
        if (type == ACL_TYPE_ACCESS) {
-               error = posix_acl_equiv_mode(acl, &inode->i_mode);
-               if (error < 0)
-                       return 0;
-               if (error == 0)
-                       acl = NULL;
+               error = posix_acl_update_mode(inode,
+                               &inode->i_mode, &acl);
+               if (error)
+                       return error;
        }
 
        inode->i_ctime = current_time(inode);
index e5ebc37704608a336dd8690d55f06389cd0173bd..d346d42c54d1590250040f0b36c05367287e7bf5 100644 (file)
@@ -256,6 +256,9 @@ xfs_ag_resv_init(
                        goto out;
        }
 
+       ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
+              xfs_perag_resv(pag, XFS_AG_RESV_AGFL)->ar_reserved <=
+              pag->pagf_freeblks + pag->pagf_flcount);
 out:
        return error;
 }
index 5050056a0b06445a93845987a52ec8356c499f80..9f06a211e1570549cb8df3cdba2de2d01a79f890 100644 (file)
@@ -95,10 +95,7 @@ unsigned int
 xfs_alloc_set_aside(
        struct xfs_mount        *mp)
 {
-       unsigned int            blocks;
-
-       blocks = 4 + (mp->m_sb.sb_agcount * XFS_ALLOC_AGFL_RESERVE);
-       return blocks;
+       return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
 }
 
 /*
@@ -365,35 +362,11 @@ xfs_alloc_fix_len(
                return;
        ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
        ASSERT(rlen % args->prod == args->mod);
+       ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
+               rlen + args->minleft);
        args->len = rlen;
 }
 
-/*
- * Fix up length if there is too little space left in the a.g.
- * Return 1 if ok, 0 if too little, should give up.
- */
-STATIC int
-xfs_alloc_fix_minleft(
-       xfs_alloc_arg_t *args)          /* allocation argument structure */
-{
-       xfs_agf_t       *agf;           /* a.g. freelist header */
-       int             diff;           /* free space difference */
-
-       if (args->minleft == 0)
-               return 1;
-       agf = XFS_BUF_TO_AGF(args->agbp);
-       diff = be32_to_cpu(agf->agf_freeblks)
-               - args->len - args->minleft;
-       if (diff >= 0)
-               return 1;
-       args->len += diff;              /* shrink the allocated space */
-       /* casts to (int) catch length underflows */
-       if ((int)args->len >= (int)args->minlen)
-               return 1;
-       args->agbno = NULLAGBLOCK;
-       return 0;
-}
-
 /*
  * Update the two btrees, logically removing from freespace the extent
  * starting at rbno, rlen blocks.  The extent is contained within the
@@ -689,8 +662,6 @@ xfs_alloc_ag_vextent(
        xfs_alloc_arg_t *args)  /* argument structure for allocation */
 {
        int             error=0;
-       xfs_extlen_t    reservation;
-       xfs_extlen_t    oldmax;
 
        ASSERT(args->minlen > 0);
        ASSERT(args->maxlen > 0);
@@ -698,20 +669,6 @@ xfs_alloc_ag_vextent(
        ASSERT(args->mod < args->prod);
        ASSERT(args->alignment > 0);
 
-       /*
-        * Clamp maxlen to the amount of free space minus any reservations
-        * that have been made.
-        */
-       oldmax = args->maxlen;
-       reservation = xfs_ag_resv_needed(args->pag, args->resv);
-       if (args->maxlen > args->pag->pagf_freeblks - reservation)
-               args->maxlen = args->pag->pagf_freeblks - reservation;
-       if (args->maxlen == 0) {
-               args->agbno = NULLAGBLOCK;
-               args->maxlen = oldmax;
-               return 0;
-       }
-
        /*
         * Branch to correct routine based on the type.
         */
@@ -731,8 +688,6 @@ xfs_alloc_ag_vextent(
                /* NOTREACHED */
        }
 
-       args->maxlen = oldmax;
-
        if (error || args->agbno == NULLAGBLOCK)
                return error;
 
@@ -841,9 +796,6 @@ xfs_alloc_ag_vextent_exact(
        args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
                                                - args->agbno;
        xfs_alloc_fix_len(args);
-       if (!xfs_alloc_fix_minleft(args))
-               goto not_found;
-
        ASSERT(args->agbno + args->len <= tend);
 
        /*
@@ -1149,12 +1101,7 @@ restart:
                XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
                ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
                args->len = blen;
-               if (!xfs_alloc_fix_minleft(args)) {
-                       xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
-                       trace_xfs_alloc_near_nominleft(args);
-                       return 0;
-               }
-               blen = args->len;
+
                /*
                 * We are allocating starting at bnew for blen blocks.
                 */
@@ -1346,12 +1293,6 @@ restart:
         */
        args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
        xfs_alloc_fix_len(args);
-       if (!xfs_alloc_fix_minleft(args)) {
-               trace_xfs_alloc_near_nominleft(args);
-               xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
-               xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
-               return 0;
-       }
        rlen = args->len;
        (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
                                     args->datatype, ltbnoa, ltlena, &ltnew);
@@ -1553,8 +1494,6 @@ restart:
        }
        xfs_alloc_fix_len(args);
 
-       if (!xfs_alloc_fix_minleft(args))
-               goto out_nominleft;
        rlen = args->len;
        XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
        /*
@@ -2056,7 +1995,7 @@ xfs_alloc_space_available(
        int                     flags)
 {
        struct xfs_perag        *pag = args->pag;
-       xfs_extlen_t            longest;
+       xfs_extlen_t            alloc_len, longest;
        xfs_extlen_t            reservation; /* blocks that are still reserved */
        int                     available;
 
@@ -2066,17 +2005,28 @@ xfs_alloc_space_available(
        reservation = xfs_ag_resv_needed(pag, args->resv);
 
        /* do we have enough contiguous free space for the allocation? */
+       alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
        longest = xfs_alloc_longest_free_extent(args->mp, pag, min_free,
                        reservation);
-       if ((args->minlen + args->alignment + args->minalignslop - 1) > longest)
+       if (longest < alloc_len)
                return false;
 
        /* do we have enough free space remaining for the allocation? */
        available = (int)(pag->pagf_freeblks + pag->pagf_flcount -
-                         reservation - min_free - args->total);
-       if (available < (int)args->minleft || available <= 0)
+                         reservation - min_free - args->minleft);
+       if (available < (int)max(args->total, alloc_len))
                return false;
 
+       /*
+        * Clamp maxlen to the amount of free space available for the actual
+        * extent allocation.
+        */
+       if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
+               args->maxlen = available;
+               ASSERT(args->maxlen > 0);
+               ASSERT(args->maxlen >= args->minlen);
+       }
+
        return true;
 }
 
@@ -2122,7 +2072,8 @@ xfs_alloc_fix_freelist(
        }
 
        need = xfs_alloc_min_freelist(mp, pag);
-       if (!xfs_alloc_space_available(args, need, flags))
+       if (!xfs_alloc_space_available(args, need, flags |
+                       XFS_ALLOC_FLAG_CHECK))
                goto out_agbp_relse;
 
        /*
@@ -2638,12 +2589,10 @@ xfs_alloc_vextent(
        xfs_agblock_t   agsize; /* allocation group size */
        int             error;
        int             flags;  /* XFS_ALLOC_FLAG_... locking flags */
-       xfs_extlen_t    minleft;/* minimum left value, temp copy */
        xfs_mount_t     *mp;    /* mount structure pointer */
        xfs_agnumber_t  sagno;  /* starting allocation group number */
        xfs_alloctype_t type;   /* input allocation type */
        int             bump_rotor = 0;
-       int             no_min = 0;
        xfs_agnumber_t  rotorstep = xfs_rotorstep; /* inode32 agf stepper */
 
        mp = args->mp;
@@ -2672,7 +2621,6 @@ xfs_alloc_vextent(
                trace_xfs_alloc_vextent_badargs(args);
                return 0;
        }
-       minleft = args->minleft;
 
        switch (type) {
        case XFS_ALLOCTYPE_THIS_AG:
@@ -2683,9 +2631,7 @@ xfs_alloc_vextent(
                 */
                args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
                args->pag = xfs_perag_get(mp, args->agno);
-               args->minleft = 0;
                error = xfs_alloc_fix_freelist(args, 0);
-               args->minleft = minleft;
                if (error) {
                        trace_xfs_alloc_vextent_nofix(args);
                        goto error0;
@@ -2750,9 +2696,7 @@ xfs_alloc_vextent(
                 */
                for (;;) {
                        args->pag = xfs_perag_get(mp, args->agno);
-                       if (no_min) args->minleft = 0;
                        error = xfs_alloc_fix_freelist(args, flags);
-                       args->minleft = minleft;
                        if (error) {
                                trace_xfs_alloc_vextent_nofix(args);
                                goto error0;
@@ -2792,20 +2736,17 @@ xfs_alloc_vextent(
                         * or switch to non-trylock mode.
                         */
                        if (args->agno == sagno) {
-                               if (no_min == 1) {
+                               if (flags == 0) {
                                        args->agbno = NULLAGBLOCK;
                                        trace_xfs_alloc_vextent_allfailed(args);
                                        break;
                                }
-                               if (flags == 0) {
-                                       no_min = 1;
-                               } else {
-                                       flags = 0;
-                                       if (type == XFS_ALLOCTYPE_START_BNO) {
-                                               args->agbno = XFS_FSB_TO_AGBNO(mp,
-                                                       args->fsbno);
-                                               args->type = XFS_ALLOCTYPE_NEAR_BNO;
-                                       }
+
+                               flags = 0;
+                               if (type == XFS_ALLOCTYPE_START_BNO) {
+                                       args->agbno = XFS_FSB_TO_AGBNO(mp,
+                                               args->fsbno);
+                                       args->type = XFS_ALLOCTYPE_NEAR_BNO;
                                }
                        }
                        xfs_perag_put(args->pag);
index 7c404a6b0ae32292cf9a6eb39efd15026e8e76ed..1d0f48a501a3d6e9575b634ac022f30a91a2f309 100644 (file)
@@ -56,7 +56,7 @@ typedef unsigned int xfs_alloctype_t;
 #define        XFS_ALLOC_FLAG_FREEING  0x00000002  /* indicate caller is freeing extents*/
 #define        XFS_ALLOC_FLAG_NORMAP   0x00000004  /* don't modify the rmapbt */
 #define        XFS_ALLOC_FLAG_NOSHRINK 0x00000008  /* don't shrink the freelist */
-
+#define        XFS_ALLOC_FLAG_CHECK    0x00000010  /* test only, don't modify args */
 
 /*
  * Argument structure for xfs_alloc routines.
index 2760bc3b2536c46eedd63c66e127e953dab9ce55..44773c9eb957dd8f6c5904ef9d41c3de880d14be 100644 (file)
@@ -3812,7 +3812,6 @@ xfs_bmap_btalloc(
                args.fsbno = 0;
                args.type = XFS_ALLOCTYPE_FIRST_AG;
                args.total = ap->minlen;
-               args.minleft = 0;
                if ((error = xfs_alloc_vextent(&args)))
                        return error;
                ap->dfops->dop_low = true;
@@ -4344,8 +4343,6 @@ xfs_bmapi_allocate(
        if (error)
                return error;
 
-       if (bma->dfops->dop_low)
-               bma->minleft = 0;
        if (bma->cur)
                bma->cur->bc_private.b.firstblock = *bma->firstblock;
        if (bma->blkno == NULLFSBLOCK)
index d6330c297ca0a4858666403f513f43ce4e16d1a0..d9be241fc86fb39207e725d28bdcdccf943a393d 100644 (file)
@@ -502,12 +502,11 @@ try_another_ag:
        if (args.fsbno == NULLFSBLOCK && args.minleft) {
                /*
                 * Could not find an AG with enough free space to satisfy
-                * a full btree split.  Try again without minleft and if
+                * a full btree split.  Try again and if
                 * successful activate the lowspace algorithm.
                 */
                args.fsbno = 0;
                args.type = XFS_ALLOCTYPE_FIRST_AG;
-               args.minleft = 0;
                error = xfs_alloc_vextent(&args);
                if (error)
                        goto error0;
index 6fb2215f8ff77bf0342e5f61dd6d060987e13d77..50add5272807e95374c3043819205f68c5ec55fa 100644 (file)
@@ -409,13 +409,14 @@ xfs_refcountbt_calc_size(
  */
 xfs_extlen_t
 xfs_refcountbt_max_size(
-       struct xfs_mount        *mp)
+       struct xfs_mount        *mp,
+       xfs_agblock_t           agblocks)
 {
        /* Bail out if we're uninitialized, which can happen in mkfs. */
        if (mp->m_refc_mxr[0] == 0)
                return 0;
 
-       return xfs_refcountbt_calc_size(mp, mp->m_sb.sb_agblocks);
+       return xfs_refcountbt_calc_size(mp, agblocks);
 }
 
 /*
@@ -430,22 +431,24 @@ xfs_refcountbt_calc_reserves(
 {
        struct xfs_buf          *agbp;
        struct xfs_agf          *agf;
+       xfs_agblock_t           agblocks;
        xfs_extlen_t            tree_len;
        int                     error;
 
        if (!xfs_sb_version_hasreflink(&mp->m_sb))
                return 0;
 
-       *ask += xfs_refcountbt_max_size(mp);
 
        error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
        if (error)
                return error;
 
        agf = XFS_BUF_TO_AGF(agbp);
+       agblocks = be32_to_cpu(agf->agf_length);
        tree_len = be32_to_cpu(agf->agf_refcount_blocks);
        xfs_buf_relse(agbp);
 
+       *ask += xfs_refcountbt_max_size(mp, agblocks);
        *used += tree_len;
 
        return error;
index 3be7768bd51a1c0ebd8c2ccc6e930a730bd39b54..9db008b955b7ed68bb62d8654073da0302668171 100644 (file)
@@ -66,7 +66,8 @@ extern void xfs_refcountbt_compute_maxlevels(struct xfs_mount *mp);
 
 extern xfs_extlen_t xfs_refcountbt_calc_size(struct xfs_mount *mp,
                unsigned long long len);
-extern xfs_extlen_t xfs_refcountbt_max_size(struct xfs_mount *mp);
+extern xfs_extlen_t xfs_refcountbt_max_size(struct xfs_mount *mp,
+               xfs_agblock_t agblocks);
 
 extern int xfs_refcountbt_calc_reserves(struct xfs_mount *mp,
                xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
index de25771764bac313ec514a882d6069de1063e5cc..74e5a54bc428fa27d485d88e871fdd77f8494252 100644 (file)
@@ -550,13 +550,14 @@ xfs_rmapbt_calc_size(
  */
 xfs_extlen_t
 xfs_rmapbt_max_size(
-       struct xfs_mount        *mp)
+       struct xfs_mount        *mp,
+       xfs_agblock_t           agblocks)
 {
        /* Bail out if we're uninitialized, which can happen in mkfs. */
        if (mp->m_rmap_mxr[0] == 0)
                return 0;
 
-       return xfs_rmapbt_calc_size(mp, mp->m_sb.sb_agblocks);
+       return xfs_rmapbt_calc_size(mp, agblocks);
 }
 
 /*
@@ -571,25 +572,24 @@ xfs_rmapbt_calc_reserves(
 {
        struct xfs_buf          *agbp;
        struct xfs_agf          *agf;
-       xfs_extlen_t            pool_len;
+       xfs_agblock_t           agblocks;
        xfs_extlen_t            tree_len;
        int                     error;
 
        if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
                return 0;
 
-       /* Reserve 1% of the AG or enough for 1 block per record. */
-       pool_len = max(mp->m_sb.sb_agblocks / 100, xfs_rmapbt_max_size(mp));
-       *ask += pool_len;
-
        error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
        if (error)
                return error;
 
        agf = XFS_BUF_TO_AGF(agbp);
+       agblocks = be32_to_cpu(agf->agf_length);
        tree_len = be32_to_cpu(agf->agf_rmap_blocks);
        xfs_buf_relse(agbp);
 
+       /* Reserve 1% of the AG or enough for 1 block per record. */
+       *ask += max(agblocks / 100, xfs_rmapbt_max_size(mp, agblocks));
        *used += tree_len;
 
        return error;
index 2a9ac472fb15a2408ca6d58addc6a5d439b61fe9..19c08e93304954d62c1c1dcdf35a36a49f38ee71 100644 (file)
@@ -60,7 +60,8 @@ extern void xfs_rmapbt_compute_maxlevels(struct xfs_mount *mp);
 
 extern xfs_extlen_t xfs_rmapbt_calc_size(struct xfs_mount *mp,
                unsigned long long len);
-extern xfs_extlen_t xfs_rmapbt_max_size(struct xfs_mount *mp);
+extern xfs_extlen_t xfs_rmapbt_max_size(struct xfs_mount *mp,
+               xfs_agblock_t agblocks);
 
 extern int xfs_rmapbt_calc_reserves(struct xfs_mount *mp,
                xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
index 0f56fcd3a5d51517b93c391bb3d97a58f205a544..631e7c0e0a29ae51eba5f7e25d8dfc96dc93c61e 100644 (file)
@@ -1152,19 +1152,22 @@ xfs_vm_releasepage(
         * block_invalidatepage() can send pages that are still marked dirty
         * but otherwise have invalidated buffers.
         *
-        * We've historically freed buffers on the latter. Instead, quietly
-        * filter out all dirty pages to avoid spurious buffer state warnings.
-        * This can likely be removed once shrink_active_list() is fixed.
+        * We want to release the latter to avoid unnecessary buildup of the
+        * LRU, skip the former and warn if we've left any lingering
+        * delalloc/unwritten buffers on clean pages. Skip pages with delalloc
+        * or unwritten buffers and warn if the page is not dirty. Otherwise
+        * try to release the buffers.
         */
-       if (PageDirty(page))
-               return 0;
-
        xfs_count_page_state(page, &delalloc, &unwritten);
 
-       if (WARN_ON_ONCE(delalloc))
+       if (delalloc) {
+               WARN_ON_ONCE(!PageDirty(page));
                return 0;
-       if (WARN_ON_ONCE(unwritten))
+       }
+       if (unwritten) {
+               WARN_ON_ONCE(!PageDirty(page));
                return 0;
+       }
 
        return try_to_free_buffers(page);
 }
index 93d12fa2670d53bf8b6bf23c51325d48467b8ab2..242e8091296daff7a029b4233d53db8241ecbcd7 100644 (file)
@@ -631,6 +631,20 @@ xfs_growfs_data_private(
        xfs_set_low_space_thresholds(mp);
        mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
 
+       /*
+        * If we expanded the last AG, free the per-AG reservation
+        * so we can reinitialize it with the new size.
+        */
+       if (new) {
+               struct xfs_perag        *pag;
+
+               pag = xfs_perag_get(mp, agno);
+               error = xfs_ag_resv_free(pag);
+               xfs_perag_put(pag);
+               if (error)
+                       goto out;
+       }
+
        /* Reserve AG metadata blocks. */
        error = xfs_fs_reserve_ag_blocks(mp);
        if (error && error != -ENOSPC)
index ff4d6311c7f4b4912e5bc1c6bdcead81ab855d81..70ca4f608321b9ac9d8123bc2ddc5f216929211c 100644 (file)
@@ -1597,7 +1597,8 @@ xfs_inode_free_cowblocks(
         * If the mapping is dirty or under writeback we cannot touch the
         * CoW fork.  Leave it alone if we're in the midst of a directio.
         */
-       if (mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
+       if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
+           mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
            mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
            atomic_read(&VFS_I(ip)->i_dio_count))
                return 0;
index c39ac14ff54009ba33a4a7313612bd2afb95117c..b1469f0a91a6c04329cb2fff2dc6e9953485a071 100644 (file)
@@ -3317,12 +3317,8 @@ xfs_log_force(
        xfs_mount_t     *mp,
        uint            flags)
 {
-       int     error;
-
        trace_xfs_log_force(mp, 0, _RET_IP_);
-       error = _xfs_log_force(mp, flags, NULL);
-       if (error)
-               xfs_warn(mp, "%s: error %d returned.", __func__, error);
+       _xfs_log_force(mp, flags, NULL);
 }
 
 /*
@@ -3466,12 +3462,8 @@ xfs_log_force_lsn(
        xfs_lsn_t       lsn,
        uint            flags)
 {
-       int     error;
-
        trace_xfs_log_force(mp, lsn, _RET_IP_);
-       error = _xfs_log_force_lsn(mp, lsn, flags, NULL);
-       if (error)
-               xfs_warn(mp, "%s: error %d returned.", __func__, error);
+       _xfs_log_force_lsn(mp, lsn, flags, NULL);
 }
 
 /*
index fe86a668a57e70419e7fc6e5f126c0aa97cecb96..6e4c7446c3d4561f85d86686d5b4a40bc4cd0ce6 100644 (file)
@@ -526,13 +526,14 @@ xfs_cui_recover(
        xfs_refcount_finish_one_cleanup(tp, rcur, error);
        error = xfs_defer_finish(&tp, &dfops, NULL);
        if (error)
-               goto abort_error;
+               goto abort_defer;
        set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
        error = xfs_trans_commit(tp);
        return error;
 
 abort_error:
        xfs_refcount_finish_one_cleanup(tp, rcur, error);
+abort_defer:
        xfs_defer_cancel(&dfops);
        xfs_trans_cancel(tp);
        return error;
index 276d3023d60f8201b635ae1f0c2ccbf26aac74fd..de6195e3891096316e51fd4efcd49ff091024a4a 100644 (file)
@@ -396,7 +396,7 @@ max_retries_show(
        int             retries;
        struct xfs_error_cfg *cfg = to_error_cfg(kobject);
 
-       if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER)
+       if (cfg->max_retries == XFS_ERR_RETRY_FOREVER)
                retries = -1;
        else
                retries = cfg->max_retries;
@@ -422,7 +422,7 @@ max_retries_store(
                return -EINVAL;
 
        if (val == -1)
-               cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
+               cfg->max_retries = XFS_ERR_RETRY_FOREVER;
        else
                cfg->max_retries = val;
        return count;
index df13637e4017342ac427894ec5c342123b77339e..939869c772b17a07a9776e323e1a253dba1f92d2 100644 (file)
@@ -1,7 +1,13 @@
 #include <linux/bitops.h>
+#undef __memset
 extern void *__memset(void *, int, __kernel_size_t);
+#undef __memcpy
 extern void *__memcpy(void *, const void *, __kernel_size_t);
+#undef __memmove
 extern void *__memmove(void *, const void *, __kernel_size_t);
+#undef memset
 extern void *memset(void *, int, __kernel_size_t);
+#undef memcpy
 extern void *memcpy(void *, const void *, __kernel_size_t);
+#undef memmove
 extern void *memmove(void *, const void *, __kernel_size_t);
diff --git a/include/dt-bindings/mfd/tps65217.h b/include/dt-bindings/mfd/tps65217.h
deleted file mode 100644 (file)
index cafb9e6..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * This header provides macros for TI TPS65217 DT bindings.
- *
- * Copyright (C) 2016 Texas Instruments
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __DT_BINDINGS_TPS65217_H__
-#define __DT_BINDINGS_TPS65217_H__
-
-#define TPS65217_IRQ_USB       0
-#define TPS65217_IRQ_AC                1
-#define TPS65217_IRQ_PB                2
-
-#endif
index 83695641bd5ec272551857c448cc9b4f354898b8..1ca8e8fd10789d0fff1aa0b21ede64b72075e094 100644 (file)
@@ -739,7 +739,7 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
        }
 }
 
-static inline unsigned int blk_queue_zone_size(struct request_queue *q)
+static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
 {
        return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
 }
@@ -1000,6 +1000,19 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
        return blk_rq_cur_bytes(rq) >> 9;
 }
 
+/*
+ * Some commands like WRITE SAME have a payload or data transfer size which
+ * is different from the size of the request.  Any driver that supports such
+ * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
+ * calculate the data transfer size.
+ */
+static inline unsigned int blk_rq_payload_bytes(struct request *rq)
+{
+       if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+               return rq->special_vec.bv_len;
+       return blk_rq_bytes(rq);
+}
+
 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
                                                     int op)
 {
@@ -1536,12 +1549,12 @@ static inline bool bdev_is_zoned(struct block_device *bdev)
        return false;
 }
 
-static inline unsigned int bdev_zone_size(struct block_device *bdev)
+static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
 {
        struct request_queue *q = bdev_get_queue(bdev);
 
        if (q)
-               return blk_queue_zone_size(q);
+               return blk_queue_zone_sectors(q);
 
        return 0;
 }
index d016a121a8c46492bd6feea5b093dcea9bf27933..28ffa94aed6b85d10531e7dc11864f9ebc8701f4 100644 (file)
@@ -14,6 +14,7 @@ struct coredump_params;
 extern int dump_skip(struct coredump_params *cprm, size_t nr);
 extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
 extern int dump_align(struct coredump_params *cprm, int align);
+extern void dump_truncate(struct coredump_params *cprm);
 #ifdef CONFIG_COREDUMP
 extern void do_coredump(const siginfo_t *siginfo);
 #else
index a07a476178cd1b4c59295ba2c1ee420c1bbb696e..5b1af30ece55828e655b2bf02a149fd71354f3eb 100644 (file)
@@ -103,6 +103,7 @@ typedef     struct {
 
 #define EFI_PAGE_SHIFT         12
 #define EFI_PAGE_SIZE          (1UL << EFI_PAGE_SHIFT)
+#define EFI_PAGES_MAX          (U64_MAX >> EFI_PAGE_SHIFT)
 
 typedef struct {
        u32 type;
@@ -950,6 +951,7 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
 #endif
 extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
 
+extern phys_addr_t __init efi_memmap_alloc(unsigned int num_entries);
 extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
 extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
 extern void __init efi_memmap_unmap(void);
index 0cf34d6cc253853c459e60908aa06128b4a8cf45..487246546ebebb63da20fde2eb12bd9a2914040c 100644 (file)
@@ -323,8 +323,6 @@ extern void fsnotify_init_mark(struct fsnotify_mark *mark, void (*free_mark)(str
 extern struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group, struct inode *inode);
 /* find (and take a reference) to a mark associated with group and vfsmount */
 extern struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt);
-/* copy the values from old into new */
-extern void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old);
 /* set the ignored_mask of a mark */
 extern void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask);
 /* set the mask of a mark (might pin the object into memory */
index e0341af6950e2116a43b3b0281f57fea8099c06f..76f39754e7b0299df616bc3cb909f9a35fce9ea1 100644 (file)
@@ -146,15 +146,6 @@ enum {
        DISK_EVENT_EJECT_REQUEST                = 1 << 1, /* eject requested */
 };
 
-#define BLK_SCSI_MAX_CMDS      (256)
-#define BLK_SCSI_CMD_PER_LONG  (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
-
-struct blk_scsi_cmd_filter {
-       unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
-       unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
-       struct kobject kobj;
-};
-
 struct disk_part_tbl {
        struct rcu_head rcu_head;
        int len;
index 4175dca4ac39dd7848e3abcda5eb163865ab2734..0fe0b6295ab58edfe6745467487fe19beba0d723 100644 (file)
@@ -38,9 +38,8 @@ struct vm_area_struct;
 #define ___GFP_ACCOUNT         0x100000u
 #define ___GFP_NOTRACK         0x200000u
 #define ___GFP_DIRECT_RECLAIM  0x400000u
-#define ___GFP_OTHER_NODE      0x800000u
-#define ___GFP_WRITE           0x1000000u
-#define ___GFP_KSWAPD_RECLAIM  0x2000000u
+#define ___GFP_WRITE           0x800000u
+#define ___GFP_KSWAPD_RECLAIM  0x1000000u
 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
 
 /*
@@ -172,11 +171,6 @@ struct vm_area_struct;
  * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
  *   distinguishing in the source between false positives and allocations that
  *   cannot be supported (e.g. page tables).
- *
- * __GFP_OTHER_NODE is for allocations that are on a remote node but that
- *   should not be accounted for as a remote allocation in vmstat. A
- *   typical user would be khugepaged collapsing a huge page on a remote
- *   node.
  */
 #define __GFP_COLD     ((__force gfp_t)___GFP_COLD)
 #define __GFP_NOWARN   ((__force gfp_t)___GFP_NOWARN)
@@ -184,10 +178,9 @@ struct vm_area_struct;
 #define __GFP_ZERO     ((__force gfp_t)___GFP_ZERO)
 #define __GFP_NOTRACK  ((__force gfp_t)___GFP_NOTRACK)
 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
-#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
 
 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT 26
+#define __GFP_BITS_SHIFT 25
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /*
@@ -506,11 +499,10 @@ extern void free_hot_cold_page(struct page *page, bool cold);
 extern void free_hot_cold_page_list(struct list_head *list, bool cold);
 
 struct page_frag_cache;
-extern void __page_frag_drain(struct page *page, unsigned int order,
-                             unsigned int count);
-extern void *__alloc_page_frag(struct page_frag_cache *nc,
-                              unsigned int fragsz, gfp_t gfp_mask);
-extern void __free_page_frag(void *addr);
+extern void __page_frag_cache_drain(struct page *page, unsigned int count);
+extern void *page_frag_alloc(struct page_frag_cache *nc,
+                            unsigned int fragsz, gfp_t gfp_mask);
+extern void page_frag_free(void *addr);
 
 #define __free_page(page) __free_pages((page), 0)
 #define free_page(addr) free_pages((addr), 0)
index 228bd44efa4c6efecd3af2e3ec16be8bd02d3d1b..497f2b3a5a62c8da6f87107de16519b176cc9f1f 100644 (file)
@@ -115,6 +115,16 @@ struct st_sensor_bdu {
        u8 mask;
 };
 
+/**
+ * struct st_sensor_das - ST sensor device data alignment selection
+ * @addr: address of the register.
+ * @mask: mask to write the das flag for left alignment.
+ */
+struct st_sensor_das {
+       u8 addr;
+       u8 mask;
+};
+
 /**
  * struct st_sensor_data_ready_irq - ST sensor device data-ready interrupt
  * @addr: address of the register.
@@ -185,6 +195,7 @@ struct st_sensor_transfer_function {
  * @enable_axis: Enable one or more axis of the sensor.
  * @fs: Full scale register and full scale list available.
  * @bdu: Block data update register.
+ * @das: Data Alignment Selection register.
  * @drdy_irq: Data ready register of the sensor.
  * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read.
  * @bootime: samples to discard when sensor passing from power-down to power-up.
@@ -200,6 +211,7 @@ struct st_sensor_settings {
        struct st_sensor_axis enable_axis;
        struct st_sensor_fullscale fs;
        struct st_sensor_bdu bdu;
+       struct st_sensor_das das;
        struct st_sensor_data_ready_irq drdy_irq;
        bool multi_read_bit;
        unsigned int bootime;
index 089f70f83e97c9a1adf1087f10d095a02bd3e152..23da3af459fe7af7b5e5c2eabccbda350122e88b 100644 (file)
@@ -14,6 +14,7 @@ struct static_key_deferred {
 
 #ifdef HAVE_JUMP_LABEL
 extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
+extern void static_key_deferred_flush(struct static_key_deferred *key);
 extern void
 jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
 
@@ -26,6 +27,10 @@ static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
        STATIC_KEY_CHECK_USE();
        static_key_slow_dec(&key->key);
 }
+static inline void static_key_deferred_flush(struct static_key_deferred *key)
+{
+       STATIC_KEY_CHECK_USE();
+}
 static inline void
 jump_label_rate_limit(struct static_key_deferred *key,
                unsigned long rl)
index ec819e9a115af492a8e57d2fe67384accb925436..b6e048e1045f99868642b920a005d2382e6dc316 100644 (file)
 #ifndef MDEV_H
 #define MDEV_H
 
-/* Parent device */
-struct parent_device {
-       struct device           *dev;
-       const struct parent_ops *ops;
-
-       /* internal */
-       struct kref             ref;
-       struct mutex            lock;
-       struct list_head        next;
-       struct kset             *mdev_types_kset;
-       struct list_head        type_list;
-};
-
-/* Mediated device */
-struct mdev_device {
-       struct device           dev;
-       struct parent_device    *parent;
-       uuid_le                 uuid;
-       void                    *driver_data;
-
-       /* internal */
-       struct kref             ref;
-       struct list_head        next;
-       struct kobject          *type_kobj;
-};
+struct mdev_device;
 
 /**
- * struct parent_ops - Structure to be registered for each parent device to
+ * struct mdev_parent_ops - Structure to be registered for each parent device to
  * register the device to mdev module.
  *
  * @owner:             The module owner.
@@ -86,10 +62,9 @@ struct mdev_device {
  *                     @mdev: mediated device structure
  *                     @vma: vma structure
  * Parent device that support mediated device should be registered with mdev
- * module with parent_ops structure.
+ * module with mdev_parent_ops structure.
  **/
-
-struct parent_ops {
+struct mdev_parent_ops {
        struct module   *owner;
        const struct attribute_group **dev_attr_groups;
        const struct attribute_group **mdev_attr_groups;
@@ -103,7 +78,7 @@ struct parent_ops {
                        size_t count, loff_t *ppos);
        ssize_t (*write)(struct mdev_device *mdev, const char __user *buf,
                         size_t count, loff_t *ppos);
-       ssize_t (*ioctl)(struct mdev_device *mdev, unsigned int cmd,
+       long    (*ioctl)(struct mdev_device *mdev, unsigned int cmd,
                         unsigned long arg);
        int     (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma);
 };
@@ -142,27 +117,22 @@ struct mdev_driver {
 };
 
 #define to_mdev_driver(drv)    container_of(drv, struct mdev_driver, driver)
-#define to_mdev_device(dev)    container_of(dev, struct mdev_device, dev)
-
-static inline void *mdev_get_drvdata(struct mdev_device *mdev)
-{
-       return mdev->driver_data;
-}
 
-static inline void mdev_set_drvdata(struct mdev_device *mdev, void *data)
-{
-       mdev->driver_data = data;
-}
+extern void *mdev_get_drvdata(struct mdev_device *mdev);
+extern void mdev_set_drvdata(struct mdev_device *mdev, void *data);
+extern uuid_le mdev_uuid(struct mdev_device *mdev);
 
 extern struct bus_type mdev_bus_type;
 
-#define dev_is_mdev(d) ((d)->bus == &mdev_bus_type)
-
 extern int  mdev_register_device(struct device *dev,
-                                const struct parent_ops *ops);
+                                const struct mdev_parent_ops *ops);
 extern void mdev_unregister_device(struct device *dev);
 
 extern int  mdev_register_driver(struct mdev_driver *drv, struct module *owner);
 extern void mdev_unregister_driver(struct mdev_driver *drv);
 
+extern struct device *mdev_parent_dev(struct mdev_device *mdev);
+extern struct device *mdev_dev(struct mdev_device *mdev);
+extern struct mdev_device *mdev_from_dev(struct device *dev);
+
 #endif /* MDEV_H */
index 61d20c17f3b7e40ba706a49da463c2f9eabd7d93..254698856b8fc2723dae61e3dd20b6c8176129e7 100644 (file)
@@ -120,7 +120,7 @@ struct mem_cgroup_reclaim_iter {
  */
 struct mem_cgroup_per_node {
        struct lruvec           lruvec;
-       unsigned long           lru_size[NR_LRU_LISTS];
+       unsigned long           lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
 
        struct mem_cgroup_reclaim_iter  iter[DEF_PRIORITY + 1];
 
@@ -432,7 +432,7 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
 
 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
-               int nr_pages);
+               int zid, int nr_pages);
 
 unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
                                           int nid, unsigned int lru_mask);
@@ -441,9 +441,23 @@ static inline
 unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
        struct mem_cgroup_per_node *mz;
+       unsigned long nr_pages = 0;
+       int zid;
 
        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-       return mz->lru_size[lru];
+       for (zid = 0; zid < MAX_NR_ZONES; zid++)
+               nr_pages += mz->lru_zone_size[zid][lru];
+       return nr_pages;
+}
+
+static inline
+unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
+               enum lru_list lru, int zone_idx)
+{
+       struct mem_cgroup_per_node *mz;
+
+       mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+       return mz->lru_zone_size[zone_idx][lru];
 }
 
 void mem_cgroup_handle_over_high(void);
@@ -671,6 +685,12 @@ mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
        return 0;
 }
+static inline
+unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
+               enum lru_list lru, int zone_idx)
+{
+       return 0;
+}
 
 static inline unsigned long
 mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
index 93bdb3485192ff7f4094a649f6f98f4cdad02f51..6533c16e27ad7fb03926286ec94055d26b26f615 100644 (file)
@@ -1384,6 +1384,8 @@ int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
 int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
 int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
                                      bool *vlan_offload_disabled);
+void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
+                                      struct _rule_hw *eth_header);
 int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
index 9f489365b3d39c2400fd2fd19099ca98803396d7..52b437431c6a642b04d33da532cfcd722c69fa0f 100644 (file)
@@ -1071,11 +1071,6 @@ enum {
        MLX5_INFINIBAND_PORT_COUNTERS_GROUP   = 0x20,
 };
 
-enum {
-       MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP       = 0x0,
-       MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2,
-};
-
 static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
 {
        if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
index 0ae55361e674be4c08ffa33c3c69573eee7bf99b..735b36335f297e8babe3f1d2089acd11a735bc43 100644 (file)
@@ -123,7 +123,6 @@ enum {
        MLX5_REG_HOST_ENDIANNESS = 0x7004,
        MLX5_REG_MCIA            = 0x9014,
        MLX5_REG_MLCR            = 0x902b,
-       MLX5_REG_MPCNT           = 0x9051,
 };
 
 enum mlx5_dcbx_oper_mode {
index 57bec544e20a81070d999fc59253d0774a4c6c3b..a852e9db6f0d5ec809cfc0e5245d1ee3fff7c36b 100644 (file)
@@ -1757,80 +1757,6 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
        u8         reserved_at_4c0[0x300];
 };
 
-struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits {
-       u8         life_time_counter_high[0x20];
-
-       u8         life_time_counter_low[0x20];
-
-       u8         rx_errors[0x20];
-
-       u8         tx_errors[0x20];
-
-       u8         l0_to_recovery_eieos[0x20];
-
-       u8         l0_to_recovery_ts[0x20];
-
-       u8         l0_to_recovery_framing[0x20];
-
-       u8         l0_to_recovery_retrain[0x20];
-
-       u8         crc_error_dllp[0x20];
-
-       u8         crc_error_tlp[0x20];
-
-       u8         reserved_at_140[0x680];
-};
-
-struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits {
-       u8         life_time_counter_high[0x20];
-
-       u8         life_time_counter_low[0x20];
-
-       u8         time_to_boot_image_start[0x20];
-
-       u8         time_to_link_image[0x20];
-
-       u8         calibration_time[0x20];
-
-       u8         time_to_first_perst[0x20];
-
-       u8         time_to_detect_state[0x20];
-
-       u8         time_to_l0[0x20];
-
-       u8         time_to_crs_en[0x20];
-
-       u8         time_to_plastic_image_start[0x20];
-
-       u8         time_to_iron_image_start[0x20];
-
-       u8         perst_handler[0x20];
-
-       u8         times_in_l1[0x20];
-
-       u8         times_in_l23[0x20];
-
-       u8         dl_down[0x20];
-
-       u8         config_cycle1usec[0x20];
-
-       u8         config_cycle2to7usec[0x20];
-
-       u8         config_cycle_8to15usec[0x20];
-
-       u8         config_cycle_16_to_63usec[0x20];
-
-       u8         config_cycle_64usec[0x20];
-
-       u8         correctable_err_msg_sent[0x20];
-
-       u8         non_fatal_err_msg_sent[0x20];
-
-       u8         fatal_err_msg_sent[0x20];
-
-       u8         reserved_at_2e0[0x4e0];
-};
-
 struct mlx5_ifc_cmd_inter_comp_event_bits {
        u8         command_completion_vector[0x20];
 
@@ -2995,12 +2921,6 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
        u8         reserved_at_0[0x7c0];
 };
 
-union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits {
-       struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout;
-       struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits pcie_tas_cntrs_grp_data_layout;
-       u8         reserved_at_0[0x7c0];
-};
-
 union mlx5_ifc_event_auto_bits {
        struct mlx5_ifc_comp_event_bits comp_event;
        struct mlx5_ifc_dct_events_bits dct_events;
@@ -7320,18 +7240,6 @@ struct mlx5_ifc_ppcnt_reg_bits {
        union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
 };
 
-struct mlx5_ifc_mpcnt_reg_bits {
-       u8         reserved_at_0[0x8];
-       u8         pcie_index[0x8];
-       u8         reserved_at_10[0xa];
-       u8         grp[0x6];
-
-       u8         clr[0x1];
-       u8         reserved_at_21[0x1f];
-
-       union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set;
-};
-
 struct mlx5_ifc_ppad_reg_bits {
        u8         reserved_at_0[0x3];
        u8         single_mac[0x1];
@@ -7937,7 +7845,6 @@ union mlx5_ifc_ports_control_registers_document_bits {
        struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
        struct mlx5_ifc_ppad_reg_bits ppad_reg;
        struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
-       struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg;
        struct mlx5_ifc_pplm_reg_bits pplm_reg;
        struct mlx5_ifc_pplr_reg_bits pplr_reg;
        struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
index fe6b4036664a9a7c82fe4a22be93288928eceac1..b84615b0f64c294eebb72095cc8dbc874b4ae22b 100644 (file)
@@ -1210,8 +1210,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma);
 void unmap_mapping_range(struct address_space *mapping,
                loff_t const holebegin, loff_t const holelen, int even_cows);
-int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp,
-              spinlock_t **ptlp);
+int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+                            pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
        unsigned long *pfn);
 int follow_phys(struct vm_area_struct *vma, unsigned long address,
index 71613e8a720f99b6870f1e3f8957d2761a6ac1a0..41d376e7116dccae22d9881312cfb660cd9fd58e 100644 (file)
@@ -39,7 +39,7 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
 {
        __update_lru_size(lruvec, lru, zid, nr_pages);
 #ifdef CONFIG_MEMCG
-       mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
+       mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
 #endif
 }
 
index 994f7423a74bd622884c3b646f4123d28697b8ad..9bde9558b59672a866bd763039d326bde2af0f81 100644 (file)
@@ -2477,14 +2477,19 @@ static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
        return NAPI_GRO_CB(skb)->frag0_len < hlen;
 }
 
+static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
+{
+       NAPI_GRO_CB(skb)->frag0 = NULL;
+       NAPI_GRO_CB(skb)->frag0_len = 0;
+}
+
 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
                                        unsigned int offset)
 {
        if (!pskb_may_pull(skb, hlen))
                return NULL;
 
-       NAPI_GRO_CB(skb)->frag0 = NULL;
-       NAPI_GRO_CB(skb)->frag0_len = 0;
+       skb_gro_frag0_invalidate(skb);
        return skb->data + offset;
 }
 
index 5dea8f6440e44f8d7345289847deaa9e9434e136..52bda854593b4a9eae9bad2d9990f9dd166ed609 100644 (file)
@@ -306,7 +306,9 @@ void radix_tree_iter_replace(struct radix_tree_root *,
 void radix_tree_replace_slot(struct radix_tree_root *root,
                             void **slot, void *item);
 void __radix_tree_delete_node(struct radix_tree_root *root,
-                             struct radix_tree_node *node);
+                             struct radix_tree_node *node,
+                             radix_tree_update_node_t update_node,
+                             void *private);
 void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
 void *radix_tree_delete(struct radix_tree_root *, unsigned long);
 void radix_tree_clear_tags(struct radix_tree_root *root,
index e2f3a3281d8fd29de861f4d46ccf7da54ce6eea8..8265d351c9f0e6bc7814fd59ca1efff531e4798f 100644 (file)
@@ -408,7 +408,8 @@ enum rproc_crash_type {
  * @crash_comp: completion used to sync crash handler and the rproc reload
  * @recovery_disabled: flag that state if recovery was disabled
  * @max_notifyid: largest allocated notify id.
- * @table_ptr: our copy of the resource table
+ * @table_ptr: pointer to the resource table in effect
+ * @cached_table: copy of the resource table
  * @has_iommu: flag to indicate if remote processor is behind an MMU
  */
 struct rproc {
@@ -440,6 +441,7 @@ struct rproc {
        bool recovery_disabled;
        int max_notifyid;
        struct resource_table *table_ptr;
+       struct resource_table *cached_table;
        bool has_iommu;
        bool auto_boot;
 };
index 4d1905245c7aa50df56acf0f77c77f3347c28c04..ad3ec9ec61f7b6de743b5d6de4225defcc18be99 100644 (file)
@@ -854,6 +854,16 @@ struct signal_struct {
 
 #define SIGNAL_UNKILLABLE      0x00000040 /* for init: ignore fatal signals */
 
+#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
+                         SIGNAL_STOP_CONTINUED)
+
+static inline void signal_set_stop_flags(struct signal_struct *sig,
+                                        unsigned int flags)
+{
+       WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
+       sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
+}
+
 /* If true, all threads except ->group_exit_task have pending SIGKILL */
 static inline int signal_group_exit(const struct signal_struct *sig)
 {
index b53c0cfd417e3bec1c5cfab76787901bafa5884c..a410715bbef8889d148a6b3f8dbd6afc4ae6f4d0 100644 (file)
@@ -2480,7 +2480,7 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
 
 static inline void skb_free_frag(void *addr)
 {
-       __free_page_frag(addr);
+       page_frag_free(addr);
 }
 
 void *napi_alloc_frag(unsigned int fragsz);
index 084b12bad198232426c6beb27cd348d72797b344..4c536356681543d8749bc5013d619f6c5c7a5bd3 100644 (file)
@@ -226,7 +226,7 @@ static inline const char *__check_heap_object(const void *ptr,
  * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
  */
 #define KMALLOC_SHIFT_HIGH     (PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX      (MAX_ORDER + PAGE_SHIFT)
+#define KMALLOC_SHIFT_MAX      (MAX_ORDER + PAGE_SHIFT - 1)
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW      3
 #endif
@@ -239,7 +239,7 @@ static inline const char *__check_heap_object(const void *ptr,
  * be allocated from the same page.
  */
 #define KMALLOC_SHIFT_HIGH     PAGE_SHIFT
-#define KMALLOC_SHIFT_MAX      30
+#define KMALLOC_SHIFT_MAX      (MAX_ORDER + PAGE_SHIFT - 1)
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW      3
 #endif
index 09f4be179ff304ed0c5e921c8b9bd6e33112c886..7f47b7098b1b9b9cfb6aacfca126826160eaaa8c 100644 (file)
@@ -150,8 +150,9 @@ enum {
        SWP_FILE        = (1 << 7),     /* set after swap_activate success */
        SWP_AREA_DISCARD = (1 << 8),    /* single-time swap area discards */
        SWP_PAGE_DISCARD = (1 << 9),    /* freed swap page-cluster discards */
+       SWP_STABLE_WRITES = (1 << 10),  /* no overwrite PG_writeback pages */
                                        /* add others here before... */
-       SWP_SCANNING    = (1 << 10),    /* refcount in scan_swap_map */
+       SWP_SCANNING    = (1 << 11),    /* refcount in scan_swap_map */
 };
 
 #define SWAP_CLUSTER_MAX 32UL
index 183f37c8a5e164ad5864ac433dd69093bac5bad7..4ee479f2f355b1fa2658b1c2aabbdfc376125651 100644 (file)
@@ -9,7 +9,13 @@ struct device;
 struct page;
 struct scatterlist;
 
-extern int swiotlb_force;
+enum swiotlb_force {
+       SWIOTLB_NORMAL,         /* Default - depending on HW DMA mask etc. */
+       SWIOTLB_FORCE,          /* swiotlb=force */
+       SWIOTLB_NO_FORCE,       /* swiotlb=noforce */
+};
+
+extern enum swiotlb_force swiotlb_force;
 
 /*
  * Maximum allowable number of contiguous slabs to map,
@@ -108,11 +114,14 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask);
 
 #ifdef CONFIG_SWIOTLB
 extern void __init swiotlb_free(void);
+unsigned int swiotlb_max_segment(void);
 #else
 static inline void swiotlb_free(void) { }
+static inline unsigned int swiotlb_max_segment(void) { return 0; }
 #endif
 
 extern void swiotlb_print_info(void);
 extern int is_swiotlb_buffer(phys_addr_t paddr);
+extern void swiotlb_set_max_segment(unsigned int);
 
 #endif /* __LINUX_SWIOTLB_H */
index bd36ce431e32cbb57a1ec4e97964b78774787a2e..bab0b1ad0613eb7de475b88546cecd7a52645e90 100644 (file)
@@ -8,23 +8,7 @@
 #ifndef _LINUX_TIMERFD_H
 #define _LINUX_TIMERFD_H
 
-/* For O_CLOEXEC and O_NONBLOCK */
-#include <linux/fcntl.h>
-
-/* For _IO helpers */
-#include <linux/ioctl.h>
-
-/*
- * CAREFUL: Check include/asm-generic/fcntl.h when defining
- * new flags, since they might collide with O_* ones. We want
- * to re-use O_* flags that couldn't possibly have a meaning
- * from eventfd, in order to leave a free define-space for
- * shared O_* flags.
- */
-#define TFD_TIMER_ABSTIME (1 << 0)
-#define TFD_TIMER_CANCEL_ON_SET (1 << 1)
-#define TFD_CLOEXEC O_CLOEXEC
-#define TFD_NONBLOCK O_NONBLOCK
+#include <uapi/linux/timerfd.h>
 
 #define TFD_SHARED_FCNTL_FLAGS (TFD_CLOEXEC | TFD_NONBLOCK)
 /* Flags for timerfd_create.  */
@@ -32,6 +16,4 @@
 /* Flags for timerfd_settime.  */
 #define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET)
 
-#define TFD_IOC_SET_TICKS      _IOW('T', 0, u64)
-
 #endif /* _LINUX_TIMERFD_H */
index 530c57bdefa06567c3bcaba9249845af1d4e535a..915c4357945c27992aa3b7561ab705dedff082ad 100644 (file)
@@ -36,10 +36,10 @@ struct hdmi_codec_daifmt {
                HDMI_AC97,
                HDMI_SPDIF,
        } fmt;
-       int bit_clk_inv:1;
-       int frame_clk_inv:1;
-       int bit_clk_master:1;
-       int frame_clk_master:1;
+       unsigned int bit_clk_inv:1;
+       unsigned int frame_clk_inv:1;
+       unsigned int bit_clk_master:1;
+       unsigned int frame_clk_master:1;
 };
 
 /*
index 2b502f6cc6d036be6e25c6834671be1cde2fd8da..b86168a21d56c7fff1d59dfacbef93049bde888e 100644 (file)
@@ -813,6 +813,7 @@ struct snd_soc_component {
        unsigned int suspended:1; /* is in suspend PM state */
 
        struct list_head list;
+       struct list_head card_aux_list; /* for auxiliary bound components */
        struct list_head card_list;
 
        struct snd_soc_dai_driver *dai_drv;
@@ -1152,6 +1153,7 @@ struct snd_soc_card {
         */
        struct snd_soc_aux_dev *aux_dev;
        int num_aux_devs;
+       struct list_head aux_comp_list;
 
        const struct snd_kcontrol_new *controls;
        int num_controls;
@@ -1547,6 +1549,7 @@ static inline void snd_soc_initialize_card_lists(struct snd_soc_card *card)
        INIT_LIST_HEAD(&card->widgets);
        INIT_LIST_HEAD(&card->paths);
        INIT_LIST_HEAD(&card->dapm_list);
+       INIT_LIST_HEAD(&card->aux_comp_list);
        INIT_LIST_HEAD(&card->component_dev_list);
 }
 
index 29e6858bb1648b636dcce48072f9fe43e5a8a884..43edf82e54fffce7b3d8bfed8c832aebfc60c79b 100644 (file)
@@ -174,6 +174,10 @@ enum tcm_sense_reason_table {
        TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED  = R(0x16),
        TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED  = R(0x17),
        TCM_COPY_TARGET_DEVICE_NOT_REACHABLE    = R(0x18),
+       TCM_TOO_MANY_TARGET_DESCS               = R(0x19),
+       TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE   = R(0x1a),
+       TCM_TOO_MANY_SEGMENT_DESCS              = R(0x1b),
+       TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE  = R(0x1c),
 #undef R
 };
 
index c14bed4ab0977fc1d7d06b38051644f8e67e2349..88d18a8ceb59f9c6e3c5630a491cf6d3c91cf73c 100644 (file)
@@ -130,8 +130,8 @@ DECLARE_EVENT_CLASS(btrfs__inode,
                                BTRFS_I(inode)->root->root_key.objectid;
        ),
 
-       TP_printk_btrfs("root = %llu(%s), gen = %llu, ino = %lu, blocks = %llu, "
-                 "disk_i_size = %llu, last_trans = %llu, logged_trans = %llu",
+       TP_printk_btrfs("root=%llu(%s) gen=%llu ino=%lu blocks=%llu "
+                 "disk_i_size=%llu last_trans=%llu logged_trans=%llu",
                  show_root_type(__entry->root_objectid),
                  (unsigned long long)__entry->generation,
                  (unsigned long)__entry->ino,
@@ -184,14 +184,16 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
 
 TRACE_EVENT_CONDITION(btrfs_get_extent,
 
-       TP_PROTO(struct btrfs_root *root, struct extent_map *map),
+       TP_PROTO(struct btrfs_root *root, struct inode *inode,
+                struct extent_map *map),
 
-       TP_ARGS(root, map),
+       TP_ARGS(root, inode, map),
 
        TP_CONDITION(map),
 
        TP_STRUCT__entry_btrfs(
                __field(        u64,  root_objectid     )
+               __field(        u64,  ino               )
                __field(        u64,  start             )
                __field(        u64,  len               )
                __field(        u64,  orig_start        )
@@ -204,7 +206,8 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
 
        TP_fast_assign_btrfs(root->fs_info,
                __entry->root_objectid  = root->root_key.objectid;
-               __entry->start          = map->start;
+               __entry->ino            = btrfs_ino(inode);
+               __entry->start          = map->start;
                __entry->len            = map->len;
                __entry->orig_start     = map->orig_start;
                __entry->block_start    = map->block_start;
@@ -214,11 +217,12 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
                __entry->compress_type  = map->compress_type;
        ),
 
-       TP_printk_btrfs("root = %llu(%s), start = %llu, len = %llu, "
-                 "orig_start = %llu, block_start = %llu(%s), "
-                 "block_len = %llu, flags = %s, refs = %u, "
-                 "compress_type = %u",
+       TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu "
+                 "orig_start=%llu block_start=%llu(%s) "
+                 "block_len=%llu flags=%s refs=%u "
+                 "compress_type=%u",
                  show_root_type(__entry->root_objectid),
+                 (unsigned long long)__entry->ino,
                  (unsigned long long)__entry->start,
                  (unsigned long long)__entry->len,
                  (unsigned long long)__entry->orig_start,
@@ -259,6 +263,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
                __field(        int,  compress_type     )
                __field(        int,  refs              )
                __field(        u64,  root_objectid     )
+               __field(        u64,  truncated_len     )
        ),
 
        TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
@@ -273,18 +278,21 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
                __entry->refs           = atomic_read(&ordered->refs);
                __entry->root_objectid  =
                                BTRFS_I(inode)->root->root_key.objectid;
+               __entry->truncated_len  = ordered->truncated_len;
        ),
 
-       TP_printk_btrfs("root = %llu(%s), ino = %llu, file_offset = %llu, "
-                 "start = %llu, len = %llu, disk_len = %llu, "
-                 "bytes_left = %llu, flags = %s, compress_type = %d, "
-                 "refs = %d",
+       TP_printk_btrfs("root=%llu(%s) ino=%llu file_offset=%llu "
+                 "start=%llu len=%llu disk_len=%llu "
+                 "truncated_len=%llu "
+                 "bytes_left=%llu flags=%s compress_type=%d "
+                 "refs=%d",
                  show_root_type(__entry->root_objectid),
                  (unsigned long long)__entry->ino,
                  (unsigned long long)__entry->file_offset,
                  (unsigned long long)__entry->start,
                  (unsigned long long)__entry->len,
                  (unsigned long long)__entry->disk_len,
+                 (unsigned long long)__entry->truncated_len,
                  (unsigned long long)__entry->bytes_left,
                  show_ordered_flags(__entry->flags),
                  __entry->compress_type, __entry->refs)
@@ -354,10 +362,10 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
                                 BTRFS_I(inode)->root->root_key.objectid;
        ),
 
-       TP_printk_btrfs("root = %llu(%s), ino = %lu, page_index = %lu, "
-                 "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, "
-                 "range_end = %llu, for_kupdate = %d, "
-                 "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu",
+       TP_printk_btrfs("root=%llu(%s) ino=%lu page_index=%lu "
+                 "nr_to_write=%ld pages_skipped=%ld range_start=%llu "
+                 "range_end=%llu for_kupdate=%d "
+                 "for_reclaim=%d range_cyclic=%d writeback_index=%lu",
                  show_root_type(__entry->root_objectid),
                  (unsigned long)__entry->ino, __entry->index,
                  __entry->nr_to_write, __entry->pages_skipped,
@@ -400,8 +408,8 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
                         BTRFS_I(page->mapping->host)->root->root_key.objectid;
        ),
 
-       TP_printk_btrfs("root = %llu(%s), ino = %lu, page_index = %lu, start = %llu, "
-                 "end = %llu, uptodate = %d",
+       TP_printk_btrfs("root=%llu(%s) ino=%lu page_index=%lu start=%llu "
+                 "end=%llu uptodate=%d",
                  show_root_type(__entry->root_objectid),
                  (unsigned long)__entry->ino, (unsigned long)__entry->index,
                  (unsigned long long)__entry->start,
@@ -433,7 +441,7 @@ TRACE_EVENT(btrfs_sync_file,
                                 BTRFS_I(inode)->root->root_key.objectid;
        ),
 
-       TP_printk_btrfs("root = %llu(%s), ino = %ld, parent = %ld, datasync = %d",
+       TP_printk_btrfs("root=%llu(%s) ino=%ld parent=%ld datasync=%d",
                  show_root_type(__entry->root_objectid),
                  (unsigned long)__entry->ino, (unsigned long)__entry->parent,
                  __entry->datasync)
@@ -484,9 +492,9 @@ TRACE_EVENT(btrfs_add_block_group,
                __entry->create         = create;
        ),
 
-       TP_printk("%pU: block_group offset = %llu, size = %llu, "
-                 "flags = %llu(%s), bytes_used = %llu, bytes_super = %llu, "
-                 "create = %d", __entry->fsid,
+       TP_printk("%pU: block_group offset=%llu size=%llu "
+                 "flags=%llu(%s) bytes_used=%llu bytes_super=%llu "
+                 "create=%d", __entry->fsid,
                  (unsigned long long)__entry->offset,
                  (unsigned long long)__entry->size,
                  (unsigned long long)__entry->flags,
@@ -535,9 +543,9 @@ DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref,
                __entry->seq            = ref->seq;
        ),
 
-       TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, "
-                 "parent = %llu(%s), ref_root = %llu(%s), level = %d, "
-                 "type = %s, seq = %llu",
+       TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s "
+                 "parent=%llu(%s) ref_root=%llu(%s) level=%d "
+                 "type=%s seq=%llu",
                  (unsigned long long)__entry->bytenr,
                  (unsigned long long)__entry->num_bytes,
                  show_ref_action(__entry->action),
@@ -600,9 +608,9 @@ DECLARE_EVENT_CLASS(btrfs_delayed_data_ref,
                __entry->seq            = ref->seq;
        ),
 
-       TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, "
-                 "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, "
-                 "offset = %llu, type = %s, seq = %llu",
+       TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s "
+                 "parent=%llu(%s) ref_root=%llu(%s) owner=%llu "
+                 "offset=%llu type=%s seq=%llu",
                  (unsigned long long)__entry->bytenr,
                  (unsigned long long)__entry->num_bytes,
                  show_ref_action(__entry->action),
@@ -657,7 +665,7 @@ DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
                __entry->is_data        = head_ref->is_data;
        ),
 
-       TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, is_data = %d",
+       TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s is_data=%d",
                  (unsigned long long)__entry->bytenr,
                  (unsigned long long)__entry->num_bytes,
                  show_ref_action(__entry->action),
@@ -721,8 +729,8 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
                __entry->root_objectid  = fs_info->chunk_root->root_key.objectid;
        ),
 
-       TP_printk_btrfs("root = %llu(%s), offset = %llu, size = %llu, "
-                 "num_stripes = %d, sub_stripes = %d, type = %s",
+       TP_printk_btrfs("root=%llu(%s) offset=%llu size=%llu "
+                 "num_stripes=%d sub_stripes=%d type=%s",
                  show_root_type(__entry->root_objectid),
                  (unsigned long long)__entry->offset,
                  (unsigned long long)__entry->size,
@@ -771,8 +779,8 @@ TRACE_EVENT(btrfs_cow_block,
                __entry->cow_level      = btrfs_header_level(cow);
        ),
 
-       TP_printk_btrfs("root = %llu(%s), refs = %d, orig_buf = %llu "
-                 "(orig_level = %d), cow_buf = %llu (cow_level = %d)",
+       TP_printk_btrfs("root=%llu(%s) refs=%d orig_buf=%llu "
+                 "(orig_level=%d) cow_buf=%llu (cow_level=%d)",
                  show_root_type(__entry->root_objectid),
                  __entry->refs,
                  (unsigned long long)__entry->buf_start,
@@ -836,7 +844,7 @@ TRACE_EVENT(btrfs_trigger_flush,
                __assign_str(reason, reason)
        ),
 
-       TP_printk("%pU: %s: flush = %d(%s), flags = %llu(%s), bytes = %llu",
+       TP_printk("%pU: %s: flush=%d(%s) flags=%llu(%s) bytes=%llu",
                  __entry->fsid, __get_str(reason), __entry->flush,
                  show_flush_action(__entry->flush),
                  (unsigned long long)__entry->flags,
@@ -879,8 +887,8 @@ TRACE_EVENT(btrfs_flush_space,
                __entry->ret            =       ret;
        ),
 
-       TP_printk("%pU: state = %d(%s), flags = %llu(%s), num_bytes = %llu, "
-                 "orig_bytes = %llu, ret = %d", __entry->fsid, __entry->state,
+       TP_printk("%pU: state=%d(%s) flags=%llu(%s) num_bytes=%llu "
+                 "orig_bytes=%llu ret=%d", __entry->fsid, __entry->state,
                  show_flush_state(__entry->state),
                  (unsigned long long)__entry->flags,
                  __print_flags((unsigned long)__entry->flags, "|",
@@ -905,7 +913,7 @@ DECLARE_EVENT_CLASS(btrfs__reserved_extent,
                __entry->len            = len;
        ),
 
-       TP_printk_btrfs("root = %llu(%s), start = %llu, len = %llu",
+       TP_printk_btrfs("root=%llu(%s) start=%llu len=%llu",
                  show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
                  (unsigned long long)__entry->start,
                  (unsigned long long)__entry->len)
@@ -944,7 +952,7 @@ TRACE_EVENT(find_free_extent,
                __entry->data           = data;
        ),
 
-       TP_printk_btrfs("root = %Lu(%s), len = %Lu, empty_size = %Lu, flags = %Lu(%s)",
+       TP_printk_btrfs("root=%Lu(%s) len=%Lu empty_size=%Lu flags=%Lu(%s)",
                  show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
                  __entry->num_bytes, __entry->empty_size, __entry->data,
                  __print_flags((unsigned long)__entry->data, "|",
@@ -973,8 +981,8 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
                __entry->len            = len;
        ),
 
-       TP_printk_btrfs("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), "
-                 "start = %Lu, len = %Lu",
+       TP_printk_btrfs("root=%Lu(%s) block_group=%Lu flags=%Lu(%s) "
+                 "start=%Lu len=%Lu",
                  show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
                  __entry->bg_objectid,
                  __entry->flags, __print_flags((unsigned long)__entry->flags,
@@ -1025,8 +1033,8 @@ TRACE_EVENT(btrfs_find_cluster,
                __entry->min_bytes      = min_bytes;
        ),
 
-       TP_printk_btrfs("block_group = %Lu, flags = %Lu(%s), start = %Lu, len = %Lu,"
-                 " empty_size = %Lu, min_bytes = %Lu", __entry->bg_objectid,
+       TP_printk_btrfs("block_group=%Lu flags=%Lu(%s) start=%Lu len=%Lu "
+                 "empty_size=%Lu min_bytes=%Lu", __entry->bg_objectid,
                  __entry->flags,
                  __print_flags((unsigned long)__entry->flags, "|",
                                BTRFS_GROUP_FLAGS), __entry->start,
@@ -1047,7 +1055,7 @@ TRACE_EVENT(btrfs_failed_cluster_setup,
                __entry->bg_objectid    = block_group->key.objectid;
        ),
 
-       TP_printk_btrfs("block_group = %Lu", __entry->bg_objectid)
+       TP_printk_btrfs("block_group=%Lu", __entry->bg_objectid)
 );
 
 TRACE_EVENT(btrfs_setup_cluster,
@@ -1075,8 +1083,8 @@ TRACE_EVENT(btrfs_setup_cluster,
                __entry->bitmap         = bitmap;
        ),
 
-       TP_printk_btrfs("block_group = %Lu, flags = %Lu(%s), window_start = %Lu, "
-                 "size = %Lu, max_size = %Lu, bitmap = %d",
+       TP_printk_btrfs("block_group=%Lu flags=%Lu(%s) window_start=%Lu "
+                 "size=%Lu max_size=%Lu bitmap=%d",
                  __entry->bg_objectid,
                  __entry->flags,
                  __print_flags((unsigned long)__entry->flags, "|",
@@ -1103,7 +1111,7 @@ TRACE_EVENT(alloc_extent_state,
                __entry->ip     = IP
        ),
 
-       TP_printk("state=%p; mask = %s; caller = %pS", __entry->state,
+       TP_printk("state=%p mask=%s caller=%pS", __entry->state,
                  show_gfp_flags(__entry->mask), (void *)__entry->ip)
 );
 
@@ -1123,7 +1131,7 @@ TRACE_EVENT(free_extent_state,
                __entry->ip = IP
        ),
 
-       TP_printk(" state=%p; caller = %pS", __entry->state,
+       TP_printk("state=%p caller=%pS", __entry->state,
                  (void *)__entry->ip)
 );
 
@@ -1151,28 +1159,32 @@ DECLARE_EVENT_CLASS(btrfs__work,
                __entry->normal_work    = &work->normal_work;
        ),
 
-       TP_printk_btrfs("work=%p (normal_work=%p), wq=%p, func=%pf, ordered_func=%p,"
-                 " ordered_free=%p",
+       TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%pf ordered_func=%p "
+                 "ordered_free=%p",
                  __entry->work, __entry->normal_work, __entry->wq,
                   __entry->func, __entry->ordered_func, __entry->ordered_free)
 );
 
-/* For situiations that the work is freed */
+/*
+ * For situiations when the work is freed, we pass fs_info and a tag that that
+ * matches address of the work structure so it can be paired with the
+ * scheduling event.
+ */
 DECLARE_EVENT_CLASS(btrfs__work__done,
 
-       TP_PROTO(struct btrfs_work *work),
+       TP_PROTO(struct btrfs_fs_info *fs_info, void *wtag),
 
-       TP_ARGS(work),
+       TP_ARGS(fs_info, wtag),
 
        TP_STRUCT__entry_btrfs(
-               __field(        void *, work                    )
+               __field(        void *, wtag                    )
        ),
 
-       TP_fast_assign_btrfs(btrfs_work_owner(work),
-               __entry->work           = work;
+       TP_fast_assign_btrfs(fs_info,
+               __entry->wtag           = wtag;
        ),
 
-       TP_printk_btrfs("work->%p", __entry->work)
+       TP_printk_btrfs("work->%p", __entry->wtag)
 );
 
 DEFINE_EVENT(btrfs__work, btrfs_work_queued,
@@ -1191,9 +1203,9 @@ DEFINE_EVENT(btrfs__work, btrfs_work_sched,
 
 DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done,
 
-       TP_PROTO(struct btrfs_work *work),
+       TP_PROTO(struct btrfs_fs_info *fs_info, void *wtag),
 
-       TP_ARGS(work)
+       TP_ARGS(fs_info, wtag)
 );
 
 DEFINE_EVENT(btrfs__work, btrfs_ordered_sched,
@@ -1221,7 +1233,7 @@ DECLARE_EVENT_CLASS(btrfs__workqueue,
                __entry->high           = high;
        ),
 
-       TP_printk_btrfs("name=%s%s, wq=%p", __get_str(name),
+       TP_printk_btrfs("name=%s%s wq=%p", __get_str(name),
                  __print_flags(__entry->high, "",
                                {(WQ_HIGHPRI),  "-high"}),
                  __entry->wq)
@@ -1276,7 +1288,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_data_map,
                __entry->free_reserved  =       free_reserved;
        ),
 
-       TP_printk_btrfs("rootid=%llu, ino=%lu, free_reserved=%llu",
+       TP_printk_btrfs("rootid=%llu ino=%lu free_reserved=%llu",
                  __entry->rootid, __entry->ino, __entry->free_reserved)
 );
 
@@ -1323,7 +1335,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data,
                __entry->op             = op;
        ),
 
-       TP_printk_btrfs("root=%llu, ino=%lu, start=%llu, len=%llu, reserved=%llu, op=%s",
+       TP_printk_btrfs("root=%llu ino=%lu start=%llu len=%llu reserved=%llu op=%s",
                  __entry->rootid, __entry->ino, __entry->start, __entry->len,
                  __entry->reserved,
                  __print_flags((unsigned long)__entry->op, "",
@@ -1361,7 +1373,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_delayed_ref,
                __entry->reserved       = reserved;
        ),
 
-       TP_printk_btrfs("root=%llu, reserved=%llu, op=free",
+       TP_printk_btrfs("root=%llu reserved=%llu op=free",
                  __entry->ref_root, __entry->reserved)
 );
 
@@ -1388,7 +1400,7 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
                __entry->num_bytes      = rec->num_bytes;
        ),
 
-       TP_printk_btrfs("bytenr = %llu, num_bytes = %llu",
+       TP_printk_btrfs("bytenr=%llu num_bytes=%llu",
                  (unsigned long long)__entry->bytenr,
                  (unsigned long long)__entry->num_bytes)
 );
@@ -1430,8 +1442,8 @@ TRACE_EVENT(btrfs_qgroup_account_extent,
                __entry->nr_new_roots   = nr_new_roots;
        ),
 
-       TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, nr_old_roots = %llu, "
-                 "nr_new_roots = %llu",
+       TP_printk_btrfs("bytenr=%llu num_bytes=%llu nr_old_roots=%llu "
+                 "nr_new_roots=%llu",
                  __entry->bytenr,
                  __entry->num_bytes,
                  __entry->nr_old_roots,
@@ -1457,7 +1469,7 @@ TRACE_EVENT(qgroup_update_counters,
                __entry->cur_new_count  = cur_new_count;
        ),
 
-       TP_printk_btrfs("qgid = %llu, cur_old_count = %llu, cur_new_count = %llu",
+       TP_printk_btrfs("qgid=%llu cur_old_count=%llu cur_new_count=%llu",
                  __entry->qgid,
                  __entry->cur_old_count,
                  __entry->cur_new_count)
index 9e687ca9a307b1344889186e46dd5e62a85f614f..15bf875d0e4a666d91c959bd10b62318b62d61d5 100644 (file)
@@ -47,8 +47,7 @@
        {(unsigned long)__GFP_WRITE,            "__GFP_WRITE"},         \
        {(unsigned long)__GFP_RECLAIM,          "__GFP_RECLAIM"},       \
        {(unsigned long)__GFP_DIRECT_RECLAIM,   "__GFP_DIRECT_RECLAIM"},\
-       {(unsigned long)__GFP_KSWAPD_RECLAIM,   "__GFP_KSWAPD_RECLAIM"},\
-       {(unsigned long)__GFP_OTHER_NODE,       "__GFP_OTHER_NODE"}     \
+       {(unsigned long)__GFP_KSWAPD_RECLAIM,   "__GFP_KSWAPD_RECLAIM"}\
 
 #define show_gfp_flags(flags)                                          \
        (flags) ? __print_flags(flags, "|",                             \
index 7ea4c5e7c4487a963acfc75c027514a6cb360169..288c0c54a2b4ace63a94549c136fe255e2982089 100644 (file)
@@ -11,16 +11,16 @@ TRACE_EVENT(swiotlb_bounced,
        TP_PROTO(struct device *dev,
                 dma_addr_t dev_addr,
                 size_t size,
-                int swiotlb_force),
+                enum swiotlb_force swiotlb_force),
 
        TP_ARGS(dev, dev_addr, size, swiotlb_force),
 
        TP_STRUCT__entry(
-               __string(       dev_name,       dev_name(dev)   )
-               __field(        u64,    dma_mask                )
-               __field(        dma_addr_t,     dev_addr        )
-               __field(        size_t, size                    )
-               __field(        int,    swiotlb_force           )
+               __string(       dev_name,       dev_name(dev)           )
+               __field(        u64,    dma_mask                        )
+               __field(        dma_addr_t,     dev_addr                )
+               __field(        size_t, size                            )
+               __field(        enum swiotlb_force,     swiotlb_force   )
        ),
 
        TP_fast_assign(
@@ -37,7 +37,10 @@ TRACE_EVENT(swiotlb_bounced,
                __entry->dma_mask,
                (unsigned long long)__entry->dev_addr,
                __entry->size,
-               __entry->swiotlb_force ? "swiotlb_force" : "" )
+               __print_symbolic(__entry->swiotlb_force,
+                       { SWIOTLB_NORMAL,       "NORMAL" },
+                       { SWIOTLB_FORCE,        "FORCE" },
+                       { SWIOTLB_NO_FORCE,     "NO_FORCE" }))
 );
 
 #endif /*  _TRACE_SWIOTLB_H */
index a8b93e68523941ca5615cbbe0da87f42d779db6d..f330ba4547cfd4c59da3d8f309e4ab76d8bd8b40 100644 (file)
@@ -414,6 +414,7 @@ header-y += telephony.h
 header-y += termios.h
 header-y += thermal.h
 header-y += time.h
+header-y += timerfd.h
 header-y += times.h
 header-y += timex.h
 header-y += tiocl.h
diff --git a/include/uapi/linux/timerfd.h b/include/uapi/linux/timerfd.h
new file mode 100644 (file)
index 0000000..6fcfaa8
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ *  include/linux/timerfd.h
+ *
+ *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#ifndef _UAPI_LINUX_TIMERFD_H
+#define _UAPI_LINUX_TIMERFD_H
+
+#include <linux/types.h>
+
+/* For O_CLOEXEC and O_NONBLOCK */
+#include <linux/fcntl.h>
+
+/* For _IO helpers */
+#include <linux/ioctl.h>
+
+/*
+ * CAREFUL: Check include/asm-generic/fcntl.h when defining
+ * new flags, since they might collide with O_* ones. We want
+ * to re-use O_* flags that couldn't possibly have a meaning
+ * from eventfd, in order to leave a free define-space for
+ * shared O_* flags.
+ *
+ * Also make sure to update the masks in include/linux/timerfd.h
+ * when adding new flags.
+ */
+#define TFD_TIMER_ABSTIME (1 << 0)
+#define TFD_TIMER_CANCEL_ON_SET (1 << 1)
+#define TFD_CLOEXEC O_CLOEXEC
+#define TFD_NONBLOCK O_NONBLOCK
+
+#define TFD_IOC_SET_TICKS      _IOW('T', 0, __u64)
+
+#endif /* _UAPI_LINUX_TIMERFD_H */
index acc63697a0cc415357792e922396a9ef296bb01f..b2a31a55a61237e65e4939a9d2722e136cc73ca0 100644 (file)
@@ -93,6 +93,7 @@ struct usb_ext_prop_desc {
  * |   0 | magic     | LE32         | FUNCTIONFS_DESCRIPTORS_MAGIC_V2      |
  * |   4 | length    | LE32         | length of the whole data chunk       |
  * |   8 | flags     | LE32         | combination of functionfs_flags      |
+ * |     | eventfd   | LE32         | eventfd file descriptor              |
  * |     | fs_count  | LE32         | number of full-speed descriptors     |
  * |     | hs_count  | LE32         | number of high-speed descriptors     |
  * |     | ss_count  | LE32         | number of super-speed descriptors    |
index 223b734abccdc3b7f3baae457261dd66862fcd1d..e1a937348a3ed2bb3a76820e1ffa6a542f6aa9fb 100644 (file)
@@ -1176,6 +1176,10 @@ config CGROUP_DEBUG
 
          Say N.
 
+config SOCK_CGROUP_DATA
+       bool
+       default n
+
 endif # CGROUPS
 
 config CHECKPOINT_RESTORE
index e08b948519223e62aca265284483ee7b1ef1420c..3ec5742b5640f5d265ea8f0d9b8f986cf25b8f1c 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1977,7 +1977,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
                }
 
                rcu_read_lock();
-               sem_lock(sma, sops, nsops);
+               locknum = sem_lock(sma, sops, nsops);
 
                if (!ipc_valid_object(&sma->sem_perm))
                        goto out_unlock_free;
index 8b1dde96a0faa1bda645f0e03388f0a47f565bdc..7b44195da81bb25a080b165402b74cf8c7f59bcc 100644 (file)
@@ -231,9 +231,11 @@ static void untag_chunk(struct node *p)
        if (size)
                new = alloc_chunk(size);
 
+       mutex_lock(&entry->group->mark_mutex);
        spin_lock(&entry->lock);
        if (chunk->dead || !entry->inode) {
                spin_unlock(&entry->lock);
+               mutex_unlock(&entry->group->mark_mutex);
                if (new)
                        free_chunk(new);
                goto out;
@@ -251,6 +253,7 @@ static void untag_chunk(struct node *p)
                list_del_rcu(&chunk->hash);
                spin_unlock(&hash_lock);
                spin_unlock(&entry->lock);
+               mutex_unlock(&entry->group->mark_mutex);
                fsnotify_destroy_mark(entry, audit_tree_group);
                goto out;
        }
@@ -258,8 +261,8 @@ static void untag_chunk(struct node *p)
        if (!new)
                goto Fallback;
 
-       fsnotify_duplicate_mark(&new->mark, entry);
-       if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, NULL, 1)) {
+       if (fsnotify_add_mark_locked(&new->mark, entry->group, entry->inode,
+                                    NULL, 1)) {
                fsnotify_put_mark(&new->mark);
                goto Fallback;
        }
@@ -293,6 +296,7 @@ static void untag_chunk(struct node *p)
                owner->root = new;
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
+       mutex_unlock(&entry->group->mark_mutex);
        fsnotify_destroy_mark(entry, audit_tree_group);
        fsnotify_put_mark(&new->mark);  /* drop initial reference */
        goto out;
@@ -309,6 +313,7 @@ Fallback:
        put_tree(owner);
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
+       mutex_unlock(&entry->group->mark_mutex);
 out:
        fsnotify_put_mark(entry);
        spin_lock(&hash_lock);
@@ -386,18 +391,21 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 
        chunk_entry = &chunk->mark;
 
+       mutex_lock(&old_entry->group->mark_mutex);
        spin_lock(&old_entry->lock);
        if (!old_entry->inode) {
                /* old_entry is being shot, lets just lie */
                spin_unlock(&old_entry->lock);
+               mutex_unlock(&old_entry->group->mark_mutex);
                fsnotify_put_mark(old_entry);
                free_chunk(chunk);
                return -ENOENT;
        }
 
-       fsnotify_duplicate_mark(chunk_entry, old_entry);
-       if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, NULL, 1)) {
+       if (fsnotify_add_mark_locked(chunk_entry, old_entry->group,
+                                    old_entry->inode, NULL, 1)) {
                spin_unlock(&old_entry->lock);
+               mutex_unlock(&old_entry->group->mark_mutex);
                fsnotify_put_mark(chunk_entry);
                fsnotify_put_mark(old_entry);
                return -ENOSPC;
@@ -413,6 +421,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
                chunk->dead = 1;
                spin_unlock(&chunk_entry->lock);
                spin_unlock(&old_entry->lock);
+               mutex_unlock(&old_entry->group->mark_mutex);
 
                fsnotify_destroy_mark(chunk_entry, audit_tree_group);
 
@@ -445,6 +454,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
        spin_unlock(&hash_lock);
        spin_unlock(&chunk_entry->lock);
        spin_unlock(&old_entry->lock);
+       mutex_unlock(&old_entry->group->mark_mutex);
        fsnotify_destroy_mark(old_entry, audit_tree_group);
        fsnotify_put_mark(chunk_entry); /* drop initial reference */
        fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
index a2ac051c342f87b1372a7aeb33ba816327ee1a43..229a5d5df9770fc66774bf5defea359873946d01 100644 (file)
@@ -56,7 +56,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
            attr->value_size == 0 || attr->map_flags)
                return ERR_PTR(-EINVAL);
 
-       if (attr->value_size >= 1 << (KMALLOC_SHIFT_MAX - 1))
+       if (attr->value_size > KMALLOC_MAX_SIZE)
                /* if value_size is bigger, the user space won't be able to
                 * access the elements.
                 */
index 34debc1a9641875382b6603820f3e85c4a18dffc..3f2bb58952d8dfa4a2e082f9e6f9d277e19f0577 100644 (file)
@@ -274,7 +274,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
                 */
                goto free_htab;
 
-       if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) -
+       if (htab->map.value_size >= KMALLOC_MAX_SIZE -
            MAX_BPF_STACK - sizeof(struct htab_elem))
                /* if value_size is bigger, the user space won't be able to
                 * access the elements via bpf syscall. This check also makes
index a98e814f216f9c54f878bcfca32e4747be357c07..f97fe77ceb88aa39ff3bd01f497586f778c052a2 100644 (file)
@@ -318,6 +318,7 @@ bool has_capability(struct task_struct *t, int cap)
 {
        return has_ns_capability(t, &init_user_ns, cap);
 }
+EXPORT_SYMBOL(has_capability);
 
 /**
  * has_ns_capability_noaudit - Does a task have a capability (unaudited)
index 93ad6c1fb9b6212e706eb3ae08f7b881192008ec..a9b8cf50059151c17f63d35cf4c622ae8b72f131 100644 (file)
@@ -182,6 +182,13 @@ void static_key_slow_dec_deferred(struct static_key_deferred *key)
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
 
+void static_key_deferred_flush(struct static_key_deferred *key)
+{
+       STATIC_KEY_CHECK_USE();
+       flush_delayed_work(&key->work);
+}
+EXPORT_SYMBOL_GPL(static_key_deferred_flush);
+
 void jump_label_rate_limit(struct static_key_deferred *key,
                unsigned long rl)
 {
index b501e390bb34403c11d6ae09c31ea9fcb769ea8e..9ecedc28b928debb6a5988a5db8b76833133d9e4 100644 (file)
@@ -246,7 +246,9 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
        /* pages are dead and unused, undo the arch mapping */
        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(resource_size(res), SECTION_SIZE);
+       mem_hotplug_begin();
        arch_remove_memory(align_start, align_size);
+       mem_hotplug_done();
        untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
        pgmap_radix_release(res);
        dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
@@ -358,7 +360,9 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
        if (error)
                goto err_pfn_remap;
 
+       mem_hotplug_begin();
        error = arch_add_memory(nid, align_start, align_size, true);
+       mem_hotplug_done();
        if (error)
                goto err_add_memory;
 
index ff046b73ff2d309ce00fbd3bd8949a8f6f5fd297..3603d93a19689be7188a004f2b999b27e0ebdf2f 100644 (file)
@@ -346,7 +346,7 @@ static bool task_participate_group_stop(struct task_struct *task)
         * fresh group stop.  Read comment in do_signal_stop() for details.
         */
        if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
-               sig->flags = SIGNAL_STOP_STOPPED;
+               signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
                return true;
        }
        return false;
@@ -843,7 +843,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
                         * will take ->siglock, notice SIGNAL_CLD_MASK, and
                         * notify its parent. See get_signal_to_deliver().
                         */
-                       signal->flags = why | SIGNAL_STOP_CONTINUED;
+                       signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
                        signal->group_stop_count = 0;
                        signal->group_exit_code = 0;
                }
index b06848a104e6940e128e63a5d3c8c7fb39ffaf8b..eb9e9a7870fa7bdb0f373f858037c1026880c85f 100644 (file)
@@ -164,7 +164,7 @@ config DEBUG_INFO_REDUCED
 
 config DEBUG_INFO_SPLIT
        bool "Produce split debuginfo in .dwo files"
-       depends on DEBUG_INFO
+       depends on DEBUG_INFO && !FRV
        help
          Generate debug info into separate .dwo files. This significantly
          reduces the build directory size for builds with DEBUG_INFO,
index 25f57230380104f419257ea43c1cd3d2e31d7e65..e68604ae3cedf41ce98bc06de2142629fa115cbd 100644 (file)
@@ -730,43 +730,50 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 }
 EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
 
+static inline void pipe_truncate(struct iov_iter *i)
+{
+       struct pipe_inode_info *pipe = i->pipe;
+       if (pipe->nrbufs) {
+               size_t off = i->iov_offset;
+               int idx = i->idx;
+               int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
+               if (off) {
+                       pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
+                       idx = next_idx(idx, pipe);
+                       nrbufs++;
+               }
+               while (pipe->nrbufs > nrbufs) {
+                       pipe_buf_release(pipe, &pipe->bufs[idx]);
+                       idx = next_idx(idx, pipe);
+                       pipe->nrbufs--;
+               }
+       }
+}
+
 static void pipe_advance(struct iov_iter *i, size_t size)
 {
        struct pipe_inode_info *pipe = i->pipe;
-       struct pipe_buffer *buf;
-       int idx = i->idx;
-       size_t off = i->iov_offset, orig_sz;
-       
        if (unlikely(i->count < size))
                size = i->count;
-       orig_sz = size;
-
        if (size) {
+               struct pipe_buffer *buf;
+               size_t off = i->iov_offset, left = size;
+               int idx = i->idx;
                if (off) /* make it relative to the beginning of buffer */
-                       size += off - pipe->bufs[idx].offset;
+                       left += off - pipe->bufs[idx].offset;
                while (1) {
                        buf = &pipe->bufs[idx];
-                       if (size <= buf->len)
+                       if (left <= buf->len)
                                break;
-                       size -= buf->len;
+                       left -= buf->len;
                        idx = next_idx(idx, pipe);
                }
-               buf->len = size;
                i->idx = idx;
-               off = i->iov_offset = buf->offset + size;
-       }
-       if (off)
-               idx = next_idx(idx, pipe);
-       if (pipe->nrbufs) {
-               int unused = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
-               /* [curbuf,unused) is in use.  Free [idx,unused) */
-               while (idx != unused) {
-                       pipe_buf_release(pipe, &pipe->bufs[idx]);
-                       idx = next_idx(idx, pipe);
-                       pipe->nrbufs--;
-               }
+               i->iov_offset = buf->offset + left;
        }
-       i->count -= orig_sz;
+       i->count -= size;
+       /* ... and discard everything past that point */
+       pipe_truncate(i);
 }
 
 void iov_iter_advance(struct iov_iter *i, size_t size)
@@ -826,6 +833,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
                        size_t count)
 {
        BUG_ON(direction != ITER_PIPE);
+       WARN_ON(pipe->nrbufs == pipe->buffers);
        i->type = direction;
        i->pipe = pipe;
        i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
index 6f382e07de77e2f543389c57e52f48a1f5d026ad..0b92d605fb69cc805a96c8333dab36174f755e22 100644 (file)
@@ -640,6 +640,7 @@ static inline void radix_tree_shrink(struct radix_tree_root *root,
                                update_node(node, private);
                }
 
+               WARN_ON_ONCE(!list_empty(&node->private_list));
                radix_tree_node_free(node);
        }
 }
@@ -666,6 +667,7 @@ static void delete_node(struct radix_tree_root *root,
                        root->rnode = NULL;
                }
 
+               WARN_ON_ONCE(!list_empty(&node->private_list));
                radix_tree_node_free(node);
 
                node = parent;
@@ -767,6 +769,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
                        struct radix_tree_node *old = child;
                        offset = child->offset + 1;
                        child = child->parent;
+                       WARN_ON_ONCE(!list_empty(&node->private_list));
                        radix_tree_node_free(old);
                        if (old == entry_to_node(node))
                                return;
@@ -1824,15 +1827,19 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
  *     __radix_tree_delete_node    -    try to free node after clearing a slot
  *     @root:          radix tree root
  *     @node:          node containing @index
+ *     @update_node:   callback for changing leaf nodes
+ *     @private:       private data to pass to @update_node
  *
  *     After clearing the slot at @index in @node from radix tree
  *     rooted at @root, call this function to attempt freeing the
  *     node and shrinking the tree.
  */
 void __radix_tree_delete_node(struct radix_tree_root *root,
-                             struct radix_tree_node *node)
+                             struct radix_tree_node *node,
+                             radix_tree_update_node_t update_node,
+                             void *private)
 {
-       delete_node(root, node, NULL, NULL);
+       delete_node(root, node, update_node, private);
 }
 
 /**
index cb1b54ee8527241de289ab66f34f9a0a1efa761f..975b8fc4f1e1143dcdd295731cf1fc18cf0561fe 100644 (file)
@@ -53,7 +53,7 @@
  */
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
 
-int swiotlb_force;
+enum swiotlb_force swiotlb_force;
 
 /*
  * Used to do a quick range check in swiotlb_tbl_unmap_single and
@@ -82,6 +82,12 @@ static phys_addr_t io_tlb_overflow_buffer;
 static unsigned int *io_tlb_list;
 static unsigned int io_tlb_index;
 
+/*
+ * Max segment that we can provide which (if pages are contingous) will
+ * not be bounced (unless SWIOTLB_FORCE is set).
+ */
+unsigned int max_segment;
+
 /*
  * We need to save away the original address corresponding to a mapped entry
  * for the sync operations.
@@ -106,8 +112,12 @@ setup_io_tlb_npages(char *str)
        }
        if (*str == ',')
                ++str;
-       if (!strcmp(str, "force"))
-               swiotlb_force = 1;
+       if (!strcmp(str, "force")) {
+               swiotlb_force = SWIOTLB_FORCE;
+       } else if (!strcmp(str, "noforce")) {
+               swiotlb_force = SWIOTLB_NO_FORCE;
+               io_tlb_nslabs = 1;
+       }
 
        return 0;
 }
@@ -120,6 +130,20 @@ unsigned long swiotlb_nr_tbl(void)
 }
 EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
 
+unsigned int swiotlb_max_segment(void)
+{
+       return max_segment;
+}
+EXPORT_SYMBOL_GPL(swiotlb_max_segment);
+
+void swiotlb_set_max_segment(unsigned int val)
+{
+       if (swiotlb_force == SWIOTLB_FORCE)
+               max_segment = 1;
+       else
+               max_segment = rounddown(val, PAGE_SIZE);
+}
+
 /* default to 64MB */
 #define IO_TLB_DEFAULT_SIZE (64UL<<20)
 unsigned long swiotlb_size_or_default(void)
@@ -201,6 +225,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
        if (verbose)
                swiotlb_print_info();
 
+       swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
        return 0;
 }
 
@@ -279,6 +304,7 @@ swiotlb_late_init_with_default_size(size_t default_size)
        rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
        if (rc)
                free_pages((unsigned long)vstart, order);
+
        return rc;
 }
 
@@ -333,6 +359,8 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 
        late_alloc = 1;
 
+       swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
+
        return 0;
 
 cleanup4:
@@ -347,6 +375,7 @@ cleanup2:
        io_tlb_end = 0;
        io_tlb_start = 0;
        io_tlb_nslabs = 0;
+       max_segment = 0;
        return -ENOMEM;
 }
 
@@ -375,6 +404,7 @@ void __init swiotlb_free(void)
                                   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
        }
        io_tlb_nslabs = 0;
+       max_segment = 0;
 }
 
 int is_swiotlb_buffer(phys_addr_t paddr)
@@ -543,8 +573,15 @@ static phys_addr_t
 map_single(struct device *hwdev, phys_addr_t phys, size_t size,
           enum dma_data_direction dir, unsigned long attrs)
 {
-       dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
+       dma_addr_t start_dma_addr;
 
+       if (swiotlb_force == SWIOTLB_NO_FORCE) {
+               dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
+                                    &phys);
+               return SWIOTLB_MAP_ERROR;
+       }
+
+       start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
        return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
                                      dir, attrs);
 }
@@ -721,6 +758,9 @@ static void
 swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
             int do_panic)
 {
+       if (swiotlb_force == SWIOTLB_NO_FORCE)
+               return;
+
        /*
         * Ran out of IOMMU space for this operation. This is very bad.
         * Unfortunately the drivers cannot handle this operation properly.
@@ -763,7 +803,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
-       if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
+       if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
                return dev_addr;
 
        trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
@@ -904,7 +944,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
                phys_addr_t paddr = sg_phys(sg);
                dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
 
-               if (swiotlb_force ||
+               if (swiotlb_force == SWIOTLB_FORCE ||
                    !dma_capable(hwdev, dev_addr, sg->length)) {
                        phys_addr_t map = map_single(hwdev, sg_phys(sg),
                                                     sg->length, dir, attrs);
index d0e4d1002059360e50254ae2c87dc8f7a87a2dff..b772a33ef640ab0d6770bb3d249a6fe6f16eeebc 100644 (file)
@@ -138,7 +138,7 @@ static int page_cache_tree_insert(struct address_space *mapping,
                                dax_radix_locked_entry(0, RADIX_DAX_EMPTY));
                        /* Wakeup waiters for exceptional entry lock */
                        dax_wake_mapping_entry_waiter(mapping, page->index, p,
-                                                     false);
+                                                     true);
                }
        }
        __radix_tree_replace(&mapping->page_tree, node, slot, page,
index 10eedbf14421f29675d18e80569bafc9efd60763..9a6bd6c8d55a6691047e516a46c2cf6b931b912d 100644 (file)
@@ -883,15 +883,17 @@ void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
 {
        pmd_t entry;
        unsigned long haddr;
+       bool write = vmf->flags & FAULT_FLAG_WRITE;
 
        vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
        if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
                goto unlock;
 
        entry = pmd_mkyoung(orig_pmd);
+       if (write)
+               entry = pmd_mkdirty(entry);
        haddr = vmf->address & HPAGE_PMD_MASK;
-       if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry,
-                               vmf->flags & FAULT_FLAG_WRITE))
+       if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
                update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
 
 unlock:
@@ -919,8 +921,7 @@ static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
        }
 
        for (i = 0; i < HPAGE_PMD_NR; i++) {
-               pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
-                                              __GFP_OTHER_NODE, vma,
+               pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma,
                                               vmf->address, page_to_nid(page));
                if (unlikely(!pages[i] ||
                             mem_cgroup_try_charge(pages[i], vma->vm_mm,
index 3edb759c5c7d15dfeacb4eb18e4a901602a0ee23..c7025c132670a4d8e3279d1ebb7730718fb6aa8a 100644 (file)
@@ -1773,23 +1773,32 @@ free:
 }
 
 /*
- * When releasing a hugetlb pool reservation, any surplus pages that were
- * allocated to satisfy the reservation must be explicitly freed if they were
- * never used.
- * Called with hugetlb_lock held.
+ * This routine has two main purposes:
+ * 1) Decrement the reservation count (resv_huge_pages) by the value passed
+ *    in unused_resv_pages.  This corresponds to the prior adjustments made
+ *    to the associated reservation map.
+ * 2) Free any unused surplus pages that may have been allocated to satisfy
+ *    the reservation.  As many as unused_resv_pages may be freed.
+ *
+ * Called with hugetlb_lock held.  However, the lock could be dropped (and
+ * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
+ * we must make sure nobody else can claim pages we are in the process of
+ * freeing.  Do this by ensuring resv_huge_page always is greater than the
+ * number of huge pages we plan to free when dropping the lock.
  */
 static void return_unused_surplus_pages(struct hstate *h,
                                        unsigned long unused_resv_pages)
 {
        unsigned long nr_pages;
 
-       /* Uncommit the reservation */
-       h->resv_huge_pages -= unused_resv_pages;
-
        /* Cannot return gigantic pages currently */
        if (hstate_is_gigantic(h))
-               return;
+               goto out;
 
+       /*
+        * Part (or even all) of the reservation could have been backed
+        * by pre-allocated pages. Only free surplus pages.
+        */
        nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
 
        /*
@@ -1799,12 +1808,22 @@ static void return_unused_surplus_pages(struct hstate *h,
         * when the nodes with surplus pages have no free pages.
         * free_pool_huge_page() will balance the the freed pages across the
         * on-line nodes with memory and will handle the hstate accounting.
+        *
+        * Note that we decrement resv_huge_pages as we free the pages.  If
+        * we drop the lock, resv_huge_pages will still be sufficiently large
+        * to cover subsequent pages we may free.
         */
        while (nr_pages--) {
+               h->resv_huge_pages--;
+               unused_resv_pages--;
                if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
-                       break;
+                       goto out;
                cond_resched_lock(&hugetlb_lock);
        }
+
+out:
+       /* Fully uncommit the reservation */
+       h->resv_huge_pages -= unused_resv_pages;
 }
 
 
index e32389a970305eaf60b36da896e2226a8ce18a0d..77ae3239c3de17bfbf7ba29b56a5cb270611cfd8 100644 (file)
@@ -943,7 +943,7 @@ static void collapse_huge_page(struct mm_struct *mm,
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
        /* Only allocate from the target node */
-       gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE;
+       gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
 
        /*
         * Before allocating the hugepage, release the mmap_sem read lock.
@@ -1242,7 +1242,6 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
        struct vm_area_struct *vma;
        unsigned long addr;
        pmd_t *pmd, _pmd;
-       bool deposited = false;
 
        i_mmap_lock_write(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
@@ -1267,26 +1266,10 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
                        spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
                        /* assume page table is clear */
                        _pmd = pmdp_collapse_flush(vma, addr, pmd);
-                       /*
-                        * now deposit the pgtable for arch that need it
-                        * otherwise free it.
-                        */
-                       if (arch_needs_pgtable_deposit()) {
-                               /*
-                                * The deposit should be visibile only after
-                                * collapse is seen by others.
-                                */
-                               smp_wmb();
-                               pgtable_trans_huge_deposit(vma->vm_mm, pmd,
-                                                          pmd_pgtable(_pmd));
-                               deposited = true;
-                       }
                        spin_unlock(ptl);
                        up_write(&vma->vm_mm->mmap_sem);
-                       if (!deposited) {
-                               atomic_long_dec(&vma->vm_mm->nr_ptes);
-                               pte_free(vma->vm_mm, pmd_pgtable(_pmd));
-                       }
+                       atomic_long_dec(&vma->vm_mm->nr_ptes);
+                       pte_free(vma->vm_mm, pmd_pgtable(_pmd));
                }
        }
        i_mmap_unlock_write(mapping);
@@ -1326,8 +1309,7 @@ static void collapse_shmem(struct mm_struct *mm,
        VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
 
        /* Only allocate from the target node */
-       gfp = alloc_hugepage_khugepaged_gfpmask() |
-               __GFP_OTHER_NODE | __GFP_THISNODE;
+       gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
 
        new_page = khugepaged_alloc_page(hpage, gfp, node);
        if (!new_page) {
index 4048897e7b01a6e5e82333c0852629eded927ff3..a63a8f8326647b92bdc63810c8a93be96047f748 100644 (file)
@@ -625,8 +625,8 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
                                           int nid, unsigned int lru_mask)
 {
+       struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
        unsigned long nr = 0;
-       struct mem_cgroup_per_node *mz;
        enum lru_list lru;
 
        VM_BUG_ON((unsigned)nid >= nr_node_ids);
@@ -634,8 +634,7 @@ unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
        for_each_lru(lru) {
                if (!(BIT(lru) & lru_mask))
                        continue;
-               mz = mem_cgroup_nodeinfo(memcg, nid);
-               nr += mz->lru_size[lru];
+               nr += mem_cgroup_get_lru_size(lruvec, lru);
        }
        return nr;
 }
@@ -1002,6 +1001,7 @@ out:
  * mem_cgroup_update_lru_size - account for adding or removing an lru page
  * @lruvec: mem_cgroup per zone lru vector
  * @lru: index of lru list the page is sitting on
+ * @zid: zone id of the accounted pages
  * @nr_pages: positive when adding or negative when removing
  *
  * This function must be called under lru_lock, just before a page is added
@@ -1009,27 +1009,25 @@ out:
  * so as to allow it to check that lru_size 0 is consistent with list_empty).
  */
 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
-                               int nr_pages)
+                               int zid, int nr_pages)
 {
        struct mem_cgroup_per_node *mz;
        unsigned long *lru_size;
        long size;
-       bool empty;
 
        if (mem_cgroup_disabled())
                return;
 
        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-       lru_size = mz->lru_size + lru;
-       empty = list_empty(lruvec->lists + lru);
+       lru_size = &mz->lru_zone_size[zid][lru];
 
        if (nr_pages < 0)
                *lru_size += nr_pages;
 
        size = *lru_size;
-       if (WARN_ONCE(size < 0 || empty != !size,
-               "%s(%p, %d, %d): lru_size %ld but %sempty\n",
-               __func__, lruvec, lru, nr_pages, size, empty ? "" : "not ")) {
+       if (WARN_ONCE(size < 0,
+               "%s(%p, %d, %d): lru_size %ld\n",
+               __func__, lruvec, lru, nr_pages, size)) {
                VM_BUG_ON(1);
                *lru_size = 0;
        }
index 7d23b505024846301d0e1bb77783489455c8c742..6bf2b471e30ca566a55160e4131bf7e7b9c3c4ea 100644 (file)
@@ -3008,13 +3008,6 @@ static int do_set_pmd(struct vm_fault *vmf, struct page *page)
        ret = 0;
        count_vm_event(THP_FILE_MAPPED);
 out:
-       /*
-        * If we are going to fallback to pte mapping, do a
-        * withdraw with pmd lock held.
-        */
-       if (arch_needs_pgtable_deposit() && ret == VM_FAULT_FALLBACK)
-               vmf->prealloc_pte = pgtable_trans_huge_withdraw(vma->vm_mm,
-                                                               vmf->pmd);
        spin_unlock(vmf->ptl);
        return ret;
 }
@@ -3055,20 +3048,18 @@ int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 
                ret = do_set_pmd(vmf, page);
                if (ret != VM_FAULT_FALLBACK)
-                       goto fault_handled;
+                       return ret;
        }
 
        if (!vmf->pte) {
                ret = pte_alloc_one_map(vmf);
                if (ret)
-                       goto fault_handled;
+                       return ret;
        }
 
        /* Re-check under ptl */
-       if (unlikely(!pte_none(*vmf->pte))) {
-               ret = VM_FAULT_NOPAGE;
-               goto fault_handled;
-       }
+       if (unlikely(!pte_none(*vmf->pte)))
+               return VM_FAULT_NOPAGE;
 
        flush_icache_page(vma, page);
        entry = mk_pte(page, vma->vm_page_prot);
@@ -3088,15 +3079,8 @@ int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 
        /* no need to invalidate: a not-present page won't be cached */
        update_mmu_cache(vma, vmf->address, vmf->pte);
-       ret = 0;
 
-fault_handled:
-       /* preallocated pagetable is unused: free it */
-       if (vmf->prealloc_pte) {
-               pte_free(vmf->vma->vm_mm, vmf->prealloc_pte);
-               vmf->prealloc_pte = 0;
-       }
-       return ret;
+       return 0;
 }
 
 
@@ -3360,15 +3344,24 @@ static int do_shared_fault(struct vm_fault *vmf)
 static int do_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
+       int ret;
 
        /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
        if (!vma->vm_ops->fault)
-               return VM_FAULT_SIGBUS;
-       if (!(vmf->flags & FAULT_FLAG_WRITE))
-               return do_read_fault(vmf);
-       if (!(vma->vm_flags & VM_SHARED))
-               return do_cow_fault(vmf);
-       return do_shared_fault(vmf);
+               ret = VM_FAULT_SIGBUS;
+       else if (!(vmf->flags & FAULT_FLAG_WRITE))
+               ret = do_read_fault(vmf);
+       else if (!(vma->vm_flags & VM_SHARED))
+               ret = do_cow_fault(vmf);
+       else
+               ret = do_shared_fault(vmf);
+
+       /* preallocated pagetable is unused: free it */
+       if (vmf->prealloc_pte) {
+               pte_free(vma->vm_mm, vmf->prealloc_pte);
+               vmf->prealloc_pte = 0;
+       }
+       return ret;
 }
 
 static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
@@ -3779,8 +3772,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
 
-static int __follow_pte(struct mm_struct *mm, unsigned long address,
-               pte_t **ptepp, spinlock_t **ptlp)
+static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+               pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
 {
        pgd_t *pgd;
        pud_t *pud;
@@ -3797,11 +3790,20 @@ static int __follow_pte(struct mm_struct *mm, unsigned long address,
 
        pmd = pmd_offset(pud, address);
        VM_BUG_ON(pmd_trans_huge(*pmd));
-       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-               goto out;
 
-       /* We cannot handle huge page PFN maps. Luckily they don't exist. */
-       if (pmd_huge(*pmd))
+       if (pmd_huge(*pmd)) {
+               if (!pmdpp)
+                       goto out;
+
+               *ptlp = pmd_lock(mm, pmd);
+               if (pmd_huge(*pmd)) {
+                       *pmdpp = pmd;
+                       return 0;
+               }
+               spin_unlock(*ptlp);
+       }
+
+       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
                goto out;
 
        ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
@@ -3817,16 +3819,30 @@ out:
        return -EINVAL;
 }
 
-int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp,
-              spinlock_t **ptlp)
+static inline int follow_pte(struct mm_struct *mm, unsigned long address,
+                            pte_t **ptepp, spinlock_t **ptlp)
+{
+       int res;
+
+       /* (void) is needed to make gcc happy */
+       (void) __cond_lock(*ptlp,
+                          !(res = __follow_pte_pmd(mm, address, ptepp, NULL,
+                                          ptlp)));
+       return res;
+}
+
+int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+                            pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
 {
        int res;
 
        /* (void) is needed to make gcc happy */
        (void) __cond_lock(*ptlp,
-                          !(res = __follow_pte(mm, address, ptepp, ptlp)));
+                          !(res = __follow_pte_pmd(mm, address, ptepp, pmdpp,
+                                          ptlp)));
        return res;
 }
+EXPORT_SYMBOL(follow_pte_pmd);
 
 /**
  * follow_pfn - look up PFN at a user virtual address
index 2c6d5f64feca409e9fdca4b551a240882e421476..d604d2596b7bed41b9748ee3242571b771db4d5e 100644 (file)
@@ -1864,14 +1864,14 @@ int move_freepages(struct zone *zone,
 #endif
 
        for (page = start_page; page <= end_page;) {
-               /* Make sure we are not inadvertently changing nodes */
-               VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
-
                if (!pfn_valid_within(page_to_pfn(page))) {
                        page++;
                        continue;
                }
 
+               /* Make sure we are not inadvertently changing nodes */
+               VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
+
                if (!PageBuddy(page)) {
                        page++;
                        continue;
@@ -2583,30 +2583,22 @@ int __isolate_free_page(struct page *page, unsigned int order)
  * Update NUMA hit/miss statistics
  *
  * Must be called with interrupts disabled.
- *
- * When __GFP_OTHER_NODE is set assume the node of the preferred
- * zone is the local node. This is useful for daemons who allocate
- * memory on behalf of other processes.
  */
-static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
-                                                               gfp_t flags)
+static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
 {
 #ifdef CONFIG_NUMA
-       int local_nid = numa_node_id();
        enum zone_stat_item local_stat = NUMA_LOCAL;
 
-       if (unlikely(flags & __GFP_OTHER_NODE)) {
+       if (z->node != numa_node_id())
                local_stat = NUMA_OTHER;
-               local_nid = preferred_zone->node;
-       }
 
-       if (z->node == local_nid) {
+       if (z->node == preferred_zone->node)
                __inc_zone_state(z, NUMA_HIT);
-               __inc_zone_state(z, local_stat);
-       } else {
+       else {
                __inc_zone_state(z, NUMA_MISS);
                __inc_zone_state(preferred_zone, NUMA_FOREIGN);
        }
+       __inc_zone_state(z, local_stat);
 #endif
 }
 
@@ -2674,7 +2666,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
        }
 
        __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
-       zone_statistics(preferred_zone, zone, gfp_flags);
+       zone_statistics(preferred_zone, zone);
        local_irq_restore(flags);
 
        VM_BUG_ON_PAGE(bad_range(zone, page), page);
@@ -3904,8 +3896,8 @@ EXPORT_SYMBOL(free_pages);
  * drivers to provide a backing region of memory for use as either an
  * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
  */
-static struct page *__page_frag_refill(struct page_frag_cache *nc,
-                                      gfp_t gfp_mask)
+static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
+                                            gfp_t gfp_mask)
 {
        struct page *page = NULL;
        gfp_t gfp = gfp_mask;
@@ -3925,22 +3917,23 @@ static struct page *__page_frag_refill(struct page_frag_cache *nc,
        return page;
 }
 
-void __page_frag_drain(struct page *page, unsigned int order,
-                      unsigned int count)
+void __page_frag_cache_drain(struct page *page, unsigned int count)
 {
        VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
 
        if (page_ref_sub_and_test(page, count)) {
+               unsigned int order = compound_order(page);
+
                if (order == 0)
                        free_hot_cold_page(page, false);
                else
                        __free_pages_ok(page, order);
        }
 }
-EXPORT_SYMBOL(__page_frag_drain);
+EXPORT_SYMBOL(__page_frag_cache_drain);
 
-void *__alloc_page_frag(struct page_frag_cache *nc,
-                       unsigned int fragsz, gfp_t gfp_mask)
+void *page_frag_alloc(struct page_frag_cache *nc,
+                     unsigned int fragsz, gfp_t gfp_mask)
 {
        unsigned int size = PAGE_SIZE;
        struct page *page;
@@ -3948,7 +3941,7 @@ void *__alloc_page_frag(struct page_frag_cache *nc,
 
        if (unlikely(!nc->va)) {
 refill:
-               page = __page_frag_refill(nc, gfp_mask);
+               page = __page_frag_cache_refill(nc, gfp_mask);
                if (!page)
                        return NULL;
 
@@ -3991,19 +3984,19 @@ refill:
 
        return nc->va + offset;
 }
-EXPORT_SYMBOL(__alloc_page_frag);
+EXPORT_SYMBOL(page_frag_alloc);
 
 /*
  * Frees a page fragment allocated out of either a compound or order 0 page.
  */
-void __free_page_frag(void *addr)
+void page_frag_free(void *addr)
 {
        struct page *page = virt_to_head_page(addr);
 
        if (unlikely(put_page_testzero(page)))
                __free_pages_ok(page, compound_order(page));
 }
-EXPORT_SYMBOL(__free_page_frag);
+EXPORT_SYMBOL(page_frag_free);
 
 static void *make_alloc_exact(unsigned long addr, unsigned int order,
                size_t size)
index 29bc6c0dedd07020e9f433332f907620239343a5..4f2ec6bb46ebe949d2e19ff154faddc7e2526f02 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2457,7 +2457,6 @@ union freelist_init_state {
                unsigned int pos;
                unsigned int *list;
                unsigned int count;
-               unsigned int rand;
        };
        struct rnd_state rnd_state;
 };
@@ -2483,8 +2482,7 @@ static bool freelist_state_initialize(union freelist_init_state *state,
        } else {
                state->list = cachep->random_seq;
                state->count = count;
-               state->pos = 0;
-               state->rand = rand;
+               state->pos = rand % count;
                ret = true;
        }
        return ret;
@@ -2493,7 +2491,9 @@ static bool freelist_state_initialize(union freelist_init_state *state,
 /* Get the next entry on the list and randomize it using a random shift */
 static freelist_idx_t next_random_slot(union freelist_init_state *state)
 {
-       return (state->list[state->pos++] + state->rand) % state->count;
+       if (state->pos >= state->count)
+               state->pos = 0;
+       return state->list[state->pos++];
 }
 
 /* Swap two freelist entries */
index 1c6e0321205dd2d34abc7f39a0753c128eb7ae53..4761701d1721e63fb8334e3a63fcc11b9e5e6443 100644 (file)
@@ -943,11 +943,25 @@ bool reuse_swap_page(struct page *page, int *total_mapcount)
        count = page_trans_huge_mapcount(page, total_mapcount);
        if (count <= 1 && PageSwapCache(page)) {
                count += page_swapcount(page);
-               if (count == 1 && !PageWriteback(page)) {
+               if (count != 1)
+                       goto out;
+               if (!PageWriteback(page)) {
                        delete_from_swap_cache(page);
                        SetPageDirty(page);
+               } else {
+                       swp_entry_t entry;
+                       struct swap_info_struct *p;
+
+                       entry.val = page_private(page);
+                       p = swap_info_get(entry);
+                       if (p->flags & SWP_STABLE_WRITES) {
+                               spin_unlock(&p->lock);
+                               return false;
+                       }
+                       spin_unlock(&p->lock);
                }
        }
+out:
        return count <= 1;
 }
 
@@ -2448,6 +2462,10 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
                error = -ENOMEM;
                goto bad_swap;
        }
+
+       if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
+               p->flags |= SWP_STABLE_WRITES;
+
        if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
                int cpu;
 
index 6aa5b01d3e757b9b462993f3421050fd8831ceea..532a2a750952daffed4e4242d36a449eb9ba8837 100644 (file)
@@ -242,6 +242,16 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru)
        return node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
 }
 
+unsigned long lruvec_zone_lru_size(struct lruvec *lruvec, enum lru_list lru,
+                                  int zone_idx)
+{
+       if (!mem_cgroup_disabled())
+               return mem_cgroup_get_zone_lru_size(lruvec, lru, zone_idx);
+
+       return zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zone_idx],
+                              NR_ZONE_LRU_BASE + lru);
+}
+
 /*
  * Add a shrinker callback to be called from the vm.
  */
@@ -1382,8 +1392,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
  * be complete before mem_cgroup_update_lru_size due to a santity check.
  */
 static __always_inline void update_lru_sizes(struct lruvec *lruvec,
-                       enum lru_list lru, unsigned long *nr_zone_taken,
-                       unsigned long nr_taken)
+                       enum lru_list lru, unsigned long *nr_zone_taken)
 {
        int zid;
 
@@ -1392,11 +1401,11 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
                        continue;
 
                __update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
-       }
-
 #ifdef CONFIG_MEMCG
-       mem_cgroup_update_lru_size(lruvec, lru, -nr_taken);
+               mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
 #endif
+       }
+
 }
 
 /*
@@ -1501,7 +1510,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
        *nr_scanned = scan;
        trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, scan,
                                    nr_taken, mode, is_file_lru(lru));
-       update_lru_sizes(lruvec, lru, nr_zone_taken, nr_taken);
+       update_lru_sizes(lruvec, lru, nr_zone_taken);
        return nr_taken;
 }
 
@@ -2047,10 +2056,8 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
                if (!managed_zone(zone))
                        continue;
 
-               inactive_zone = zone_page_state(zone,
-                               NR_ZONE_LRU_BASE + (file * LRU_FILE));
-               active_zone = zone_page_state(zone,
-                               NR_ZONE_LRU_BASE + (file * LRU_FILE) + LRU_ACTIVE);
+               inactive_zone = lruvec_zone_lru_size(lruvec, file * LRU_FILE, zid);
+               active_zone = lruvec_zone_lru_size(lruvec, (file * LRU_FILE) + LRU_ACTIVE, zid);
 
                inactive -= min(inactive, inactive_zone);
                active -= min(active, active_zone);
index 241fa5d6b3b2fe155ed0f458b9d188a926a51e80..abb58ffa3c64cb330d7e3d7aca7897aab0fea6a4 100644 (file)
@@ -473,7 +473,8 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
        if (WARN_ON_ONCE(node->exceptional))
                goto out_invalid;
        inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);
-       __radix_tree_delete_node(&mapping->page_tree, node);
+       __radix_tree_delete_node(&mapping->page_tree, node,
+                                workingset_update_node, mapping);
 
 out_invalid:
        spin_unlock(&mapping->tree_lock);
index a1005007224ca04ee673fb948776107d6ba075c4..a29bb4b41c50e3c55463eaee2a5dda1292b757f6 100644 (file)
@@ -258,10 +258,6 @@ config XPS
 config HWBM
        bool
 
-config SOCK_CGROUP_DATA
-       bool
-       default n
-
 config CGROUP_NET_PRIO
        bool "Network priority cgroup"
        depends on CGROUPS
index 019557d0a11d2434ff7186da5504917717bf7d2b..09cfe87f0a44d64dbbdb9209d8eb358efa68f887 100644 (file)
@@ -1059,7 +1059,9 @@ static void __exit lane_module_cleanup(void)
 {
        int i;
 
+#ifdef CONFIG_PROC_FS
        remove_proc_entry("lec", atm_proc_root);
+#endif
 
        deregister_atm_ioctl(&lane_ioctl_ops);
 
index 8ca6a929bf1255cb432fd4bd59d34345d62e09c4..95087e6e8258366af95579bb308d1a6e18266f0e 100644 (file)
@@ -399,7 +399,7 @@ bridged_dnat:
                                br_nf_hook_thresh(NF_BR_PRE_ROUTING,
                                                  net, sk, skb, skb->dev,
                                                  NULL,
-                                                 br_nf_pre_routing_finish);
+                                                 br_nf_pre_routing_finish_bridge);
                                return 0;
                        }
                        ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
index 8db5a0b4b52061afe9cd44dead33cc69523d15df..07b307b0b414730688b64fdb2295b0fa1b721e51 100644 (file)
@@ -4441,7 +4441,9 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
            pinfo->nr_frags &&
            !PageHighMem(skb_frag_page(frag0))) {
                NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
-               NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
+               NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
+                                                   skb_frag_size(frag0),
+                                                   skb->end - skb->tail);
        }
 }
 
index 8e0c0635ee975e1c71a126677f63ff220f7f0c44..fb55327dcfeabdaf3eeecc3a8d176ae215612649 100644 (file)
@@ -75,6 +75,7 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
        struct nlattr *nla;
        struct sk_buff *skb;
        unsigned long flags;
+       void *msg_header;
 
        al = sizeof(struct net_dm_alert_msg);
        al += dm_hit_limit * sizeof(struct net_dm_drop_point);
@@ -82,21 +83,41 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
 
        skb = genlmsg_new(al, GFP_KERNEL);
 
-       if (skb) {
-               genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
-                               0, NET_DM_CMD_ALERT);
-               nla = nla_reserve(skb, NLA_UNSPEC,
-                                 sizeof(struct net_dm_alert_msg));
-               msg = nla_data(nla);
-               memset(msg, 0, al);
-       } else {
-               mod_timer(&data->send_timer, jiffies + HZ / 10);
+       if (!skb)
+               goto err;
+
+       msg_header = genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
+                                0, NET_DM_CMD_ALERT);
+       if (!msg_header) {
+               nlmsg_free(skb);
+               skb = NULL;
+               goto err;
+       }
+       nla = nla_reserve(skb, NLA_UNSPEC,
+                         sizeof(struct net_dm_alert_msg));
+       if (!nla) {
+               nlmsg_free(skb);
+               skb = NULL;
+               goto err;
        }
+       msg = nla_data(nla);
+       memset(msg, 0, al);
+       goto out;
 
+err:
+       mod_timer(&data->send_timer, jiffies + HZ / 10);
+out:
        spin_lock_irqsave(&data->lock, flags);
        swap(data->skb, skb);
        spin_unlock_irqrestore(&data->lock, flags);
 
+       if (skb) {
+               struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
+               struct genlmsghdr *gnlh = (struct genlmsghdr *)nlmsg_data(nlh);
+
+               genlmsg_end(skb, genlmsg_data(gnlh));
+       }
+
        return skb;
 }
 
index d6447dc1037151495d88064938dfb4e255377416..1b7673aac59d51a5f8b5ef3f2076f1440c017fae 100644 (file)
@@ -67,8 +67,8 @@ EXPORT_SYMBOL(skb_flow_dissector_init);
  * The function will try to retrieve a be32 entity at
  * offset poff
  */
-__be16 skb_flow_get_be16(const struct sk_buff *skb, int poff, void *data,
-                        int hlen)
+static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff,
+                               void *data, int hlen)
 {
        __be16 *u, _u;
 
@@ -468,8 +468,9 @@ ip_proto_again:
                        if (hdr->flags & GRE_ACK)
                                offset += sizeof(((struct pptp_gre_header *)0)->ack);
 
-                       ppp_hdr = skb_header_pointer(skb, nhoff + offset,
-                                                    sizeof(_ppp_hdr), _ppp_hdr);
+                       ppp_hdr = __skb_header_pointer(skb, nhoff + offset,
+                                                    sizeof(_ppp_hdr),
+                                                    data, hlen, _ppp_hdr);
                        if (!ppp_hdr)
                                goto out_bad;
 
index 18b5aae99becf81c3aa55820d49332067a1c4fe7..75e3ea7bda08f39e07d515768b56678842e74c40 100644 (file)
@@ -3898,6 +3898,9 @@ static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh)
        u32 filter_mask;
        int err;
 
+       if (nlmsg_len(nlh) < sizeof(*ifsm))
+               return -EINVAL;
+
        ifsm = nlmsg_data(nlh);
        if (ifsm->ifindex > 0)
                dev = __dev_get_by_index(net, ifsm->ifindex);
@@ -3947,6 +3950,9 @@ static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
        cb->seq = net->dev_base_seq;
 
+       if (nlmsg_len(cb->nlh) < sizeof(*ifsm))
+               return -EINVAL;
+
        ifsm = nlmsg_data(cb->nlh);
        filter_mask = ifsm->filter_mask;
        if (!filter_mask)
index 5a03730fbc1a84376985d4d68d8b80d8f38b0985..734c71468b013838516cfe8c744dcd0e797a6e2b 100644 (file)
@@ -369,7 +369,7 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 
        local_irq_save(flags);
        nc = this_cpu_ptr(&netdev_alloc_cache);
-       data = __alloc_page_frag(nc, fragsz, gfp_mask);
+       data = page_frag_alloc(nc, fragsz, gfp_mask);
        local_irq_restore(flags);
        return data;
 }
@@ -391,7 +391,7 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 
-       return __alloc_page_frag(&nc->page, fragsz, gfp_mask);
+       return page_frag_alloc(&nc->page, fragsz, gfp_mask);
 }
 
 void *napi_alloc_frag(unsigned int fragsz)
@@ -441,7 +441,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
        local_irq_save(flags);
 
        nc = this_cpu_ptr(&netdev_alloc_cache);
-       data = __alloc_page_frag(nc, len, gfp_mask);
+       data = page_frag_alloc(nc, len, gfp_mask);
        pfmemalloc = nc->pfmemalloc;
 
        local_irq_restore(flags);
@@ -505,7 +505,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
        if (sk_memalloc_socks())
                gfp_mask |= __GFP_MEMALLOC;
 
-       data = __alloc_page_frag(&nc->page, len, gfp_mask);
+       data = page_frag_alloc(&nc->page, len, gfp_mask);
        if (unlikely(!data))
                return NULL;
 
index f560e0826009851e79a1f8a8b90f4ded3cfc382d..4eca27dc5c9478e36120a5128a7c11d6208b45a9 100644 (file)
@@ -222,7 +222,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
   "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
   "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_KCM"      ,
-  "sk_lock-AF_MAX"
+  "sk_lock-AF_QIPCRTR", "sk_lock-AF_MAX"
 };
 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
   "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
@@ -239,7 +239,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
   "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
   "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_KCM"       ,
-  "slock-AF_MAX"
+  "slock-AF_QIPCRTR", "slock-AF_MAX"
 };
 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
@@ -256,7 +256,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
   "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
   "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_KCM"      ,
-  "clock-AF_MAX"
+  "clock-AF_QIPCRTR", "clock-AF_MAX"
 };
 
 /*
index 5fff951a0a4928ccf28fb681be86c7df6a05e94f..da38621245458bae2506b0c030d92315f1be5775 100644 (file)
@@ -394,9 +394,11 @@ static int dsa_dst_apply(struct dsa_switch_tree *dst)
                        return err;
        }
 
-       err = dsa_cpu_port_ethtool_setup(dst->ds[0]);
-       if (err)
-               return err;
+       if (dst->ds[0]) {
+               err = dsa_cpu_port_ethtool_setup(dst->ds[0]);
+               if (err)
+                       return err;
+       }
 
        /* If we use a tagging format that doesn't have an ethertype
         * field, make sure that all packets from this point on get
@@ -433,7 +435,8 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst)
                dsa_ds_unapply(dst, ds);
        }
 
-       dsa_cpu_port_ethtool_restore(dst->ds[0]);
+       if (dst->ds[0])
+               dsa_cpu_port_ethtool_restore(dst->ds[0]);
 
        pr_info("DSA: tree %d unapplied\n", dst->tree);
        dst->applied = false;
index 3ff8938893ec85311012b55a27de10d9368b50f1..eae0332b0e8c1f861ce629ed9ce3ddc45802a6b8 100644 (file)
@@ -85,7 +85,7 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
        if (tb)
                return tb;
 
-       if (id == RT_TABLE_LOCAL)
+       if (id == RT_TABLE_LOCAL && !net->ipv4.fib_has_custom_rules)
                alias = fib_new_table(net, RT_TABLE_MAIN);
 
        tb = fib_trie_table(id, alias);
index 7a5b4c7d9a87b18051a94aa2e16e6b85c8d85aa6..eba1546b5031e6e280a558118e55b13befc4c8ea 100644 (file)
@@ -1618,8 +1618,13 @@ void fib_select_multipath(struct fib_result *res, int hash)
 void fib_select_path(struct net *net, struct fib_result *res,
                     struct flowi4 *fl4, int mp_hash)
 {
+       bool oif_check;
+
+       oif_check = (fl4->flowi4_oif == 0 ||
+                    fl4->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF);
+
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-       if (res->fi->fib_nhs > 1 && fl4->flowi4_oif == 0) {
+       if (res->fi->fib_nhs > 1 && oif_check) {
                if (mp_hash < 0)
                        mp_hash = get_hash_from_flowi4(fl4) >> 1;
 
@@ -1629,7 +1634,7 @@ void fib_select_path(struct net *net, struct fib_result *res,
 #endif
        if (!res->prefixlen &&
            res->table->tb_num_default > 1 &&
-           res->type == RTN_UNICAST && !fl4->flowi4_oif)
+           res->type == RTN_UNICAST && oif_check)
                fib_select_default(fl4, res);
 
        if (!fl4->saddr)
index 68d622133f5386e621148da7330dcc747d186c6c..5b15459955f84cfc26dd2b12f129b1ee4014e62b 100644 (file)
@@ -219,9 +219,14 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
 static void igmp_gq_start_timer(struct in_device *in_dev)
 {
        int tv = prandom_u32() % in_dev->mr_maxdelay;
+       unsigned long exp = jiffies + tv + 2;
+
+       if (in_dev->mr_gq_running &&
+           time_after_eq(exp, (in_dev->mr_gq_timer).expires))
+               return;
 
        in_dev->mr_gq_running = 1;
-       if (!mod_timer(&in_dev->mr_gq_timer, jiffies+tv+2))
+       if (!mod_timer(&in_dev->mr_gq_timer, exp))
                in_dev_hold(in_dev);
 }
 
index 57e1405e82822543ed6fc788e8b7dc69091f2bb3..53ae0c6315ad03e46f93ae68cb930fff5848edcd 100644 (file)
@@ -1225,8 +1225,14 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
                 * which has interface index (iif) as the first member of the
                 * underlying inet{6}_skb_parm struct. This code then overlays
                 * PKTINFO_SKB_CB and in_pktinfo also has iif as the first
-                * element so the iif is picked up from the prior IPCB
+                * element so the iif is picked up from the prior IPCB. If iif
+                * is the loopback interface, then return the sending interface
+                * (e.g., process binds socket to eth0 for Tx which is
+                * redirected to loopback in the rtable/dst).
                 */
+               if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX)
+                       pktinfo->ipi_ifindex = inet_iif(skb);
+
                pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
        } else {
                pktinfo->ipi_ifindex = 0;
index 21db00d0362bb60d48aed2c900b857f86cef5793..a6b8c1a4102ba7ab07efbcf504fa7ca4025c6f19 100644 (file)
@@ -144,7 +144,7 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
        rcu_read_lock_bh();
        c = __clusterip_config_find(net, clusterip);
        if (c) {
-               if (unlikely(!atomic_inc_not_zero(&c->refcount)))
+               if (!c->pde || unlikely(!atomic_inc_not_zero(&c->refcount)))
                        c = NULL;
                else if (entry)
                        atomic_inc(&c->entries);
@@ -166,14 +166,15 @@ clusterip_config_init_nodelist(struct clusterip_config *c,
 
 static struct clusterip_config *
 clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
-                       struct net_device *dev)
+                     struct net_device *dev)
 {
+       struct net *net = dev_net(dev);
        struct clusterip_config *c;
-       struct clusterip_net *cn = net_generic(dev_net(dev), clusterip_net_id);
+       struct clusterip_net *cn = net_generic(net, clusterip_net_id);
 
        c = kzalloc(sizeof(*c), GFP_ATOMIC);
        if (!c)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        c->dev = dev;
        c->clusterip = ip;
@@ -185,6 +186,17 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
        atomic_set(&c->refcount, 1);
        atomic_set(&c->entries, 1);
 
+       spin_lock_bh(&cn->lock);
+       if (__clusterip_config_find(net, ip)) {
+               spin_unlock_bh(&cn->lock);
+               kfree(c);
+
+               return ERR_PTR(-EBUSY);
+       }
+
+       list_add_rcu(&c->list, &cn->configs);
+       spin_unlock_bh(&cn->lock);
+
 #ifdef CONFIG_PROC_FS
        {
                char buffer[16];
@@ -195,16 +207,16 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
                                          cn->procdir,
                                          &clusterip_proc_fops, c);
                if (!c->pde) {
+                       spin_lock_bh(&cn->lock);
+                       list_del_rcu(&c->list);
+                       spin_unlock_bh(&cn->lock);
                        kfree(c);
-                       return NULL;
+
+                       return ERR_PTR(-ENOMEM);
                }
        }
 #endif
 
-       spin_lock_bh(&cn->lock);
-       list_add_rcu(&c->list, &cn->configs);
-       spin_unlock_bh(&cn->lock);
-
        return c;
 }
 
@@ -410,9 +422,9 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
 
                        config = clusterip_config_init(cipinfo,
                                                        e->ip.dst.s_addr, dev);
-                       if (!config) {
+                       if (IS_ERR(config)) {
                                dev_put(dev);
-                               return -ENOMEM;
+                               return PTR_ERR(config);
                        }
                        dev_mc_add(config->dev, config->clustermac);
                }
index a82a11747b3f14c4567a6d8072dfc2f77421547a..0fcac8e7a2b2fb9fdb9f74bdcadf32bd177ceb39 100644 (file)
@@ -1914,7 +1914,8 @@ local_input:
                }
        }
 
-       rth = rt_dst_alloc(net->loopback_dev, flags | RTCF_LOCAL, res.type,
+       rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
+                          flags | RTCF_LOCAL, res.type,
                           IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
        if (!rth)
                goto e_nobufs;
index 22cbd61079b5a9d2661583b7d96eea46eddb685d..b2fa498b15d173739d0ebc5b6dd0577bf8dc4c08 100644 (file)
@@ -951,7 +951,7 @@ static struct ctl_table ipv4_net_table[] = {
                .data           = &init_net.ipv4.sysctl_tcp_notsent_lowat,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_douintvec,
        },
        {
                .procname       = "tcp_tw_reuse",
index d46f4d5b1c62edf95791e9d47d966c3bc61e1888..ba8f02d0f283c6eaaf14ed89103adea135093353 100644 (file)
@@ -606,7 +606,6 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(tcp_peer_is_proven);
 
 void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
 {
index 89c59e656f44939863ceada610d3442d2de666ba..fc7b4017ba241f9dd39d49bd6258ecd4a16e3a3a 100644 (file)
@@ -191,6 +191,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
        ops = rcu_dereference(inet6_offloads[proto]);
        if (!ops || !ops->callbacks.gro_receive) {
                __pskb_pull(skb, skb_gro_offset(skb));
+               skb_gro_frag0_invalidate(skb);
                proto = ipv6_gso_pull_exthdrs(skb, proto);
                skb_gro_pull(skb, -skb_transport_offset(skb));
                skb_reset_transport_header(skb);
index 70d0de4041972ceaeb4656fa4bdef884a5403b10..38122d04fadc646c27a5ccdf0eef5eb6d7923a27 100644 (file)
@@ -1373,7 +1373,7 @@ emsgsize:
         */
 
        cork->length += length;
-       if (((length > mtu) ||
+       if ((((length + fragheaderlen) > mtu) ||
             (skb && skb_is_gso(skb))) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
index f4b4a4a5f4ba740f4a290e2394034fe335941abe..d82042c8d8fd4b38eac12a58eb634438aab726a7 100644 (file)
@@ -189,12 +189,12 @@ static int vti6_tnl_create2(struct net_device *dev)
        struct vti6_net *ip6n = net_generic(net, vti6_net_id);
        int err;
 
+       dev->rtnl_link_ops = &vti6_link_ops;
        err = register_netdevice(dev);
        if (err < 0)
                goto out;
 
        strcpy(t->parms.name, dev->name);
-       dev->rtnl_link_ops = &vti6_link_ops;
 
        dev_hold(dev);
        vti6_tnl_link(ip6n, t);
index 8417c41d8ec8398a72c56f9b37b16d94702cbbf3..ce5aaf448c541079e8e08b170d5d24d4f499d7d8 100644 (file)
@@ -1464,7 +1464,7 @@ static struct rt6_info *__ip6_route_redirect(struct net *net,
        struct fib6_node *fn;
 
        /* Get the "current" route for this destination and
-        * check if the redirect has come from approriate router.
+        * check if the redirect has come from appropriate router.
         *
         * RFC 4861 specifies that redirects should only be
         * accepted if they come from the nexthop to the target.
@@ -2768,7 +2768,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
           old MTU is the lowest MTU in the path, update the route PMTU
           to reflect the increase. In this case if the other nodes' MTU
           also have the lowest MTU, TOO BIG MESSAGE will be lead to
-          PMTU discouvery.
+          PMTU discovery.
         */
        if (rt->dst.dev == arg->dev &&
            dst_metric_raw(&rt->dst, RTAX_MTU) &&
index cfb9e5f4e28f52c699072eaf41f12f3a6e667160..13190b38f22ee5116fb7701feed22ec436031a4e 100644 (file)
@@ -1044,7 +1044,8 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 {
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
-       size_t headroom, linear;
+       size_t headroom = 0;
+       size_t linear;
        struct sk_buff *skb;
        struct iucv_message txmsg = {0};
        struct cmsghdr *cmsg;
@@ -1122,18 +1123,20 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
         * this is fine for SOCK_SEQPACKET (unless we want to support
         * segmented records using the MSG_EOR flag), but
         * for SOCK_STREAM we might want to improve it in future */
-       headroom = (iucv->transport == AF_IUCV_TRANS_HIPER)
-                  ? sizeof(struct af_iucv_trans_hdr) + ETH_HLEN : 0;
-       if (headroom + len < PAGE_SIZE) {
+       if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+               headroom = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
                linear = len;
        } else {
-               /* In nonlinear "classic" iucv skb,
-                * reserve space for iucv_array
-                */
-               if (iucv->transport != AF_IUCV_TRANS_HIPER)
-                       headroom += sizeof(struct iucv_array) *
-                                   (MAX_SKB_FRAGS + 1);
-               linear = PAGE_SIZE - headroom;
+               if (len < PAGE_SIZE) {
+                       linear = len;
+               } else {
+                       /* In nonlinear "classic" iucv skb,
+                        * reserve space for iucv_array
+                        */
+                       headroom = sizeof(struct iucv_array) *
+                                  (MAX_SKB_FRAGS + 1);
+                       linear = PAGE_SIZE - headroom;
+               }
        }
        skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
                                   noblock, &err, 0);
index 8938b6ba57a037b6f95f9dcfd2461d7c3951cdfb..3d73278b86ca34bfbd774dc8f52e490169445e1b 100644 (file)
@@ -47,7 +47,8 @@ static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
        return (struct l2tp_ip_sock *)sk;
 }
 
-static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
+static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
+                                         __be32 raddr, int dif, u32 tunnel_id)
 {
        struct sock *sk;
 
@@ -61,6 +62,7 @@ static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif
                if ((l2tp->conn_id == tunnel_id) &&
                    net_eq(sock_net(sk), net) &&
                    !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
+                   (!inet->inet_daddr || !raddr || inet->inet_daddr == raddr) &&
                    (!sk->sk_bound_dev_if || !dif ||
                     sk->sk_bound_dev_if == dif))
                        goto found;
@@ -71,15 +73,6 @@ found:
        return sk;
 }
 
-static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
-{
-       struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id);
-       if (sk)
-               sock_hold(sk);
-
-       return sk;
-}
-
 /* When processing receive frames, there are two cases to
  * consider. Data frames consist of a non-zero session-id and an
  * optional cookie. Control frames consist of a regular L2TP header
@@ -183,8 +176,8 @@ pass_up:
                struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
 
                read_lock_bh(&l2tp_ip_lock);
-               sk = __l2tp_ip_bind_lookup(net, iph->daddr, inet_iif(skb),
-                                          tunnel_id);
+               sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr,
+                                          inet_iif(skb), tunnel_id);
                if (!sk) {
                        read_unlock_bh(&l2tp_ip_lock);
                        goto discard;
@@ -280,7 +273,7 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                inet->inet_saddr = 0;  /* Use device */
 
        write_lock_bh(&l2tp_ip_lock);
-       if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
+       if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
                                  sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
                write_unlock_bh(&l2tp_ip_lock);
                ret = -EADDRINUSE;
index f092ac441fdda5cdaf294005d28d586c216d2f26..331ccf5a7bad80e011997e071489d7775b0c68c6 100644 (file)
@@ -59,12 +59,14 @@ static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
 
 static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
                                           struct in6_addr *laddr,
+                                          const struct in6_addr *raddr,
                                           int dif, u32 tunnel_id)
 {
        struct sock *sk;
 
        sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
-               const struct in6_addr *addr = inet6_rcv_saddr(sk);
+               const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk);
+               const struct in6_addr *sk_raddr = &sk->sk_v6_daddr;
                struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
 
                if (l2tp == NULL)
@@ -72,7 +74,8 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
 
                if ((l2tp->conn_id == tunnel_id) &&
                    net_eq(sock_net(sk), net) &&
-                   (!addr || ipv6_addr_equal(addr, laddr)) &&
+                   (!sk_laddr || ipv6_addr_any(sk_laddr) || ipv6_addr_equal(sk_laddr, laddr)) &&
+                   (!raddr || ipv6_addr_any(sk_raddr) || ipv6_addr_equal(sk_raddr, raddr)) &&
                    (!sk->sk_bound_dev_if || !dif ||
                     sk->sk_bound_dev_if == dif))
                        goto found;
@@ -83,17 +86,6 @@ found:
        return sk;
 }
 
-static inline struct sock *l2tp_ip6_bind_lookup(struct net *net,
-                                               struct in6_addr *laddr,
-                                               int dif, u32 tunnel_id)
-{
-       struct sock *sk = __l2tp_ip6_bind_lookup(net, laddr, dif, tunnel_id);
-       if (sk)
-               sock_hold(sk);
-
-       return sk;
-}
-
 /* When processing receive frames, there are two cases to
  * consider. Data frames consist of a non-zero session-id and an
  * optional cookie. Control frames consist of a regular L2TP header
@@ -197,8 +189,8 @@ pass_up:
                struct ipv6hdr *iph = ipv6_hdr(skb);
 
                read_lock_bh(&l2tp_ip6_lock);
-               sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, inet6_iif(skb),
-                                           tunnel_id);
+               sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
+                                           inet6_iif(skb), tunnel_id);
                if (!sk) {
                        read_unlock_bh(&l2tp_ip6_lock);
                        goto discard;
@@ -330,7 +322,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        rcu_read_unlock();
 
        write_lock_bh(&l2tp_ip6_lock);
-       if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, bound_dev_if,
+       if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, NULL, bound_dev_if,
                                   addr->l2tp_conn_id)) {
                write_unlock_bh(&l2tp_ip6_lock);
                err = -EADDRINUSE;
index 2c21b7039136fe467845df5494432b58a6a226fc..0d8b716e509edca48fbc65ceaf06e6b7d6a2c189 100644 (file)
@@ -3287,7 +3287,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
        int extra_head = fast_tx->hdr_len - (ETH_HLEN - 2);
        int hw_headroom = sdata->local->hw.extra_tx_headroom;
        struct ethhdr eth;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_tx_info *info;
        struct ieee80211_hdr *hdr = (void *)fast_tx->hdr;
        struct ieee80211_tx_data tx;
        ieee80211_tx_result r;
@@ -3351,6 +3351,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
        memcpy(skb->data + fast_tx->da_offs, eth.h_dest, ETH_ALEN);
        memcpy(skb->data + fast_tx->sa_offs, eth.h_source, ETH_ALEN);
 
+       info = IEEE80211_SKB_CB(skb);
        memset(info, 0, sizeof(*info));
        info->band = fast_tx->band;
        info->control.vif = &sdata->vif;
index a019a87e58ee8151620eb3d8500979d02dba54c1..0db5f9782265ebb033f10d07da815495e8a7d278 100644 (file)
@@ -2115,7 +2115,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
         * is called on error from nf_tables_newrule().
         */
        expr = nft_expr_first(rule);
-       while (expr->ops && expr != nft_expr_last(rule)) {
+       while (expr != nft_expr_last(rule) && expr->ops) {
                nf_tables_expr_destroy(ctx, expr);
                expr = nft_expr_next(expr);
        }
index 36d2b10965464cd8abc3ad57326aefb00b003971..7d699bbd45b0eaae1574a094f54a85d8219d390a 100644 (file)
@@ -250,6 +250,22 @@ static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
        return 0;
 }
 
+static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
+                                __wsum fsum, __wsum tsum, int csum_offset)
+{
+       __sum16 sum;
+
+       if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
+               return -1;
+
+       nft_csum_replace(&sum, fsum, tsum);
+       if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
+           skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
+               return -1;
+
+       return 0;
+}
+
 static void nft_payload_set_eval(const struct nft_expr *expr,
                                 struct nft_regs *regs,
                                 const struct nft_pktinfo *pkt)
@@ -259,7 +275,6 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
        const u32 *src = &regs->data[priv->sreg];
        int offset, csum_offset;
        __wsum fsum, tsum;
-       __sum16 sum;
 
        switch (priv->base) {
        case NFT_PAYLOAD_LL_HEADER:
@@ -282,18 +297,14 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
        csum_offset = offset + priv->csum_offset;
        offset += priv->offset;
 
-       if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
+       if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
            (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
             skb->ip_summed != CHECKSUM_PARTIAL)) {
-               if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
-                       goto err;
-
                fsum = skb_checksum(skb, offset, priv->len, 0);
                tsum = csum_partial(src, priv->len, 0);
-               nft_csum_replace(&sum, fsum, tsum);
 
-               if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
-                   skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
+               if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
+                   nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
                        goto err;
 
                if (priv->csum_flags &&
index 3e19fa1230dc6b9274090257be97b91827699485..dbb6aaff67ec5c151f8c8c6333721421f8e85076 100644 (file)
@@ -38,7 +38,7 @@ static void nft_queue_eval(const struct nft_expr *expr,
 
        if (priv->queues_total > 1) {
                if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT) {
-                       int cpu = smp_processor_id();
+                       int cpu = raw_smp_processor_id();
 
                        queue = priv->queuenum + cpu % priv->queues_total;
                } else {
index bd6efc53f26d01d8c8ac246c36fa4b1cf970ce31..2d6fe3559912674385e7679557fc31ddeb901b38 100644 (file)
@@ -110,30 +110,32 @@ static int nft_quota_obj_init(const struct nlattr * const tb[],
 static int nft_quota_do_dump(struct sk_buff *skb, struct nft_quota *priv,
                             bool reset)
 {
+       u64 consumed, consumed_cap;
        u32 flags = priv->flags;
-       u64 consumed;
-
-       if (reset) {
-               consumed = atomic64_xchg(&priv->consumed, 0);
-               if (test_and_clear_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags))
-                       flags |= NFT_QUOTA_F_DEPLETED;
-       } else {
-               consumed = atomic64_read(&priv->consumed);
-       }
 
        /* Since we inconditionally increment consumed quota for each packet
         * that we see, don't go over the quota boundary in what we send to
         * userspace.
         */
-       if (consumed > priv->quota)
-               consumed = priv->quota;
+       consumed = atomic64_read(&priv->consumed);
+       if (consumed >= priv->quota) {
+               consumed_cap = priv->quota;
+               flags |= NFT_QUOTA_F_DEPLETED;
+       } else {
+               consumed_cap = consumed;
+       }
 
        if (nla_put_be64(skb, NFTA_QUOTA_BYTES, cpu_to_be64(priv->quota),
                         NFTA_QUOTA_PAD) ||
-           nla_put_be64(skb, NFTA_QUOTA_CONSUMED, cpu_to_be64(consumed),
+           nla_put_be64(skb, NFTA_QUOTA_CONSUMED, cpu_to_be64(consumed_cap),
                         NFTA_QUOTA_PAD) ||
            nla_put_be32(skb, NFTA_QUOTA_FLAGS, htonl(flags)))
                goto nla_put_failure;
+
+       if (reset) {
+               atomic64_sub(consumed, &priv->consumed);
+               clear_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags);
+       }
        return 0;
 
 nla_put_failure:
index 28c56b95fb7ff6ebaf5a99d8cfcbf583a01e73f5..ea7c67050792c9093c28b397e5ae164a570a0e33 100644 (file)
@@ -1502,10 +1502,7 @@ static int __init netlbl_init(void)
        printk(KERN_INFO "NetLabel: Initializing\n");
        printk(KERN_INFO "NetLabel:  domain hash size = %u\n",
               (1 << NETLBL_DOMHSH_BITSIZE));
-       printk(KERN_INFO "NetLabel:  protocols ="
-              " UNLABELED"
-              " CIPSOv4"
-              "\n");
+       printk(KERN_INFO "NetLabel:  protocols = UNLABELED CIPSOv4 CALIPSO\n");
 
        ret_val = netlbl_domhsh_init(NETLBL_DOMHSH_BITSIZE);
        if (ret_val != 0)
index c985ecbe9bd62a81899d43eb802003d6bb4ebe08..ae5ac175b2bef96ffa614bc799db5cd90a7bdc08 100644 (file)
@@ -252,7 +252,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
        const int pkt_len = 20;
        struct qrtr_hdr *hdr;
        struct sk_buff *skb;
-       u32 *buf;
+       __le32 *buf;
 
        skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL);
        if (!skb)
@@ -269,7 +269,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
        hdr->dst_node_id = cpu_to_le32(dst_node);
        hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
 
-       buf = (u32 *)skb_put(skb, pkt_len);
+       buf = (__le32 *)skb_put(skb, pkt_len);
        memset(buf, 0, pkt_len);
        buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX);
        buf[1] = cpu_to_le32(src_node);
index 333f8e26843128b4bd03a1c6409a21064f7c56e5..970db7a41684aa2a494b97663f91ca932308de05 100644 (file)
@@ -153,10 +153,14 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 
                switch (ip_tunnel_info_af(info)) {
                case AF_INET:
+                       skb_key.enc_control.addr_type =
+                               FLOW_DISSECTOR_KEY_IPV4_ADDRS;
                        skb_key.enc_ipv4.src = key->u.ipv4.src;
                        skb_key.enc_ipv4.dst = key->u.ipv4.dst;
                        break;
                case AF_INET6:
+                       skb_key.enc_control.addr_type =
+                               FLOW_DISSECTOR_KEY_IPV6_ADDRS;
                        skb_key.enc_ipv6.src = key->u.ipv6.src;
                        skb_key.enc_ipv6.dst = key->u.ipv6.dst;
                        break;
index e54082699520ae40d8ab4c282c230f3b3787db92..34efaa4ef2f6acfbed9b490f948f214e91a5606c 100644 (file)
@@ -1048,7 +1048,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
                             (new_transport->state == SCTP_PF)))
                                new_transport = asoc->peer.active_path;
                        if (new_transport->state == SCTP_UNCONFIRMED) {
-                               WARN_ONCE(1, "Atempt to send packet on unconfirmed path.");
+                               WARN_ONCE(1, "Attempt to send packet on unconfirmed path.");
                                sctp_chunk_fail(chunk, 0);
                                sctp_chunk_free(chunk);
                                continue;
index 8487bf136e5c4f54801777c4ef0f4e51b765cd62..0758e13754e2faccb257d2f6ba9cca7b2da1baab 100644 (file)
@@ -533,11 +533,11 @@ static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer,
        return used;
 }
 
-int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
+static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
 {
        int err = simple_setattr(dentry, iattr);
 
-       if (!err) {
+       if (!err && (iattr->ia_valid & ATTR_UID)) {
                struct socket *sock = SOCKET_I(d_inode(dentry));
 
                sock->sk->sk_uid = iattr->ia_uid;
index 3df85a751a85285f609c5583e7ad864c7c822560..ef5eff93a8b817dda5fa41bebf4fe4963663a9d6 100644 (file)
@@ -14502,13 +14502,17 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
 
        list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
                bool schedule_destroy_work = false;
-               bool schedule_scan_stop = false;
                struct cfg80211_sched_scan_request *sched_scan_req =
                        rcu_dereference(rdev->sched_scan_req);
 
                if (sched_scan_req && notify->portid &&
-                   sched_scan_req->owner_nlportid == notify->portid)
-                       schedule_scan_stop = true;
+                   sched_scan_req->owner_nlportid == notify->portid) {
+                       sched_scan_req->owner_nlportid = 0;
+
+                       if (rdev->ops->sched_scan_stop &&
+                           rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
+                               schedule_work(&rdev->sched_scan_stop_wk);
+               }
 
                list_for_each_entry_rcu(wdev, &rdev->wiphy.wdev_list, list) {
                        cfg80211_mlme_unregister_socket(wdev, notify->portid);
@@ -14539,12 +14543,6 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
                                spin_unlock(&rdev->destroy_list_lock);
                                schedule_work(&rdev->destroy_work);
                        }
-               } else if (schedule_scan_stop) {
-                       sched_scan_req->owner_nlportid = 0;
-
-                       if (rdev->ops->sched_scan_stop &&
-                           rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
-                               schedule_work(&rdev->sched_scan_stop_wk);
                }
        }
 
index a6d2a43bbf2e290368410a6338abefecfa114f54..b124f62ed6cb30b0e89a2d698dc6a346b65e6b62 100644 (file)
@@ -105,4 +105,11 @@ config SAMPLE_BLACKFIN_GPTIMERS
        help
          Build samples of blackfin gptimers sample module.
 
+config SAMPLE_VFIO_MDEV_MTTY
+       tristate "Build VFIO mtty example mediated device sample code -- loadable modules only"
+       depends on VFIO_MDEV_DEVICE && m
+       help
+         Build a virtual tty sample driver for use as a VFIO
+         mediated device
+
 endif # SAMPLES
index e17d66d77f099c48f88bdc8518ca34c45b39cf29..86a137e451d978bec15778e13766648b6cfb3f18 100644 (file)
@@ -2,4 +2,5 @@
 
 obj-$(CONFIG_SAMPLES)  += kobject/ kprobes/ trace_events/ livepatch/ \
                           hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/ \
-                          configfs/ connector/ v4l/ trace_printk/ blackfin/
+                          configfs/ connector/ v4l/ trace_printk/ blackfin/ \
+                          vfio-mdev/
index a932edbe38eb9eba5a1b3429bb67cc9c99d0d3a6..cbbd868a50a8b145bf018329542f4fde829e06da 100644 (file)
@@ -1,13 +1 @@
-#
-# Makefile for mtty.c file
-#
-KERNEL_DIR:=/lib/modules/$(shell uname -r)/build
-
-obj-m:=mtty.o
-
-modules clean modules_install:
-       $(MAKE) -C $(KERNEL_DIR) SUBDIRS=$(PWD) $@
-
-default: modules
-
-module: modules
+obj-$(CONFIG_SAMPLE_VFIO_MDEV_MTTY) += mtty.o
index 6b633a4ea33399aa32e6f3655b05c980aeb33403..ca495686b9c31786d0df8c16407bd06d28ee1b40 100644 (file)
@@ -164,7 +164,7 @@ static struct mdev_state *find_mdev_state_by_uuid(uuid_le uuid)
        struct mdev_state *mds;
 
        list_for_each_entry(mds, &mdev_devices_list, next) {
-               if (uuid_le_cmp(mds->mdev->uuid, uuid) == 0)
+               if (uuid_le_cmp(mdev_uuid(mds->mdev), uuid) == 0)
                        return mds;
        }
 
@@ -341,7 +341,8 @@ static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
                                pr_err("Serial port %d: Fifo level trigger\n",
                                        index);
 #endif
-                               mtty_trigger_interrupt(mdev_state->mdev->uuid);
+                               mtty_trigger_interrupt(
+                                               mdev_uuid(mdev_state->mdev));
                        }
                } else {
 #if defined(DEBUG_INTR)
@@ -355,7 +356,8 @@ static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
                         */
                        if (mdev_state->s[index].uart_reg[UART_IER] &
                                                                UART_IER_RLSI)
-                               mtty_trigger_interrupt(mdev_state->mdev->uuid);
+                               mtty_trigger_interrupt(
+                                               mdev_uuid(mdev_state->mdev));
                }
                mutex_unlock(&mdev_state->rxtx_lock);
                break;
@@ -374,7 +376,8 @@ static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
                                pr_err("Serial port %d: IER_THRI write\n",
                                        index);
 #endif
-                               mtty_trigger_interrupt(mdev_state->mdev->uuid);
+                               mtty_trigger_interrupt(
+                                               mdev_uuid(mdev_state->mdev));
                        }
 
                        mutex_unlock(&mdev_state->rxtx_lock);
@@ -445,7 +448,7 @@ static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
 #if defined(DEBUG_INTR)
                        pr_err("Serial port %d: MCR_OUT2 write\n", index);
 #endif
-                       mtty_trigger_interrupt(mdev_state->mdev->uuid);
+                       mtty_trigger_interrupt(mdev_uuid(mdev_state->mdev));
                }
 
                if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
@@ -453,7 +456,7 @@ static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
 #if defined(DEBUG_INTR)
                        pr_err("Serial port %d: MCR RTS/DTR write\n", index);
 #endif
-                       mtty_trigger_interrupt(mdev_state->mdev->uuid);
+                       mtty_trigger_interrupt(mdev_uuid(mdev_state->mdev));
                }
                break;
 
@@ -504,7 +507,8 @@ static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
 #endif
                        if (mdev_state->s[index].uart_reg[UART_IER] &
                                                         UART_IER_THRI)
-                               mtty_trigger_interrupt(mdev_state->mdev->uuid);
+                               mtty_trigger_interrupt(
+                                       mdev_uuid(mdev_state->mdev));
                }
                mutex_unlock(&mdev_state->rxtx_lock);
 
@@ -734,7 +738,7 @@ int mtty_create(struct kobject *kobj, struct mdev_device *mdev)
 
        for (i = 0; i < 2; i++) {
                snprintf(name, MTTY_STRING_LEN, "%s-%d",
-                       dev_driver_string(mdev->parent->dev), i + 1);
+                       dev_driver_string(mdev_parent_dev(mdev)), i + 1);
                if (!strcmp(kobj->name, name)) {
                        nr_ports = i + 1;
                        break;
@@ -1069,7 +1073,7 @@ int mtty_get_region_info(struct mdev_device *mdev,
 {
        unsigned int size = 0;
        struct mdev_state *mdev_state;
-       int bar_index;
+       u32 bar_index;
 
        if (!mdev)
                return -EINVAL;
@@ -1078,8 +1082,11 @@ int mtty_get_region_info(struct mdev_device *mdev,
        if (!mdev_state)
                return -EINVAL;
 
-       mutex_lock(&mdev_state->ops_lock);
        bar_index = region_info->index;
+       if (bar_index >= VFIO_PCI_NUM_REGIONS)
+               return -EINVAL;
+
+       mutex_lock(&mdev_state->ops_lock);
 
        switch (bar_index) {
        case VFIO_PCI_CONFIG_REGION_INDEX:
@@ -1176,7 +1183,10 @@ static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
 
                memcpy(&mdev_state->dev_info, &info, sizeof(info));
 
-               return copy_to_user((void __user *)arg, &info, minsz);
+               if (copy_to_user((void __user *)arg, &info, minsz))
+                       return -EFAULT;
+
+               return 0;
        }
        case VFIO_DEVICE_GET_REGION_INFO:
        {
@@ -1197,7 +1207,10 @@ static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
                if (ret)
                        return ret;
 
-               return copy_to_user((void __user *)arg, &info, minsz);
+               if (copy_to_user((void __user *)arg, &info, minsz))
+                       return -EFAULT;
+
+               return 0;
        }
 
        case VFIO_DEVICE_GET_IRQ_INFO:
@@ -1217,10 +1230,10 @@ static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
                if (ret)
                        return ret;
 
-               if (info.count == -1)
-                       return -EINVAL;
+               if (copy_to_user((void __user *)arg, &info, minsz))
+                       return -EFAULT;
 
-               return copy_to_user((void __user *)arg, &info, minsz);
+               return 0;
        }
        case VFIO_DEVICE_SET_IRQS:
        {
@@ -1298,10 +1311,8 @@ static ssize_t
 sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
                     char *buf)
 {
-       struct mdev_device *mdev = to_mdev_device(dev);
-
-       if (mdev)
-               return sprintf(buf, "This is MDEV %s\n", dev_name(&mdev->dev));
+       if (mdev_from_dev(dev))
+               return sprintf(buf, "This is MDEV %s\n", dev_name(dev));
 
        return sprintf(buf, "\n");
 }
@@ -1402,7 +1413,7 @@ struct attribute_group *mdev_type_groups[] = {
        NULL,
 };
 
-struct parent_ops mdev_fops = {
+struct mdev_parent_ops mdev_fops = {
        .owner                  = THIS_MODULE,
        .dev_attr_groups        = mtty_dev_groups,
        .mdev_attr_groups       = mdev_dev_groups,
@@ -1447,6 +1458,7 @@ static int __init mtty_dev_init(void)
 
        if (IS_ERR(mtty_dev.vd_class)) {
                pr_err("Error: failed to register mtty_dev class\n");
+               ret = PTR_ERR(mtty_dev.vd_class);
                goto failed1;
        }
 
@@ -1458,7 +1470,8 @@ static int __init mtty_dev_init(void)
        if (ret)
                goto failed2;
 
-       if (mdev_register_device(&mtty_dev.dev, &mdev_fops) != 0)
+       ret = mdev_register_device(&mtty_dev.dev, &mdev_fops);
+       if (ret)
                goto failed3;
 
        mutex_init(&mdev_list_lock);
index 950fd2e64bb73b9f261188bba272ea7e7ec10249..12262c0cc6914e6a5eebac1b887b129a15fb7466 100644 (file)
@@ -39,6 +39,9 @@
 #include "hash-map.h"
 #endif
 
+#if BUILDING_GCC_VERSION >= 7000
+#include "memmodel.h"
+#endif
 #include "emit-rtl.h"
 #include "debug.h"
 #include "target.h"
@@ -91,6 +94,9 @@
 #include "tree-ssa-alias.h"
 #include "tree-ssa.h"
 #include "stringpool.h"
+#if BUILDING_GCC_VERSION >= 7000
+#include "tree-vrp.h"
+#endif
 #include "tree-ssanames.h"
 #include "print-tree.h"
 #include "tree-eh.h"
@@ -287,6 +293,22 @@ static inline struct cgraph_node *cgraph_next_function_with_gimple_body(struct c
        return NULL;
 }
 
+static inline bool cgraph_for_node_and_aliases(cgraph_node_ptr node, bool (*callback)(cgraph_node_ptr, void *), void *data, bool include_overwritable)
+{
+       cgraph_node_ptr alias;
+
+       if (callback(node, data))
+               return true;
+
+       for (alias = node->same_body; alias; alias = alias->next) {
+               if (include_overwritable || cgraph_function_body_availability(alias) > AVAIL_OVERWRITABLE)
+                       if (cgraph_for_node_and_aliases(alias, callback, data, include_overwritable))
+                               return true;
+       }
+
+       return false;
+}
+
 #define FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) \
        for ((node) = cgraph_first_function_with_gimple_body(); (node); \
                (node) = cgraph_next_function_with_gimple_body(node))
@@ -399,6 +421,7 @@ typedef union gimple_statement_d gassign;
 typedef union gimple_statement_d gcall;
 typedef union gimple_statement_d gcond;
 typedef union gimple_statement_d gdebug;
+typedef union gimple_statement_d ggoto;
 typedef union gimple_statement_d gphi;
 typedef union gimple_statement_d greturn;
 
@@ -452,6 +475,16 @@ static inline const gdebug *as_a_const_gdebug(const_gimple stmt)
        return stmt;
 }
 
+static inline ggoto *as_a_ggoto(gimple stmt)
+{
+       return stmt;
+}
+
+static inline const ggoto *as_a_const_ggoto(const_gimple stmt)
+{
+       return stmt;
+}
+
 static inline gphi *as_a_gphi(gimple stmt)
 {
        return stmt;
@@ -496,6 +529,14 @@ static inline const greturn *as_a_const_greturn(const_gimple stmt)
 
 typedef struct rtx_def rtx_insn;
 
+static inline const char *get_decl_section_name(const_tree decl)
+{
+       if (DECL_SECTION_NAME(decl) == NULL_TREE)
+               return NULL;
+
+       return TREE_STRING_POINTER(DECL_SECTION_NAME(decl));
+}
+
 static inline void set_decl_section_name(tree node, const char *value)
 {
        if (value)
@@ -511,6 +552,7 @@ typedef struct gimple_statement_base gassign;
 typedef struct gimple_statement_call gcall;
 typedef struct gimple_statement_base gcond;
 typedef struct gimple_statement_base gdebug;
+typedef struct gimple_statement_base ggoto;
 typedef struct gimple_statement_phi gphi;
 typedef struct gimple_statement_base greturn;
 
@@ -564,6 +606,16 @@ static inline const gdebug *as_a_const_gdebug(const_gimple stmt)
        return stmt;
 }
 
+static inline ggoto *as_a_ggoto(gimple stmt)
+{
+       return stmt;
+}
+
+static inline const ggoto *as_a_const_ggoto(const_gimple stmt)
+{
+       return stmt;
+}
+
 static inline gphi *as_a_gphi(gimple stmt)
 {
        return as_a<gphi>(stmt);
@@ -611,6 +663,11 @@ inline bool is_a_helper<const gassign *>::test(const_gimple gs)
 
 #define INSN_DELETED_P(insn) (insn)->deleted()
 
+static inline const char *get_decl_section_name(const_tree decl)
+{
+       return DECL_SECTION_NAME(decl);
+}
+
 /* symtab/cgraph related */
 #define debug_cgraph_node(node) (node)->debug()
 #define cgraph_get_node(decl) cgraph_node::get(decl)
@@ -619,6 +676,7 @@ inline bool is_a_helper<const gassign *>::test(const_gimple gs)
 #define cgraph_n_nodes symtab->cgraph_count
 #define cgraph_max_uid symtab->cgraph_max_uid
 #define varpool_get_node(decl) varpool_node::get(decl)
+#define dump_varpool_node(file, node) (node)->dump(file)
 
 #define cgraph_create_edge(caller, callee, call_stmt, count, freq, nest) \
        (caller)->create_edge((callee), (call_stmt), (count), (freq))
@@ -674,6 +732,11 @@ static inline cgraph_node_ptr cgraph_alias_target(cgraph_node_ptr node)
        return node->get_alias_target();
 }
 
+static inline bool cgraph_for_node_and_aliases(cgraph_node_ptr node, bool (*callback)(cgraph_node_ptr, void *), void *data, bool include_overwritable)
+{
+       return node->call_for_symbol_thunks_and_aliases(callback, data, include_overwritable);
+}
+
 static inline struct cgraph_node_hook_list *cgraph_add_function_insertion_hook(cgraph_node_hook hook, void *data)
 {
        return symtab->add_cgraph_insertion_hook(hook, data);
@@ -729,6 +792,13 @@ static inline gimple gimple_build_assign_with_ops(enum tree_code subcode, tree l
        return gimple_build_assign(lhs, subcode, op1, op2 PASS_MEM_STAT);
 }
 
+template <>
+template <>
+inline bool is_a_helper<const ggoto *>::test(const_gimple gs)
+{
+       return gs->code == GIMPLE_GOTO;
+}
+
 template <>
 template <>
 inline bool is_a_helper<const greturn *>::test(const_gimple gs)
@@ -766,6 +836,16 @@ static inline const gcall *as_a_const_gcall(const_gimple stmt)
        return as_a<const gcall *>(stmt);
 }
 
+static inline ggoto *as_a_ggoto(gimple stmt)
+{
+       return as_a<ggoto *>(stmt);
+}
+
+static inline const ggoto *as_a_const_ggoto(const_gimple stmt)
+{
+       return as_a<const ggoto *>(stmt);
+}
+
 static inline gphi *as_a_gphi(gimple stmt)
 {
        return as_a<gphi *>(stmt);
@@ -828,4 +908,9 @@ static inline void debug_gimple_stmt(const_gimple s)
 #define debug_gimple_stmt(s) debug_gimple_stmt(CONST_CAST_GIMPLE(s))
 #endif
 
+#if BUILDING_GCC_VERSION >= 7000
+#define get_inner_reference(exp, pbitsize, pbitpos, poffset, pmode, punsignedp, preversep, pvolatilep, keep_aligning)  \
+       get_inner_reference(exp, pbitsize, pbitpos, poffset, pmode, punsignedp, preversep, pvolatilep)
+#endif
+
 #endif
index 12541126575b1e2416ad48d58f1d01f8e08a5d93..8ff203ad48093f57fccf04bb9d7c9b9e1952603b 100644 (file)
@@ -328,9 +328,9 @@ static enum tree_code get_op(tree *rhs)
                        op = LROTATE_EXPR;
                        /*
                         * This code limits the value of random_const to
-                        * the size of a wide int for the rotation
+                        * the size of a long for the rotation
                         */
-                       random_const &= HOST_BITS_PER_WIDE_INT - 1;
+                       random_const %= TYPE_PRECISION(long_unsigned_type_node);
                        break;
                }
 
index ee47924aef0df676223975a08df37ad055cc1f51..827161bc269cfa1414972692bcdb45d0a42ad9ed 100644 (file)
@@ -117,7 +117,7 @@ destroy_stream(struct snd_efw *efw, struct amdtp_stream *stream)
                conn = &efw->in_conn;
 
        amdtp_stream_destroy(stream);
-       cmp_connection_destroy(&efw->out_conn);
+       cmp_connection_destroy(conn);
 }
 
 static int
index 4ad3bd7fd4453e3a64fc4cd95001165510292559..f1657a4e0621ef4999349477ce6e71fdbcd7f411 100644 (file)
@@ -343,7 +343,7 @@ int snd_tscm_stream_init_duplex(struct snd_tscm *tscm)
        if (err < 0)
                amdtp_stream_destroy(&tscm->rx_stream);
 
-       return 0;
+       return err;
 }
 
 /* At bus reset, streaming is stopped and some registers are clear. */
index 9448daff9d8b55d5de49ef9a4ff66fd9e2a80f6b..7d660ee1d5e84e6f7eaa42404232d9aad461b25c 100644 (file)
@@ -2230,6 +2230,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
        SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
        SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
+       SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
        SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
        SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
        SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
@@ -6983,6 +6984,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
        SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
        SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51),
+       SND_PCI_QUIRK(0x1043, 0x1963, "ASUS X71SL", ALC662_FIXUP_ASUS_MODE8),
        SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
        SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
        SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
index efe3a44658d5a5e6d3a82d5f23ed8f8afd190d5a..4576f987a4a5fec34a1bb780e0ba1ccb71dfeada 100644 (file)
@@ -561,9 +561,9 @@ static void nau8825_xtalk_prepare(struct nau8825 *nau8825)
        nau8825_xtalk_backup(nau8825);
        /* Config IIS as master to output signal by codec */
        regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
-               NAU8825_I2S_MS_MASK | NAU8825_I2S_DRV_MASK |
+               NAU8825_I2S_MS_MASK | NAU8825_I2S_LRC_DIV_MASK |
                NAU8825_I2S_BLK_DIV_MASK, NAU8825_I2S_MS_MASTER |
-               (0x2 << NAU8825_I2S_DRV_SFT) | 0x1);
+               (0x2 << NAU8825_I2S_LRC_DIV_SFT) | 0x1);
        /* Ramp up headphone volume to 0dB to get better performance and
         * avoid pop noise in headphone.
         */
@@ -657,7 +657,7 @@ static void nau8825_xtalk_clean(struct nau8825 *nau8825)
                NAU8825_IRQ_RMS_EN, NAU8825_IRQ_RMS_EN);
        /* Recover default value for IIS */
        regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
-               NAU8825_I2S_MS_MASK | NAU8825_I2S_DRV_MASK |
+               NAU8825_I2S_MS_MASK | NAU8825_I2S_LRC_DIV_MASK |
                NAU8825_I2S_BLK_DIV_MASK, NAU8825_I2S_MS_SLAVE);
        /* Restore value of specific register for cross talk */
        nau8825_xtalk_restore(nau8825);
@@ -2006,7 +2006,8 @@ static void nau8825_fll_apply(struct nau8825 *nau8825,
                        NAU8825_FLL_INTEGER_MASK, fll_param->fll_int);
        /* FLL pre-scaler */
        regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL4,
-                       NAU8825_FLL_REF_DIV_MASK, fll_param->clk_ref_div);
+                       NAU8825_FLL_REF_DIV_MASK,
+                       fll_param->clk_ref_div << NAU8825_FLL_REF_DIV_SFT);
        /* select divided VCO input */
        regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL5,
                NAU8825_FLL_CLK_SW_MASK, NAU8825_FLL_CLK_SW_REF);
index 5d1704e732415dd04134f44ddab9971156c97de1..514fd13c2f4624bb96b237422dfd63fae52adf17 100644 (file)
 #define NAU8825_FLL_CLK_SRC_FS                 (0x3 << NAU8825_FLL_CLK_SRC_SFT)
 
 /* FLL4 (0x07) */
-#define NAU8825_FLL_REF_DIV_MASK               (0x3 << 10)
+#define NAU8825_FLL_REF_DIV_SFT        10
+#define NAU8825_FLL_REF_DIV_MASK       (0x3 << NAU8825_FLL_REF_DIV_SFT)
 
 /* FLL5 (0x08) */
 #define NAU8825_FLL_PDB_DAC_EN         (0x1 << 15)
 
 /* I2S_PCM_CTRL2 (0x1d) */
 #define NAU8825_I2S_TRISTATE   (1 << 15) /* 0 - normal mode, 1 - Hi-Z output */
-#define NAU8825_I2S_DRV_SFT    12
-#define NAU8825_I2S_DRV_MASK   (0x3 << NAU8825_I2S_DRV_SFT)
+#define NAU8825_I2S_LRC_DIV_SFT        12
+#define NAU8825_I2S_LRC_DIV_MASK       (0x3 << NAU8825_I2S_LRC_DIV_SFT)
 #define NAU8825_I2S_MS_SFT     3
 #define NAU8825_I2S_MS_MASK    (1 << NAU8825_I2S_MS_SFT)
 #define NAU8825_I2S_MS_MASTER  (1 << NAU8825_I2S_MS_SFT)
index 10c2a564a715dc82e198a4bb50c5691662685c7a..1ac96ef9ee2077dc70ba940f939e8162ce1dec26 100644 (file)
@@ -3833,6 +3833,9 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
                }
        }
 
+       regmap_update_bits(rt5645->regmap, RT5645_ADDA_CLK1,
+               RT5645_I2S_PD1_MASK, RT5645_I2S_PD1_2);
+
        if (rt5645->pdata.jd_invert) {
                regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
                        RT5645_JD_1_1_MASK, RT5645_JD_1_1_INV);
index 8877b74b0510fedefe81adc6637c9d72cf9c722f..bb94d50052d7a7233282b50afca134e4f2f7cf3f 100644 (file)
@@ -126,6 +126,16 @@ static const struct reg_default aic3x_reg[] = {
        { 108, 0x00 }, { 109, 0x00 },
 };
 
+static bool aic3x_volatile_reg(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case AIC3X_RESET:
+               return true;
+       default:
+               return false;
+       }
+}
+
 static const struct regmap_config aic3x_regmap = {
        .reg_bits = 8,
        .val_bits = 8,
@@ -133,6 +143,9 @@ static const struct regmap_config aic3x_regmap = {
        .max_register = DAC_ICC_ADJ,
        .reg_defaults = aic3x_reg,
        .num_reg_defaults = ARRAY_SIZE(aic3x_reg),
+
+       .volatile_reg = aic3x_volatile_reg,
+
        .cache_type = REGCACHE_RBTREE,
 };
 
index 593b7d1aed4695bbf6e538fa924bc6263901041f..d72ccef9e238d39807657597d1c8496022718bb5 100644 (file)
@@ -1551,7 +1551,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
        const struct wmfw_region *region;
        const struct wm_adsp_region *mem;
        const char *region_name;
-       char *file, *text;
+       char *file, *text = NULL;
        struct wm_adsp_buf *buf;
        unsigned int reg;
        int regions = 0;
@@ -1700,10 +1700,21 @@ static int wm_adsp_load(struct wm_adsp *dsp)
                         regions, le32_to_cpu(region->len), offset,
                         region_name);
 
+               if ((pos + le32_to_cpu(region->len) + sizeof(*region)) >
+                   firmware->size) {
+                       adsp_err(dsp,
+                                "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+                                file, regions, region_name,
+                                le32_to_cpu(region->len), firmware->size);
+                       ret = -EINVAL;
+                       goto out_fw;
+               }
+
                if (text) {
                        memcpy(text, region->data, le32_to_cpu(region->len));
                        adsp_info(dsp, "%s: %s\n", file, text);
                        kfree(text);
+                       text = NULL;
                }
 
                if (reg) {
@@ -1748,6 +1759,7 @@ out_fw:
        regmap_async_complete(regmap);
        wm_adsp_buf_free(&buf_list);
        release_firmware(firmware);
+       kfree(text);
 out:
        kfree(file);
 
@@ -2233,6 +2245,17 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
                }
 
                if (reg) {
+                       if ((pos + le32_to_cpu(blk->len) + sizeof(*blk)) >
+                           firmware->size) {
+                               adsp_err(dsp,
+                                        "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+                                        file, blocks, region_name,
+                                        le32_to_cpu(blk->len),
+                                        firmware->size);
+                               ret = -EINVAL;
+                               goto out_fw;
+                       }
+
                        buf = wm_adsp_buf_alloc(blk->data,
                                                le32_to_cpu(blk->len),
                                                &buf_list);
index 2998954a1c7459e02c3397956e690a276120f06d..bdf8398cbc81b68b445f202636e8949db5e5cae5 100644 (file)
@@ -681,22 +681,19 @@ static int dw_i2s_probe(struct platform_device *pdev)
        }
 
        if (!pdata) {
-               ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
-               if (ret == -EPROBE_DEFER) {
-                       dev_err(&pdev->dev,
-                               "failed to register PCM, deferring probe\n");
-                       return ret;
-               } else if (ret) {
-                       dev_err(&pdev->dev,
-                               "Could not register DMA PCM: %d\n"
-                               "falling back to PIO mode\n", ret);
+               if (irq >= 0) {
                        ret = dw_pcm_register(pdev);
-                       if (ret) {
-                               dev_err(&pdev->dev,
-                                       "Could not register PIO PCM: %d\n",
+                       dev->use_pio = true;
+               } else {
+                       ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL,
+                                       0);
+                       dev->use_pio = false;
+               }
+
+               if (ret) {
+                       dev_err(&pdev->dev, "could not register pcm: %d\n",
                                        ret);
-                               goto err_clk_disable;
-                       }
+                       goto err_clk_disable;
                }
        }
 
index 50349437d9615107a3a08fa481b6b2d61612e51c..fde08660b63be270f95436424e32e443bdb8a0de 100644 (file)
@@ -224,6 +224,12 @@ struct fsl_ssi_soc_data {
  * @dbg_stats: Debugging statistics
  *
  * @soc: SoC specific data
+ *
+ * @fifo_watermark: the FIFO watermark setting.  Notifies DMA when
+ *             there are @fifo_watermark or fewer words in TX fifo or
+ *             @fifo_watermark or more empty words in RX fifo.
+ * @dma_maxburst: max number of words to transfer in one go.  So far,
+ *             this is always the same as fifo_watermark.
  */
 struct fsl_ssi_private {
        struct regmap *regs;
@@ -263,6 +269,9 @@ struct fsl_ssi_private {
 
        const struct fsl_ssi_soc_data *soc;
        struct device *dev;
+
+       u32 fifo_watermark;
+       u32 dma_maxburst;
 };
 
 /*
@@ -1051,21 +1060,7 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
        regmap_write(regs, CCSR_SSI_SRCR, srcr);
        regmap_write(regs, CCSR_SSI_SCR, scr);
 
-       /*
-        * Set the watermark for transmit FIFI 0 and receive FIFO 0. We don't
-        * use FIFO 1. We program the transmit water to signal a DMA transfer
-        * if there are only two (or fewer) elements left in the FIFO. Two
-        * elements equals one frame (left channel, right channel). This value,
-        * however, depends on the depth of the transmit buffer.
-        *
-        * We set the watermark on the same level as the DMA burstsize.  For
-        * fiq it is probably better to use the biggest possible watermark
-        * size.
-        */
-       if (ssi_private->use_dma)
-               wm = ssi_private->fifo_depth - 2;
-       else
-               wm = ssi_private->fifo_depth;
+       wm = ssi_private->fifo_watermark;
 
        regmap_write(regs, CCSR_SSI_SFCSR,
                        CCSR_SSI_SFCSR_TFWM0(wm) | CCSR_SSI_SFCSR_RFWM0(wm) |
@@ -1373,12 +1368,8 @@ static int fsl_ssi_imx_probe(struct platform_device *pdev,
                dev_dbg(&pdev->dev, "could not get baud clock: %ld\n",
                         PTR_ERR(ssi_private->baudclk));
 
-       /*
-        * We have burstsize be "fifo_depth - 2" to match the SSI
-        * watermark setting in fsl_ssi_startup().
-        */
-       ssi_private->dma_params_tx.maxburst = ssi_private->fifo_depth - 2;
-       ssi_private->dma_params_rx.maxburst = ssi_private->fifo_depth - 2;
+       ssi_private->dma_params_tx.maxburst = ssi_private->dma_maxburst;
+       ssi_private->dma_params_rx.maxburst = ssi_private->dma_maxburst;
        ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0;
        ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0;
 
@@ -1543,6 +1534,47 @@ static int fsl_ssi_probe(struct platform_device *pdev)
                 /* Older 8610 DTs didn't have the fifo-depth property */
                ssi_private->fifo_depth = 8;
 
+       /*
+        * Set the watermark for transmit FIFO 0 and receive FIFO 0. We don't
+        * use FIFO 1 but set the watermark appropriately nontheless.
+        * We program the transmit water to signal a DMA transfer
+        * if there are N elements left in the FIFO. For chips with 15-deep
+        * FIFOs, set watermark to 8.  This allows the SSI to operate at a
+        * high data rate without channel slipping. Behavior is unchanged
+        * for the older chips with a fifo depth of only 8.  A value of 4
+        * might be appropriate for the older chips, but is left at
+        * fifo_depth-2 until sombody has a chance to test.
+        *
+        * We set the watermark on the same level as the DMA burstsize.  For
+        * fiq it is probably better to use the biggest possible watermark
+        * size.
+        */
+       switch (ssi_private->fifo_depth) {
+       case 15:
+               /*
+                * 2 samples is not enough when running at high data
+                * rates (like 48kHz @ 16 bits/channel, 16 channels)
+                * 8 seems to split things evenly and leave enough time
+                * for the DMA to fill the FIFO before it's over/under
+                * run.
+                */
+               ssi_private->fifo_watermark = 8;
+               ssi_private->dma_maxburst = 8;
+               break;
+       case 8:
+       default:
+               /*
+                * maintain old behavior for older chips.
+                * Keeping it the same because I don't have an older
+                * board to test with.
+                * I suspect this could be changed to be something to
+                * leave some more space in the fifo.
+                */
+               ssi_private->fifo_watermark = ssi_private->fifo_depth - 2;
+               ssi_private->dma_maxburst = ssi_private->fifo_depth - 2;
+               break;
+       }
+
        dev_set_drvdata(&pdev->dev, ssi_private);
 
        if (ssi_private->soc->imx) {
index 507a86a5eafe5646bb9731f865b85571eb6ffdfc..8d2fb2d6f532c833726e5dfbac2fe191eb95a15e 100644 (file)
@@ -142,7 +142,7 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
                 * for Jack detection and button press
                 */
                ret = snd_soc_dai_set_sysclk(codec_dai, RT5640_SCLK_S_RCCLK,
-                                            0,
+                                            48000 * 512,
                                             SND_SOC_CLOCK_IN);
                if (!ret) {
                        if ((byt_rt5640_quirk & BYT_RT5640_MCLK_EN) && priv->mclk)
@@ -825,10 +825,20 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
        if ((byt_rt5640_quirk & BYT_RT5640_MCLK_EN) && (is_valleyview())) {
                priv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
                if (IS_ERR(priv->mclk)) {
+                       ret_val = PTR_ERR(priv->mclk);
+
                        dev_err(&pdev->dev,
-                               "Failed to get MCLK from pmc_plt_clk_3: %ld\n",
-                               PTR_ERR(priv->mclk));
-                       return PTR_ERR(priv->mclk);
+                               "Failed to get MCLK from pmc_plt_clk_3: %d\n",
+                               ret_val);
+
+                       /*
+                        * Fall back to bit clock usage for -ENOENT (clock not
+                        * available likely due to missing dependencies), bail
+                        * for all other errors, including -EPROBE_DEFER
+                        */
+                       if (ret_val != -ENOENT)
+                               return ret_val;
+                       byt_rt5640_quirk &= ~BYT_RT5640_MCLK_EN;
                }
        }
 
index 84b5101e6ca691df0e0896b59a95e07a55e3b5e7..6c6b63a6b338f50f0eb218f231506f0017e56241 100644 (file)
@@ -180,6 +180,9 @@ static int skl_pcm_open(struct snd_pcm_substream *substream,
        snd_pcm_set_sync(substream);
 
        mconfig = skl_tplg_fe_get_cpr_module(dai, substream->stream);
+       if (!mconfig)
+               return -EINVAL;
+
        skl_tplg_d0i3_get(skl, mconfig->d0i3_caps);
 
        return 0;
index 8fc3178bc79cf975809d7c808c0b03cb1d91f846..b30bd384c8d38338d50994e9a92673aba123824d 100644 (file)
@@ -515,6 +515,9 @@ EXPORT_SYMBOL_GPL(skl_sst_init_fw);
 
 void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
 {
+
+       if (ctx->dsp->fw)
+               release_firmware(ctx->dsp->fw);
        skl_clear_module_table(ctx->dsp);
        skl_freeup_uuid_list(ctx);
        skl_ipc_free(&ctx->ipc);
index 4bd68de761309b57285bf203aa2ee46cabc440cc..99b5b0835c1e840c66e45676ff81cd57a4c1875c 100644 (file)
@@ -1030,10 +1030,8 @@ static int __rsnd_kctrl_new(struct rsnd_mod *mod,
                return -ENOMEM;
 
        ret = snd_ctl_add(card, kctrl);
-       if (ret < 0) {
-               snd_ctl_free_one(kctrl);
+       if (ret < 0)
                return ret;
-       }
 
        cfg->update = update;
        cfg->card = card;
index f1901bb1466ec67b12189713c66ad040f1ae75c7..baa1afa41e3dd57fdc36655b7d3bbd147ade820f 100644 (file)
@@ -1748,6 +1748,7 @@ static int soc_bind_aux_dev(struct snd_soc_card *card, int num)
 
        component->init = aux_dev->init;
        component->auxiliary = 1;
+       list_add(&component->card_aux_list, &card->aux_comp_list);
 
        return 0;
 
@@ -1758,16 +1759,14 @@ err_defer:
 
 static int soc_probe_aux_devices(struct snd_soc_card *card)
 {
-       struct snd_soc_component *comp;
+       struct snd_soc_component *comp, *tmp;
        int order;
        int ret;
 
        for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
                order++) {
-               list_for_each_entry(comp, &card->component_dev_list, card_list) {
-                       if (!comp->auxiliary)
-                               continue;
-
+               list_for_each_entry_safe(comp, tmp, &card->aux_comp_list,
+                                        card_aux_list) {
                        if (comp->driver->probe_order == order) {
                                ret = soc_probe_component(card, comp);
                                if (ret < 0) {
@@ -1776,6 +1775,7 @@ static int soc_probe_aux_devices(struct snd_soc_card *card)
                                                comp->name, ret);
                                        return ret;
                                }
+                               list_del(&comp->card_aux_list);
                        }
                }
        }
index e7a1eaa2772f4418534d094e576b424a76aaa844..6aba14009c92abc853d72575ed447e934ed5e458 100644 (file)
@@ -2184,9 +2184,11 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
                break;
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_SUSPEND:
-       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                fe->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
                break;
+       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+               fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED;
+               break;
        }
 
 out:
index 65670b2b408cca0d4bffb2931cb7443cd4ba75b1..fbfb1fab88d5be60de7c40ed8fa1dd78d6fae727 100644 (file)
@@ -514,13 +514,12 @@ static void remove_widget(struct snd_soc_component *comp,
                            == SND_SOC_TPLG_TYPE_MIXER)
                                kfree(kcontrol->tlv.p);
 
-                       snd_ctl_remove(card, kcontrol);
-
                        /* Private value is used as struct soc_mixer_control
                         * for volume mixers or soc_bytes_ext for bytes
                         * controls.
                         */
                        kfree((void *)kcontrol->private_value);
+                       snd_ctl_remove(card, kcontrol);
                }
                kfree(w->kcontrol_news);
        }
index 15d1d5c63c3c40faa5f62316370ea9d8e711fa75..c90607ebe155b4dce4cfa8aa75fc14ecab953d52 100644 (file)
@@ -384,6 +384,9 @@ static void snd_complete_urb(struct urb *urb)
        if (unlikely(atomic_read(&ep->chip->shutdown)))
                goto exit_clear;
 
+       if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
+               goto exit_clear;
+
        if (usb_pipeout(ep->pipe)) {
                retire_outbound_urb(ep, ctx);
                /* can be stopped during retire callback */
@@ -534,6 +537,11 @@ static int wait_clear_urbs(struct snd_usb_endpoint *ep)
                        alive, ep->ep_num);
        clear_bit(EP_FLAG_STOPPING, &ep->flags);
 
+       ep->data_subs = NULL;
+       ep->sync_slave = NULL;
+       ep->retire_data_urb = NULL;
+       ep->prepare_data_urb = NULL;
+
        return 0;
 }
 
@@ -912,9 +920,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
 /**
  * snd_usb_endpoint_start: start an snd_usb_endpoint
  *
- * @ep:                the endpoint to start
- * @can_sleep: flag indicating whether the operation is executed in
- *             non-atomic context
+ * @ep: the endpoint to start
  *
  * A call to this function will increment the use count of the endpoint.
  * In case it is not already running, the URBs for this endpoint will be
@@ -924,7 +930,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
  *
  * Returns an error if the URB submission failed, 0 in all other cases.
  */
-int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep)
+int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
 {
        int err;
        unsigned int i;
@@ -938,8 +944,6 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep)
 
        /* just to be sure */
        deactivate_urbs(ep, false);
-       if (can_sleep)
-               wait_clear_urbs(ep);
 
        ep->active_mask = 0;
        ep->unlink_mask = 0;
@@ -1020,10 +1024,6 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
 
        if (--ep->use_count == 0) {
                deactivate_urbs(ep, false);
-               ep->data_subs = NULL;
-               ep->sync_slave = NULL;
-               ep->retire_data_urb = NULL;
-               ep->prepare_data_urb = NULL;
                set_bit(EP_FLAG_STOPPING, &ep->flags);
        }
 }
index 6428392d8f6244688e65d8be3d04bc9c30c68ce5..584f295d7c7738a6b91753c8c48eed0bf101a200 100644 (file)
@@ -18,7 +18,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
                                struct audioformat *fmt,
                                struct snd_usb_endpoint *sync_ep);
 
-int  snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep);
+int  snd_usb_endpoint_start(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
 int  snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
index 34c6d4f2c0b62967656b1687594161142bedf127..9aa5b18554812a949503375e23764b3d926a0f2d 100644 (file)
@@ -218,7 +218,7 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
        }
 }
 
-static int start_endpoints(struct snd_usb_substream *subs, bool can_sleep)
+static int start_endpoints(struct snd_usb_substream *subs)
 {
        int err;
 
@@ -231,7 +231,7 @@ static int start_endpoints(struct snd_usb_substream *subs, bool can_sleep)
                dev_dbg(&subs->dev->dev, "Starting data EP @%p\n", ep);
 
                ep->data_subs = subs;
-               err = snd_usb_endpoint_start(ep, can_sleep);
+               err = snd_usb_endpoint_start(ep);
                if (err < 0) {
                        clear_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags);
                        return err;
@@ -260,7 +260,7 @@ static int start_endpoints(struct snd_usb_substream *subs, bool can_sleep)
                dev_dbg(&subs->dev->dev, "Starting sync EP @%p\n", ep);
 
                ep->sync_slave = subs->data_endpoint;
-               err = snd_usb_endpoint_start(ep, can_sleep);
+               err = snd_usb_endpoint_start(ep);
                if (err < 0) {
                        clear_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags);
                        return err;
@@ -850,7 +850,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
        /* for playback, submit the URBs now; otherwise, the first hwptr_done
         * updates for all URBs would happen at the same time when starting */
        if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK)
-               ret = start_endpoints(subs, true);
+               ret = start_endpoints(subs);
 
  unlock:
        snd_usb_unlock_shutdown(subs->stream->chip);
@@ -1666,7 +1666,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream
 
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
-               err = start_endpoints(subs, false);
+               err = start_endpoints(subs);
                if (err < 0)
                        return err;
 
index b3fd2382fdd9ed62f102e210489eadd17f4e3b8f..eb4b9f7a571e0f154fd7e00b81ef2c5ff7a17130 100644 (file)
@@ -1135,6 +1135,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
        case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
        case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
+       case USB_ID(0x047F, 0x02F7): /* Plantronics BT-600 */
        case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
        case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
        case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
index 35a02f8e5a4aa90548c643c0de80e85b07a0b3bb..915869e00d863af03ca409f0f9ffa98147c45385 100644 (file)
@@ -655,7 +655,6 @@ static const struct {
        { "__GFP_RECLAIM",              "R" },
        { "__GFP_DIRECT_RECLAIM",       "DR" },
        { "__GFP_KSWAPD_RECLAIM",       "KR" },
-       { "__GFP_OTHER_NODE",           "ON" },
 };
 
 static size_t max_gfp_len;
index 71b05891a6a14adfe7537e70333352b844966134..831022b12848de68695065040b38bd7ec55f7577 100644 (file)
@@ -90,7 +90,7 @@ ifdef INSTALL_PATH
        done;
 
        @# Ask all targets to emit their test scripts
-       echo "#!/bin/bash" > $(ALL_SCRIPT)
+       echo "#!/bin/sh" > $(ALL_SCRIPT)
        echo "cd \$$(dirname \$$0)" >> $(ALL_SCRIPT)
        echo "ROOT=\$$PWD" >> $(ALL_SCRIPT)
 
index 92e627adf3540f5c79ca4a2d372f81c74fa3bf8b..6d58cca8e23574a1fe15993d1da8f1558ecf8e06 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
 
 SRC_TREE=../../../../
 
index c09a682df56ae9fb3cc91214ae9d79da19714633..16058bbea7a8501324ce6c9c639f5219d69979ba 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
 
 echo "--------------------"
 echo "running socket test"
index bdd58c78902e96948f92475f4898de1ec0c14a57..df9e0a0cdf294e1e334c6558c9c4f87a38387e1d 100644 (file)
@@ -1367,7 +1367,7 @@ void run_tests_once(void)
                tracing_off();
                close_test_fds();
 
-               printf("test %2d PASSED (itertation %d)\n", test_nr, iteration_nr);
+               printf("test %2d PASSED (iteration %d)\n", test_nr, iteration_nr);
                dprintf1("======================\n\n");
        }
        iteration_nr++;
index 17a513268325d197b76aa36d7c26c337a70b9a4d..0b87e71c00fcc9c6cb0de2248b191e89228bf9b6 100644 (file)
@@ -5,8 +5,10 @@
 klibcdirs:;
 PHONY += klibcdirs
 
-suffix_y = $(CONFIG_INITRAMFS_COMPRESSION)
-AFLAGS_initramfs_data.o += -DINITRAMFS_IMAGE="usr/initramfs_data.cpio$(suffix_y)"
+suffix_y = $(subst $\",,$(CONFIG_INITRAMFS_COMPRESSION))
+datafile_y = initramfs_data.cpio$(suffix_y)
+AFLAGS_initramfs_data.o += -DINITRAMFS_IMAGE="usr/$(datafile_y)"
+
 
 # Generate builtin.o based on initramfs_data.o
 obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data.o
@@ -14,7 +16,7 @@ obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data.o
 # initramfs_data.o contains the compressed initramfs_data.cpio image.
 # The image is included using .incbin, a dependency which is not
 # tracked automatically.
-$(obj)/initramfs_data.o: $(obj)/initramfs_data.cpio$(suffix_y) FORCE
+$(obj)/initramfs_data.o: $(obj)/$(datafile_y) FORCE
 
 #####
 # Generate the initramfs cpio archive
@@ -38,10 +40,8 @@ endif
 quiet_cmd_initfs = GEN     $@
       cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input)
 
-targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 \
-       initramfs_data.cpio.lzma initramfs_data.cpio.xz \
-       initramfs_data.cpio.lzo initramfs_data.cpio.lz4 \
-       initramfs_data.cpio
+targets := $(datafile_y)
+
 # do not try to update files included in initramfs
 $(deps_initramfs): ;
 
@@ -51,6 +51,6 @@ $(deps_initramfs): klibcdirs
 # 2) There are changes in which files are included (added or deleted)
 # 3) If gen_init_cpio are newer than initramfs_data.cpio
 # 4) arguments to gen_initramfs.sh changes
-$(obj)/initramfs_data.cpio$(suffix_y): $(obj)/gen_init_cpio $(deps_initramfs) klibcdirs
+$(obj)/$(datafile_y): $(obj)/gen_init_cpio $(deps_initramfs) klibcdirs
        $(Q)$(initramfs) -l $(ramfs-input) > $(obj)/.initramfs_data.cpio.d
        $(call if_changed,initfs)
index 52abac4bb6a2532ad55f4ec690530a6a7b593df0..6d2fcd6fcb2509e801c84b88e6c18e3e7a1d1fa0 100644 (file)
@@ -195,7 +195,7 @@ int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer)
        mutex_lock(&lock);
 
        list_for_each_entry(tmp, &consumers, node) {
-               if (tmp->token == consumer->token) {
+               if (tmp->token == consumer->token || tmp == consumer) {
                        mutex_unlock(&lock);
                        module_put(THIS_MODULE);
                        return -EBUSY;
@@ -245,7 +245,7 @@ void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer)
        mutex_lock(&lock);
 
        list_for_each_entry(tmp, &consumers, node) {
-               if (tmp->token != consumer->token)
+               if (tmp != consumer)
                        continue;
 
                list_for_each_entry(producer, &producers, node) {