Merge tag 'clk-for-linus' of git://git.linaro.org/people/mturquette/linux
author     Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 20 Feb 2013 19:02:10 +0000 (11:02 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 20 Feb 2013 19:02:10 +0000 (11:02 -0800)
Pull clock framework update from Michael Turquette:
 "The common clock framework changes for 3.9 are almost entirely fixes.

  None are dire enough to be Cc'd to stable, which may be interpreted to
  mean that users of the framework are reaching stability.  Lots of new
  adoption of this framework is via DeviceTree data and that comes
  through the respective architecture and platform trees instead of
  through the clk framework tree.

  Two new features are improved debugfs output and an improvement to how
  DT clocks are initialized by reusing a common method."
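
For reference, a minimal, illustrative sketch of the common of_clk_init() /
CLK_OF_DECLARE() pattern that the "Use common of_clk_init() function" commits
below converge on.  The "foo" names, the "vendor,foo-clock" compatible string
and the 24 MHz fixed rate are hypothetical; only of_clk_init(),
CLK_OF_DECLARE() and the fixed-rate helpers are the framework interfaces
involved.

/* Hypothetical clock provider registering itself with the common
 * of_clk_init() machinery instead of a platform-private DT scan. */
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/of.h>

static void __init foo_clk_setup(struct device_node *np)
{
        struct clk *clk;

        clk = clk_register_fixed_rate(NULL, np->name, NULL, CLK_IS_ROOT,
                                      24000000);
        if (!IS_ERR(clk))
                of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
CLK_OF_DECLARE(foo_clk, "vendor,foo-clock", foo_clk_setup);

/* Platform init code then only needs: */
void __init foo_init_clocks(void)
{
        of_clk_init(NULL);      /* probe every CLK_OF_DECLARE()'d match */
}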

* tag 'clk-for-linus' of git://git.linaro.org/people/mturquette/linux: (25 commits)
  clk: sunxi: remove stale Makefile entry
  clk: vexpress: Use common of_clk_init() function
  clk: zynq: Use common of_clk_init() function
  clk: vt8500: Use common of_clk_init() function
  clk: highbank: Use common of_clk_init() function
  clk: sunxi: Use common of_clk_init() function
  clk: add common of_clk_init() function
  clk: Deduplicate exit code in clk_set_rate
  clk: beautify Makefile
  clk-divider: fix macros
  clk: prima2: enable dt-binding clkdev mapping
  clk: mxs: Index is always positive
  clk: max77686: Avoid double free at remove time
  clk: remove exported function from __init section
  clk: vt8500: Add support for WM8750/WM8850 PLL clocks
  clk: vt8500: Fix division-by-0 when requested rate=0
  clk: vt8500: Fix device clock divisor calculations
  clk: vt8500: Fix error in PLL calculations on non-exact match.
  clk: max77686: Remove unnecessary NULL checking for container_of()
  clk: JSON debugfs clock tree summary
  ...

1688 files changed:
Documentation/ABI/testing/sysfs-bus-event_source-devices-events [new file with mode: 0644]
Documentation/ABI/testing/sysfs-platform-ts5500 [new file with mode: 0644]
Documentation/PCI/MSI-HOWTO.txt
Documentation/atomic_ops.txt
Documentation/cgroups/00-INDEX
Documentation/cgroups/memcg_test.txt
Documentation/device-mapper/dm-raid.txt
Documentation/devicetree/bindings/i2c/ina209.txt [new file with mode: 0644]
Documentation/devicetree/bindings/i2c/max6697.txt [new file with mode: 0644]
Documentation/devicetree/bindings/input/imx-keypad.txt [new file with mode: 0644]
Documentation/devicetree/bindings/input/nvidia,tegra20-kbc.txt
Documentation/devicetree/bindings/mfd/tps6507x.txt [new file with mode: 0755]
Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt
Documentation/devicetree/bindings/pinctrl/nvidia,tegra114-pinmux.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt [new file with mode: 0644]
Documentation/devicetree/bindings/power_supply/qnap-poweroff.txt [new file with mode: 0644]
Documentation/devicetree/bindings/power_supply/restart-poweroff.txt [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/anatop-regulator.txt
Documentation/devicetree/bindings/regulator/s5m8767-regulator.txt [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/tps51632-regulator.txt [new file with mode: 0644]
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/filesystems/f2fs.txt
Documentation/hid/hid-sensor.txt [changed mode: 0755->0644]
Documentation/hwmon/coretemp
Documentation/hwmon/ina209 [new file with mode: 0644]
Documentation/hwmon/it87
Documentation/hwmon/jc42
Documentation/hwmon/lm73 [new file with mode: 0644]
Documentation/hwmon/max34440
Documentation/hwmon/max6697 [new file with mode: 0644]
Documentation/hwmon/sysfs-interface
Documentation/hwmon/zl6100
Documentation/kernel-parameters.txt
Documentation/memory-barriers.txt
Documentation/pinctrl.txt
Documentation/trace/ftrace.txt
Documentation/x86/boot.txt
Documentation/x86/zero-page.txt
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/Kconfig
arch/alpha/kernel/osf_sys.c
arch/arm/Kconfig
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/armada-370-db.dts
arch/arm/boot/dts/armada-xp-mv78230.dtsi
arch/arm/boot/dts/armada-xp-mv78260.dtsi
arch/arm/boot/dts/armada-xp-mv78460.dtsi
arch/arm/boot/dts/at91rm9200.dtsi
arch/arm/boot/dts/at91sam9260.dtsi
arch/arm/boot/dts/at91sam9263.dtsi
arch/arm/boot/dts/at91sam9g45.dtsi
arch/arm/boot/dts/at91sam9n12.dtsi
arch/arm/boot/dts/at91sam9x5.dtsi
arch/arm/boot/dts/cros5250-common.dtsi
arch/arm/boot/dts/dbx5x0.dtsi
arch/arm/boot/dts/dove-cubox.dts
arch/arm/boot/dts/exynos5250-smdk5250.dts
arch/arm/boot/dts/kirkwood-ns2-common.dtsi
arch/arm/boot/dts/kirkwood.dtsi
arch/arm/boot/dts/kizbox.dts
arch/arm/boot/dts/sun4i-a10.dtsi
arch/arm/boot/dts/sun5i-a13-olinuxino.dts
arch/arm/boot/dts/sun5i-a13.dtsi
arch/arm/boot/dts/sunxi.dtsi
arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
arch/arm/common/gic.c
arch/arm/configs/at91_dt_defconfig
arch/arm/include/asm/memory.h
arch/arm/include/asm/smp_scu.h
arch/arm/kernel/debug.S
arch/arm/kernel/head.S
arch/arm/kernel/hyp-stub.S
arch/arm/kernel/smp_scu.c
arch/arm/mach-at91/setup.c
arch/arm/mach-exynos/Kconfig
arch/arm/mach-highbank/highbank.c
arch/arm/mach-highbank/sysregs.h
arch/arm/mach-imx/Kconfig
arch/arm/mach-imx/clk-imx25.c
arch/arm/mach-imx/clk-imx27.c
arch/arm/mach-imx/clk-imx31.c
arch/arm/mach-imx/clk-imx35.c
arch/arm/mach-imx/clk-imx51-imx53.c
arch/arm/mach-imx/clk-imx6q.c
arch/arm/mach-imx/common.h
arch/arm/mach-imx/devices/devices-common.h
arch/arm/mach-imx/devices/platform-fsl-usb2-udc.c
arch/arm/mach-imx/devices/platform-imx-fb.c
arch/arm/mach-imx/hotplug.c
arch/arm/mach-imx/iram_alloc.c
arch/arm/mach-imx/platsmp.c
arch/arm/mach-imx/pm-imx6q.c
arch/arm/mach-integrator/pci_v3.c
arch/arm/mach-kirkwood/board-ns2.c
arch/arm/mach-mvebu/Makefile
arch/arm/mach-omap2/board-omap4panda.c
arch/arm/mach-omap2/cclock2420_data.c
arch/arm/mach-omap2/cclock2430_data.c
arch/arm/mach-omap2/cclock44xx_data.c
arch/arm/mach-omap2/devices.c
arch/arm/mach-omap2/drm.c
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/timer.c
arch/arm/mach-pxa/include/mach/mfp-pxa27x.h
arch/arm/mach-pxa/pxa27x.c
arch/arm/mach-realview/include/mach/irqs-eb.h
arch/arm/mach-s3c64xx/mach-crag6410-module.c
arch/arm/mach-s3c64xx/pm.c
arch/arm/mach-sunxi/Kconfig
arch/arm/mach-ux500/Kconfig
arch/arm/mach-ux500/board-mop500.c
arch/arm/mach-ux500/cpu-db8500.c
arch/arm/mach-ux500/include/mach/irqs-board-mop500.h
arch/arm/mm/dma-mapping.c
arch/arm/mm/mmu.c
arch/arm/plat-versatile/headsmp.S
arch/arm/vfp/entry.S
arch/arm/vfp/vfphw.S
arch/arm64/Kconfig
arch/arm64/boot/dts/Makefile
arch/arm64/include/asm/elf.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/unistd32.h
arch/arm64/kernel/vdso.c
arch/arm64/kernel/vdso/gettimeofday.S
arch/avr32/include/asm/dma-mapping.h
arch/blackfin/Kconfig
arch/blackfin/include/asm/dma-mapping.h
arch/c6x/include/asm/dma-mapping.h
arch/cris/include/asm/dma-mapping.h
arch/frv/Kconfig
arch/frv/include/asm/dma-mapping.h
arch/hexagon/Kconfig
arch/ia64/Kconfig
arch/ia64/include/asm/cputime.h
arch/ia64/include/asm/thread_info.h
arch/ia64/include/asm/xen/minstate.h
arch/ia64/kernel/asm-offsets.c
arch/ia64/kernel/entry.S
arch/ia64/kernel/fsys.S
arch/ia64/kernel/head.S
arch/ia64/kernel/ivt.S
arch/ia64/kernel/minstate.h
arch/ia64/kernel/ptrace.c
arch/ia64/kernel/time.c
arch/m68k/include/asm/dma-mapping.h
arch/m68k/include/asm/pgtable_no.h
arch/m68k/include/asm/processor.h
arch/m68k/include/asm/unistd.h
arch/m68k/include/uapi/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/m68k/mm/init.c
arch/mips/Kconfig
arch/mips/bcm47xx/Kconfig
arch/mips/cavium-octeon/executive/cvmx-l2c.c
arch/mips/include/asm/dsp.h
arch/mips/include/asm/inst.h
arch/mips/include/asm/mach-pnx833x/war.h
arch/mips/include/asm/pgtable-64.h
arch/mips/include/uapi/asm/Kbuild
arch/mips/include/uapi/asm/break.h [moved from arch/mips/include/asm/break.h with 100% similarity]
arch/mips/kernel/ftrace.c
arch/mips/kernel/mcount.S
arch/mips/kernel/vpe.c
arch/mips/lantiq/irq.c
arch/mips/lib/delay.c
arch/mips/mm/ioremap.c
arch/mips/mm/mmap.c
arch/mips/netlogic/xlr/setup.c
arch/mips/pci/pci-ar71xx.c
arch/mips/pci/pci-ar724x.c
arch/mn10300/Kconfig
arch/mn10300/include/asm/dma-mapping.h
arch/parisc/Kconfig
arch/parisc/include/asm/dma-mapping.h
arch/parisc/kernel/entry.S
arch/parisc/kernel/irq.c
arch/parisc/kernel/ptrace.c
arch/parisc/kernel/signal.c
arch/parisc/math-emu/cnv_float.h
arch/powerpc/Kconfig
arch/powerpc/configs/chroma_defconfig
arch/powerpc/configs/corenet64_smp_defconfig
arch/powerpc/configs/pasemi_defconfig
arch/powerpc/include/asm/cputime.h
arch/powerpc/include/asm/lppaca.h
arch/powerpc/include/asm/perf_event_server.h
arch/powerpc/include/asm/ppc_asm.h
arch/powerpc/include/uapi/asm/kvm_para.h
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/kgdb.c
arch/powerpc/kernel/time.c
arch/powerpc/kvm/book3s_hv_ras.c
arch/powerpc/kvm/emulate.c
arch/powerpc/mm/hash_low_64.S
arch/powerpc/oprofile/op_model_power4.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/perf/power7-pmu.c
arch/powerpc/platforms/cell/spufs/sched.c
arch/powerpc/platforms/pasemi/cpufreq.c
arch/powerpc/platforms/pseries/dtl.c
arch/powerpc/platforms/pseries/setup.c
arch/s390/Kconfig
arch/s390/Makefile
arch/s390/include/asm/dma.h
arch/s390/include/asm/io.h
arch/s390/include/asm/irq.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/timex.h
arch/s390/include/uapi/asm/unistd.h
arch/s390/kernel/compat_wrapper.S
arch/s390/kernel/debug.c
arch/s390/kernel/irq.c
arch/s390/kernel/nmi.c
arch/s390/kernel/perf_cpum_cf.c
arch/s390/kernel/runtime_instr.c
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c
arch/s390/kernel/syscalls.S
arch/s390/kernel/time.c
arch/s390/kernel/topology.c
arch/s390/kernel/vtime.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/mm/fault.c
arch/s390/oprofile/hwsampler.c
arch/s390/pci/pci.c
arch/s390/pci/pci_dma.c
arch/sh/Kconfig
arch/sh/boards/mach-ecovec24/setup.c
arch/sh/include/asm/elf.h
arch/sh/include/asm/processor_32.h
arch/sh/include/asm/processor_64.h
arch/sh/include/uapi/asm/unistd_32.h
arch/sh/include/uapi/asm/unistd_64.h
arch/sh/kernel/syscalls_32.S
arch/sh/kernel/syscalls_64.S
arch/sh/lib/mcount.S
arch/sparc/Kconfig
arch/sparc/include/asm/pgtable_64.h
arch/sparc/include/uapi/asm/unistd.h
arch/sparc/kernel/pci.c
arch/sparc/kernel/pci_psycho.c
arch/sparc/kernel/pci_sabre.c
arch/sparc/kernel/pci_schizo.c
arch/sparc/kernel/sbus.c
arch/sparc/kernel/systbls_32.S
arch/sparc/kernel/systbls_64.S
arch/sparc/mm/gup.c
arch/tile/Kconfig
arch/tile/include/asm/io.h
arch/tile/include/asm/irqflags.h
arch/tile/include/uapi/arch/interrupts_32.h
arch/tile/include/uapi/arch/interrupts_64.h
arch/tile/kernel/intvec_64.S
arch/tile/kernel/process.c
arch/tile/kernel/reboot.c
arch/tile/kernel/setup.c
arch/tile/kernel/stack.c
arch/tile/lib/cacheflush.c
arch/tile/lib/cpumask.c
arch/tile/lib/exports.c
arch/tile/mm/homecache.c
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/boot/Makefile
arch/x86/boot/compressed/eboot.c
arch/x86/boot/compressed/head_32.S
arch/x86/boot/compressed/head_64.S
arch/x86/boot/compressed/misc.c
arch/x86/boot/compressed/misc.h
arch/x86/boot/header.S
arch/x86/boot/setup.ld
arch/x86/boot/tools/build.c
arch/x86/configs/i386_defconfig
arch/x86/ia32/ia32entry.S
arch/x86/include/asm/amd_nb.h
arch/x86/include/asm/bootparam_utils.h [new file with mode: 0644]
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/efi.h
arch/x86/include/asm/ftrace.h
arch/x86/include/asm/hpet.h
arch/x86/include/asm/hw_irq.h
arch/x86/include/asm/hypervisor.h
arch/x86/include/asm/io_apic.h
arch/x86/include/asm/irq_remapping.h
arch/x86/include/asm/irq_vectors.h
arch/x86/include/asm/kvm_para.h
arch/x86/include/asm/linkage.h
arch/x86/include/asm/mce.h
arch/x86/include/asm/mshyperv.h
arch/x86/include/asm/pci.h
arch/x86/include/asm/perf_event.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_32.h
arch/x86/include/asm/pgtable_64.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/required-features.h
arch/x86/include/asm/uv/uv.h
arch/x86/include/asm/uv/uv_hub.h
arch/x86/include/asm/uv/uv_mmrs.h
arch/x86/include/asm/x86_init.h
arch/x86/include/asm/xor.h
arch/x86/include/asm/xor_32.h
arch/x86/include/asm/xor_64.h
arch/x86/include/uapi/asm/bootparam.h
arch/x86/include/uapi/asm/mce.h
arch/x86/include/uapi/asm/msr-index.h
arch/x86/kernel/Makefile
arch/x86/kernel/apb_timer.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/ipi.c
arch/x86/kernel/apic/x2apic_phys.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/apm_32.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/hypervisor.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_amd.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_p6.c
arch/x86/kernel/cpu/vmware.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/entry_64.S
arch/x86/kernel/head32.c
arch/x86/kernel/head64.c
arch/x86/kernel/head_32.S
arch/x86/kernel/hpet.c
arch/x86/kernel/kprobes/Makefile [new file with mode: 0644]
arch/x86/kernel/kprobes/common.h [moved from arch/x86/kernel/kprobes-common.h with 90% similarity]
arch/x86/kernel/kprobes/core.c [moved from arch/x86/kernel/kprobes.c with 94% similarity]
arch/x86/kernel/kprobes/ftrace.c [new file with mode: 0644]
arch/x86/kernel/kprobes/opt.c [moved from arch/x86/kernel/kprobes-opt.c with 99% similarity]
arch/x86/kernel/kvm.c
arch/x86/kernel/msr.c
arch/x86/kernel/pci-dma.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/reboot.c
arch/x86/kernel/rtc.c
arch/x86/kernel/setup.c
arch/x86/kernel/step.c
arch/x86/kernel/sys_x86_64.c
arch/x86/kernel/tsc.c
arch/x86/kernel/uprobes.c
arch/x86/kernel/x86_init.c
arch/x86/kvm/x86.c
arch/x86/mm/fault.c
arch/x86/mm/init_64.c
arch/x86/mm/memtest.c
arch/x86/mm/srat.c
arch/x86/mm/tlb.c
arch/x86/pci/mmconfig-shared.c
arch/x86/platform/Makefile
arch/x86/platform/efi/efi-bgrt.c
arch/x86/platform/efi/efi.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/goldfish/Makefile [new file with mode: 0644]
arch/x86/platform/goldfish/goldfish.c [new file with mode: 0644]
arch/x86/platform/sfi/sfi.c
arch/x86/platform/ts5500/Makefile [new file with mode: 0644]
arch/x86/platform/ts5500/ts5500.c [new file with mode: 0644]
arch/x86/platform/uv/tlb_uv.c
arch/x86/platform/uv/uv_time.c
arch/x86/tools/insn_sanity.c
arch/x86/tools/relocs.c
arch/x86/um/fault.c
arch/x86/vdso/vclock_gettime.c
arch/x86/xen/enlighten.c
arch/x86/xen/smp.c
arch/x86/xen/suspend.c
arch/x86/xen/xen-asm_32.S
arch/x86/xen/xen-ops.h
arch/xtensa/include/asm/dma-mapping.h
block/blk-exec.c
block/elevator.c
block/genhd.c
drivers/acpi/apei/apei-base.c
drivers/acpi/apei/cper.c
drivers/acpi/glue.c
drivers/acpi/osl.c
drivers/acpi/processor_idle.c
drivers/acpi/processor_perflib.c
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/atm/iphase.h
drivers/base/Makefile
drivers/base/cpu.c
drivers/base/dd.c
drivers/base/firmware_class.c
drivers/base/pinctrl.c [new file with mode: 0644]
drivers/base/regmap/Makefile
drivers/base/regmap/internal.h
drivers/base/regmap/regcache-flat.c [new file with mode: 0644]
drivers/base/regmap/regcache.c
drivers/base/regmap/regmap-debugfs.c
drivers/base/regmap/regmap-irq.c
drivers/base/regmap/regmap-mmio.c
drivers/base/regmap/regmap-spi.c
drivers/base/regmap/regmap.c
drivers/bcma/bcma_private.h
drivers/bcma/driver_chipcommon_nflash.c
drivers/bcma/driver_gpio.c
drivers/bcma/main.c
drivers/block/drbd/drbd_req.c
drivers/block/drbd/drbd_req.h
drivers/block/drbd/drbd_state.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/sunvdc.c
drivers/block/virtio_blk.c
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkfront.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btusb.c
drivers/char/virtio_console.c
drivers/clk/mvebu/clk-cpu.c
drivers/cpufreq/Kconfig.x86
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/cpufreq-cpu0.c
drivers/cpufreq/omap-cpufreq.c
drivers/cpuidle/cpuidle.c
drivers/cpuidle/driver.c
drivers/cpuidle/governors/menu.c
drivers/cpuidle/sysfs.c
drivers/devfreq/devfreq.c
drivers/devfreq/exynos4_bus.c
drivers/dma/imx-dma.c
drivers/dma/ioat/dma_v3.c
drivers/dma/tegra20-apb-dma.c
drivers/edac/amd64_edac.c
drivers/edac/amd64_edac.h
drivers/edac/edac_mc.c
drivers/edac/edac_pci_sysfs.c
drivers/firmware/dmi_scan.c
drivers/firmware/efivars.c
drivers/firmware/iscsi_ibft_find.c
drivers/gpio/Kconfig
drivers/gpio/Makefile
drivers/gpio/gpio-ab8500.c [deleted file]
drivers/gpio/gpio-mvebu.c
drivers/gpio/gpio-samsung.c
drivers/gpio/gpiolib-of.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/exynos/Kconfig
drivers/gpu/drm/exynos/exynos_drm_connector.c
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_hdmi.c
drivers/gpu/drm/exynos/exynos_drm_hdmi.h
drivers/gpu/drm/exynos/exynos_drm_ipp.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/nouveau/core/core/client.c
drivers/gpu/drm/nouveau/core/core/falcon.c
drivers/gpu/drm/nouveau/core/core/handle.c
drivers/gpu/drm/nouveau/core/core/subdev.c
drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
drivers/gpu/drm/nouveau/core/include/core/client.h
drivers/gpu/drm/nouveau/core/include/core/object.h
drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
drivers/gpu/drm/nouveau/core/subdev/bios/init.c
drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/fb/base.c
drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
drivers/gpu/drm/nouveau/core/subdev/vm/base.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_fence.h
drivers/gpu/drm/nouveau/nv04_dfp.c
drivers/gpu/drm/nouveau/nv10_fence.c
drivers/gpu/drm/nouveau/nv50_fence.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_cursor.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/radeon_semaphore.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/reg_srcs/cayman
drivers/gpu/drm/radeon/reg_srcs/rv515
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/udl/udl_connector.c
drivers/hid/hid-ids.h
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/usbhid/hid-quirks.c
drivers/hv/Kconfig
drivers/hv/hv_balloon.c
drivers/hwmon/Kconfig
drivers/hwmon/Makefile
drivers/hwmon/ad7414.c
drivers/hwmon/adm1021.c
drivers/hwmon/adm1026.c
drivers/hwmon/adm1031.c
drivers/hwmon/adm9240.c
drivers/hwmon/ads7828.c
drivers/hwmon/adt7410.c
drivers/hwmon/adt7462.c
drivers/hwmon/adt7470.c
drivers/hwmon/adt7475.c
drivers/hwmon/amc6821.c
drivers/hwmon/asb100.c
drivers/hwmon/asc7621.c
drivers/hwmon/coretemp.c
drivers/hwmon/dme1737.c
drivers/hwmon/emc2103.c
drivers/hwmon/emc6w201.c
drivers/hwmon/f71882fg.c
drivers/hwmon/f75375s.c
drivers/hwmon/fschmd.c
drivers/hwmon/g760a.c
drivers/hwmon/gl518sm.c
drivers/hwmon/gl520sm.c
drivers/hwmon/ina209.c [new file with mode: 0644]
drivers/hwmon/it87.c
drivers/hwmon/jc42.c
drivers/hwmon/lm63.c
drivers/hwmon/lm73.c
drivers/hwmon/lm75.h
drivers/hwmon/lm77.c
drivers/hwmon/lm78.c
drivers/hwmon/lm80.c
drivers/hwmon/lm85.c
drivers/hwmon/lm90.c
drivers/hwmon/lm93.c
drivers/hwmon/lm95245.c
drivers/hwmon/max16065.c
drivers/hwmon/max1668.c
drivers/hwmon/max6639.c
drivers/hwmon/max6642.c
drivers/hwmon/max6650.c
drivers/hwmon/max6697.c [new file with mode: 0644]
drivers/hwmon/ntc_thermistor.c
drivers/hwmon/pmbus/Kconfig
drivers/hwmon/pmbus/max34440.c
drivers/hwmon/pmbus/pmbus.h
drivers/hwmon/pmbus/pmbus_core.c
drivers/hwmon/pmbus/zl6100.c
drivers/hwmon/sht15.c
drivers/hwmon/sis5595.c
drivers/hwmon/smsc47m1.c
drivers/hwmon/smsc47m192.c
drivers/hwmon/thmc50.c
drivers/hwmon/tmp102.c
drivers/hwmon/tmp401.c
drivers/hwmon/vexpress.c
drivers/hwmon/via686a.c
drivers/hwmon/vt1211.c
drivers/hwmon/vt8231.c
drivers/hwmon/w83627ehf.c
drivers/hwmon/w83627hf.c
drivers/hwmon/w83781d.c
drivers/hwmon/w83791d.c
drivers/hwmon/w83792d.c
drivers/hwmon/w83793.c
drivers/hwmon/w83795.c
drivers/hwmon/w83l786ng.c
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-mxs.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-sirf.c
drivers/i2c/muxes/i2c-mux-pinctrl.c
drivers/idle/intel_idle.c
drivers/iio/accel/Kconfig
drivers/iio/adc/ad7266.c
drivers/iio/adc/at91_adc.c
drivers/iio/adc/max1363.c
drivers/iio/common/hid-sensors/Kconfig
drivers/iio/common/hid-sensors/Makefile
drivers/iio/dac/ad5380.c
drivers/iio/dac/ad5446.c
drivers/iio/dac/ad5504.c
drivers/iio/dac/ad5624r_spi.c
drivers/iio/dac/ad5686.c
drivers/iio/dac/ad5791.c
drivers/iio/frequency/adf4350.c
drivers/iio/gyro/Kconfig
drivers/iio/light/Kconfig
drivers/iio/magnetometer/Kconfig
drivers/infiniband/hw/qib/qib_qp.c
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/input/Kconfig
drivers/input/input-mt.c
drivers/input/input.c
drivers/input/joystick/analog.c
drivers/input/joystick/walkera0701.c
drivers/input/keyboard/Kconfig
drivers/input/keyboard/Makefile
drivers/input/keyboard/atkbd.c
drivers/input/keyboard/goldfish_events.c [new file with mode: 0644]
drivers/input/keyboard/imx_keypad.c
drivers/input/keyboard/lm8323.c
drivers/input/keyboard/qt2160.c
drivers/input/keyboard/tegra-kbc.c
drivers/input/misc/adxl34x.c
drivers/input/misc/bma150.c
drivers/input/misc/twl4030-vibra.c
drivers/input/misc/twl6040-vibra.c
drivers/input/misc/wm831x-on.c
drivers/input/mouse/Kconfig
drivers/input/mouse/Makefile
drivers/input/mouse/alps.c
drivers/input/mouse/alps.h
drivers/input/mouse/cyapa.c [new file with mode: 0644]
drivers/input/mouse/cypress_ps2.c [new file with mode: 0644]
drivers/input/mouse/cypress_ps2.h [new file with mode: 0644]
drivers/input/mouse/psmouse-base.c
drivers/input/mouse/psmouse.h
drivers/input/mouse/synaptics.c
drivers/input/serio/Kconfig
drivers/input/tablet/wacom_sys.c
drivers/input/tablet/wacom_wac.c
drivers/input/tablet/wacom_wac.h
drivers/input/touchscreen/Kconfig
drivers/input/touchscreen/cyttsp_spi.c
drivers/input/touchscreen/mms114.c
drivers/input/touchscreen/stmpe-ts.c
drivers/input/touchscreen/tsc2005.c
drivers/input/touchscreen/wm831x-ts.c
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/dmar.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel_irq_remapping.c
drivers/iommu/irq_remapping.c
drivers/iommu/irq_remapping.h
drivers/isdn/gigaset/capi.c
drivers/isdn/mISDN/stack.c
drivers/md/dm-raid.c
drivers/md/dm-thin.c
drivers/md/dm.c
drivers/media/dvb-core/dvb_frontend.c
drivers/media/i2c/m5mols/m5mols_core.c
drivers/media/platform/coda.c
drivers/media/platform/omap3isp/ispvideo.c
drivers/media/platform/s5p-fimc/fimc-mdevice.c
drivers/media/platform/s5p-mfc/s5p_mfc.c
drivers/media/radio/radio-keene.c
drivers/media/radio/radio-si4713.c
drivers/media/radio/radio-wl1273.c
drivers/media/radio/wl128x/fmdrv_v4l2.c
drivers/media/usb/gspca/kinect.c
drivers/media/usb/gspca/sonixb.c
drivers/media/usb/gspca/sonixj.c
drivers/media/usb/uvc/uvc_ctrl.c
drivers/media/usb/uvc/uvc_v4l2.c
drivers/media/v4l2-core/videobuf2-core.c
drivers/mfd/Kconfig
drivers/mfd/ab8500-core.c
drivers/mfd/arizona-core.c
drivers/mfd/arizona-irq.c
drivers/mfd/da9052-i2c.c
drivers/mfd/db8500-prcmu.c
drivers/mfd/max77686.c
drivers/mfd/max77693.c
drivers/mfd/pcf50633-core.c
drivers/mfd/rtl8411.c
drivers/mfd/rts5209.c
drivers/mfd/rts5229.c
drivers/mfd/rtsx_pcr.c
drivers/mfd/sec-core.c
drivers/mfd/tc3589x.c
drivers/mfd/twl4030-power.c
drivers/mfd/vexpress-config.c
drivers/mfd/vexpress-sysreg.c
drivers/mfd/wm5102-tables.c
drivers/mfd/wm5110-tables.c
drivers/misc/atmel-ssc.c
drivers/misc/mei/amthif.c
drivers/misc/sgi-gru/grufile.c
drivers/misc/ti-st/st_kim.c
drivers/mmc/host/mvsdio.c
drivers/mmc/host/rtsx_pci_sdmmc.c
drivers/mtd/devices/Kconfig
drivers/mtd/maps/physmap_of.c
drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
drivers/mtd/nand/davinci_nand.c
drivers/mtd/nand/nand_base.c
drivers/net/bonding/bond_sysfs.c
drivers/net/can/c_can/c_can.c
drivers/net/can/pch_can.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/can/ti_hecc.c
drivers/net/ethernet/3com/3c574_cs.c
drivers/net/ethernet/adi/Kconfig
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/calxeda/xgmac.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/intel/e1000e/defines.h
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/hw.h
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/ixgbe/Makefile
drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/xilinx/Kconfig
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc_drv.c
drivers/net/loopback.c
drivers/net/macvlan.c
drivers/net/phy/icplus.c
drivers/net/phy/marvell.c
drivers/net/tun.c
drivers/net/usb/cdc_mbim.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/dm9601.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/usbnet.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/wimax/i2400m/netdev.c
drivers/net/wireless/ath/Kconfig
drivers/net/wireless/ath/Makefile
drivers/net/wireless/ath/ath9k/ar9003_calib.c
drivers/net/wireless/ath/ath9k/ar9003_phy.c
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/beacon.c
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/debug.h
drivers/net/wireless/ath/ath9k/htc_hst.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/wil6210/Kconfig [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/Makefile [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/cfg80211.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/dbg_hexdump.h [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/debugfs.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/interrupt.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/main.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/netdev.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/pcie_bus.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/txrx.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/txrx.h [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/wil6210.h [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/wmi.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/wmi.h [new file with mode: 0644]
drivers/net/wireless/b43/b43.h
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43/main.h
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
drivers/net/wireless/brcm80211/brcmsmac/main.c
drivers/net/wireless/brcm80211/brcmsmac/pub.h
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/ipw2x00/ipw2100.h
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/iwlegacy/3945-mac.c
drivers/net/wireless/iwlegacy/common.c
drivers/net/wireless/iwlwifi/dvm/tx.c
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/mwifiex/sta_ioctl.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/rtlwifi/Kconfig
drivers/net/wireless/rtlwifi/base.c
drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
drivers/net/wireless/rtlwifi/usb.c
drivers/net/wireless/ti/wl1251/ps.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/pci/hotplug/pciehp.h
drivers/pci/hotplug/pciehp_core.c
drivers/pci/hotplug/pciehp_ctrl.c
drivers/pci/hotplug/pciehp_hpc.c
drivers/pci/hotplug/shpchp.h
drivers/pci/hotplug/shpchp_core.c
drivers/pci/hotplug/shpchp_ctrl.c
drivers/pci/iov.c
drivers/pci/msi.c
drivers/pci/pcie/Kconfig
drivers/pci/pcie/aer/aerdrv_core.c
drivers/pci/pcie/aer/aerdrv_errprint.c
drivers/pci/pcie/aspm.c
drivers/pci/remove.c
drivers/pinctrl/Kconfig
drivers/pinctrl/Makefile
drivers/pinctrl/core.c
drivers/pinctrl/core.h
drivers/pinctrl/devicetree.c
drivers/pinctrl/mvebu/pinctrl-dove.c
drivers/pinctrl/mvebu/pinctrl-kirkwood.c
drivers/pinctrl/pinconf-generic.c
drivers/pinctrl/pinconf.c
drivers/pinctrl/pinctrl-ab8500.c [new file with mode: 0644]
drivers/pinctrl/pinctrl-ab8505.c [new file with mode: 0644]
drivers/pinctrl/pinctrl-ab8540.c [new file with mode: 0644]
drivers/pinctrl/pinctrl-ab9540.c [new file with mode: 0644]
drivers/pinctrl/pinctrl-abx500.c [new file with mode: 0644]
drivers/pinctrl/pinctrl-abx500.h [new file with mode: 0644]
drivers/pinctrl/pinctrl-exynos5440.c
drivers/pinctrl/pinctrl-falcon.c
drivers/pinctrl/pinctrl-lantiq.c
drivers/pinctrl/pinctrl-lantiq.h
drivers/pinctrl/pinctrl-mxs.c
drivers/pinctrl/pinctrl-nomadik.c
drivers/pinctrl/pinctrl-samsung.c
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/pinctrl-sirf.c
drivers/pinctrl/pinctrl-sunxi.c [new file with mode: 0644]
drivers/pinctrl/pinctrl-sunxi.h [new file with mode: 0644]
drivers/pinctrl/pinctrl-tegra.c
drivers/pinctrl/pinctrl-tegra.h
drivers/pinctrl/pinctrl-tegra114.c [new file with mode: 0644]
drivers/pinctrl/pinctrl-tegra20.c
drivers/pinctrl/pinctrl-tegra30.c
drivers/pinctrl/pinctrl-xway.c
drivers/platform/x86/acer-wmi.c
drivers/platform/x86/asus-laptop.c
drivers/platform/x86/eeepc-laptop.c
drivers/platform/x86/ibm_rtl.c
drivers/platform/x86/samsung-laptop.c
drivers/platform/x86/sony-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/power/88pm860x_battery.c
drivers/power/Kconfig
drivers/power/Makefile
drivers/power/ab8500_bmdata.c
drivers/power/ab8500_btemp.c
drivers/power/ab8500_charger.c
drivers/power/ab8500_fg.c
drivers/power/abx500_chargalg.c
drivers/power/bq2415x_charger.c
drivers/power/bq27x00_battery.c
drivers/power/charger-manager.c
drivers/power/da9030_battery.c
drivers/power/da9052-battery.c
drivers/power/ds2782_battery.c
drivers/power/generic-adc-battery.c
drivers/power/goldfish_battery.c [new file with mode: 0644]
drivers/power/lp8727_charger.c
drivers/power/lp8788-charger.c
drivers/power/max17040_battery.c
drivers/power/pm2301_charger.c [new file with mode: 0644]
drivers/power/pm2301_charger.h [new file with mode: 0644]
drivers/power/power_supply_sysfs.c
drivers/power/reset/Kconfig
drivers/power/reset/Makefile
drivers/power/reset/qnap-poweroff.c [new file with mode: 0644]
drivers/power/reset/restart-poweroff.c [new file with mode: 0644]
drivers/regulator/88pm8607.c
drivers/regulator/Kconfig
drivers/regulator/Makefile
drivers/regulator/anatop-regulator.c
drivers/regulator/arizona-micsupp.c
drivers/regulator/as3711-regulator.c
drivers/regulator/core.c
drivers/regulator/da9052-regulator.c
drivers/regulator/da9055-regulator.c
drivers/regulator/dbx500-prcmu.c
drivers/regulator/gpio-regulator.c
drivers/regulator/lp3971.c
drivers/regulator/lp3972.c
drivers/regulator/lp872x.c
drivers/regulator/lp8755.c [new file with mode: 0644]
drivers/regulator/lp8788-buck.c
drivers/regulator/lp8788-ldo.c
drivers/regulator/max77686.c
drivers/regulator/max8907-regulator.c
drivers/regulator/max8925-regulator.c
drivers/regulator/max8997.c
drivers/regulator/max8998.c
drivers/regulator/mc13892-regulator.c
drivers/regulator/mc13xxx-regulator-core.c
drivers/regulator/mc13xxx.h
drivers/regulator/of_regulator.c
drivers/regulator/palmas-regulator.c
drivers/regulator/s2mps11.c
drivers/regulator/s5m8767.c
drivers/regulator/tps51632-regulator.c
drivers/regulator/tps6507x-regulator.c
drivers/regulator/tps65090-regulator.c
drivers/regulator/tps65217-regulator.c
drivers/regulator/tps6586x-regulator.c
drivers/regulator/tps65910-regulator.c
drivers/regulator/tps80031-regulator.c
drivers/rtc/Kconfig
drivers/rtc/Makefile
drivers/rtc/class.c
drivers/rtc/rtc-da9055.c
drivers/rtc/rtc-isl1208.c
drivers/rtc/rtc-pl031.c
drivers/rtc/rtc-vt8500.c
drivers/rtc/systohc.c [new file with mode: 0644]
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_fba.c
drivers/s390/char/con3215.c
drivers/s390/char/raw3270.c
drivers/s390/char/sclp.c
drivers/s390/char/tape_34xx.c
drivers/s390/char/tape_3590.c
drivers/s390/char/vmur.c
drivers/s390/cio/chsc.c
drivers/s390/cio/chsc_sch.c
drivers/s390/cio/cio.c
drivers/s390/cio/device.c
drivers/s390/cio/device.h
drivers/s390/cio/eadm_sch.c
drivers/s390/cio/qdio_thinint.c
drivers/s390/crypto/ap_bus.c
drivers/s390/kvm/kvm_virtio.c
drivers/s390/net/claw.c
drivers/s390/net/ctcm_main.c
drivers/s390/net/lcs.c
drivers/scsi/isci/init.c
drivers/sh/clk/cpg.c
drivers/spi/spi.c
drivers/ssb/driver_gpio.c
drivers/ssb/main.c
drivers/ssb/ssb_private.h
drivers/staging/comedi/Kconfig
drivers/staging/comedi/comedi_fops.c
drivers/staging/comedi/drivers/comedi_test.c
drivers/staging/comedi/drivers/ni_pcimio.c
drivers/staging/csr/bh.c
drivers/staging/csr/unifi_sme.c
drivers/staging/fwserial/Kconfig
drivers/staging/fwserial/TODO
drivers/staging/fwserial/fwserial.c
drivers/staging/fwserial/fwserial.h
drivers/staging/iio/adc/mxs-lradc.c
drivers/staging/iio/gyro/Kconfig
drivers/staging/iio/gyro/adis16080_core.c
drivers/staging/iio/trigger/Kconfig
drivers/staging/imx-drm/imx-drm-core.c
drivers/staging/imx-drm/ipu-v3/ipu-common.c
drivers/staging/imx-drm/ipuv3-crtc.c
drivers/staging/omapdrm/Kconfig
drivers/staging/omapdrm/Makefile
drivers/staging/omapdrm/TODO
drivers/staging/omapdrm/omap_connector.c
drivers/staging/omapdrm/omap_crtc.c
drivers/staging/omapdrm/omap_drv.c
drivers/staging/omapdrm/omap_drv.h
drivers/staging/omapdrm/omap_encoder.c
drivers/staging/omapdrm/omap_gem_dmabuf.c
drivers/staging/omapdrm/omap_irq.c [new file with mode: 0644]
drivers/staging/omapdrm/omap_plane.c
drivers/staging/rtl8187se/r8180_core.c
drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
drivers/staging/rtl8192e/rtl8192e/rtl_core.c
drivers/staging/rtl8712/usb_intf.c
drivers/staging/sb105x/Kconfig
drivers/staging/sb105x/sb_pci_mp.c
drivers/staging/speakup/synth.c
drivers/staging/tidspbridge/core/_tiomap.h
drivers/staging/tidspbridge/core/dsp-clock.c
drivers/staging/tidspbridge/core/wdt.c
drivers/staging/vme/devices/vme_pio2_core.c
drivers/staging/vt6656/bssdb.h
drivers/staging/vt6656/int.h
drivers/staging/vt6656/iocmd.h
drivers/staging/vt6656/iowpa.h
drivers/staging/wlan-ng/cfg80211.c
drivers/staging/wlan-ng/prism2mgmt.c
drivers/staging/zram/zram_drv.c
drivers/target/iscsi/iscsi_target_erl2.c
drivers/target/target_core_alua.c
drivers/target/target_core_device.c
drivers/target/target_core_fabric_configfs.c
drivers/target/target_core_pr.c
drivers/target/target_core_sbc.c
drivers/target/target_core_spc.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tfc_sess.c
drivers/tty/pty.c
drivers/tty/serial/8250/8250.c
drivers/tty/serial/8250/8250.h
drivers/tty/serial/8250/8250_dw.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/ifx6x60.c
drivers/tty/serial/max3100.c
drivers/tty/serial/mxs-auart.c
drivers/tty/serial/samsung.c
drivers/tty/serial/vt8500_serial.c
drivers/tty/sysrq.c
drivers/usb/Kconfig
drivers/usb/chipidea/host.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/hcd.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/dwc3/debugfs.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/amd5536udc.c
drivers/usb/gadget/dummy_hcd.c
drivers/usb/gadget/f_fs.c
drivers/usb/gadget/fsl_mxc_udc.c
drivers/usb/gadget/fsl_udc_core.c
drivers/usb/gadget/fsl_usb2_udc.h
drivers/usb/gadget/mv_udc_core.c
drivers/usb/gadget/s3c-hsotg.c
drivers/usb/gadget/tcm_usb_gadget.c
drivers/usb/gadget/u_serial.c
drivers/usb/host/Kconfig
drivers/usb/host/Makefile
drivers/usb/host/ehci-fsl.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-hub.c
drivers/usb/host/ehci-mv.c
drivers/usb/host/ehci-mxc.c
drivers/usb/host/ehci-pci.c
drivers/usb/host/ehci-q.c
drivers/usb/host/ehci-sched.c
drivers/usb/host/ehci-timer.c
drivers/usb/host/ehci.h
drivers/usb/host/fsl-mph-dr-of.c
drivers/usb/host/imx21-hcd.c
drivers/usb/host/ohci-tmio.c
drivers/usb/host/pci-quirks.c
drivers/usb/host/uhci-hcd.c
drivers/usb/host/uhci-hub.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/misc/usbtest.c
drivers/usb/musb/cppi_dma.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_dsps.c
drivers/usb/otg/Kconfig
drivers/usb/otg/mv_otg.c
drivers/usb/renesas_usbhs/mod_gadget.c
drivers/usb/renesas_usbhs/mod_host.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/io_ti.c
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/usb/storage/initializers.c
drivers/usb/storage/initializers.h
drivers/usb/storage/unusual_devs.h
drivers/usb/storage/usb.c
drivers/usb/storage/usual-tables.c
drivers/vfio/pci/vfio_pci_rdwr.c
drivers/vhost/net.c
drivers/vhost/tcm_vhost.c
drivers/vhost/vhost.c
drivers/vhost/vhost.h
drivers/video/exynos/exynos_dp_core.c
drivers/video/imxfb.c
drivers/video/omap2/dss/dss_features.c
drivers/video/ssd1307fb.c
drivers/xen/cpu_hotplug.c
drivers/xen/events.c
drivers/xen/gntdev.c
drivers/xen/grant-table.c
drivers/xen/pcpu.c
drivers/xen/privcmd.c
drivers/xen/xen-pciback/pciback.h
drivers/xen/xen-pciback/pciback_ops.c
fs/Kconfig
fs/binfmt_elf.c
fs/binfmt_elf_fdpic.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_map.c
fs/btrfs/extent_map.h
fs/btrfs/file-item.c
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ordered-data.c
fs/btrfs/qgroup.c
fs/btrfs/scrub.c
fs/btrfs/send.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/buffer.c
fs/cifs/cifs_dfs_ref.c
fs/cifs/connect.c
fs/debugfs/inode.c
fs/dlm/user.c
fs/exec.c
fs/f2fs/acl.c
fs/f2fs/checkpoint.c
fs/f2fs/data.c
fs/f2fs/debug.c
fs/f2fs/dir.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/gc.c
fs/f2fs/inode.c
fs/f2fs/node.c
fs/f2fs/recovery.c
fs/f2fs/segment.c
fs/f2fs/super.c
fs/f2fs/xattr.c
fs/fuse/Kconfig
fs/fuse/cuse.c
fs/fuse/dev.c
fs/fuse/file.c
fs/gfs2/lock_dlm.c
fs/jbd/journal.c
fs/nfs/namespace.c
fs/nfs/nfs4client.c
fs/nfs/nfs4state.c
fs/nfs/super.c
fs/nilfs2/ioctl.c
fs/proc/array.c
fs/pstore/ram.c
fs/select.c
fs/seq_file.c
fs/udf/super.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_bmap.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf.h
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_buf_item.h
fs/xfs/xfs_dfrag.c
fs/xfs/xfs_dir2_block.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_qm_syscalls.c
fs/xfs/xfs_trace.h
fs/xfs/xfs_trans_buf.c
include/asm-generic/cputime.h
include/asm-generic/cputime_jiffies.h [new file with mode: 0644]
include/asm-generic/cputime_nsecs.h [new file with mode: 0644]
include/asm-generic/dma-mapping-broken.h
include/asm-generic/pgtable.h
include/asm-generic/syscalls.h
include/drm/drm_mm.h
include/linux/aer.h
include/linux/async.h
include/linux/ata.h
include/linux/audit.h
include/linux/bma150.h
include/linux/cgroup.h
include/linux/clockchips.h
include/linux/compaction.h
include/linux/context_tracking.h
include/linux/cpu_rmap.h
include/linux/cpuidle.h
include/linux/device.h
include/linux/efi.h
include/linux/elevator.h
include/linux/ftrace.h
include/linux/ftrace_event.h
include/linux/hardirq.h
include/linux/hwmon.h
include/linux/init.h
include/linux/init_task.h
include/linux/input/adxl34x.h
include/linux/input/tegra_kbc.h [deleted file]
include/linux/interrupt.h
include/linux/irq.h
include/linux/irq_work.h
include/linux/kernel_stat.h
include/linux/kprobes.h
include/linux/kvm_host.h
include/linux/libata.h
include/linux/libps2.h
include/linux/llist.h
include/linux/lockdep.h
include/linux/memcontrol.h
include/linux/mfd/abx500.h
include/linux/mfd/abx500/ab8500-bm.h
include/linux/mfd/abx500/ab8500-gpio.h
include/linux/mfd/abx500/ab8500.h
include/linux/mfd/abx500/ux500_chargalg.h
include/linux/mfd/da9052/da9052.h
include/linux/mfd/da9052/reg.h
include/linux/mfd/rtsx_common.h
include/linux/mfd/rtsx_pci.h
include/linux/mfd/samsung/core.h
include/linux/mm.h
include/linux/mmu_notifier.h
include/linux/module.h
include/linux/netdevice.h
include/linux/pci.h
include/linux/perf_event.h
include/linux/pinctrl/devinfo.h [new file with mode: 0644]
include/linux/pinctrl/pinconf-generic.h
include/linux/pinctrl/pinctrl.h
include/linux/platform_data/imx-iram.h [moved from arch/arm/mach-imx/iram.h with 100% similarity]
include/linux/platform_data/lp8755.h [new file with mode: 0644]
include/linux/platform_data/max6697.h [new file with mode: 0644]
include/linux/pm2301_charger.h [new file with mode: 0644]
include/linux/power/bq2415x_charger.h
include/linux/power_supply.h
include/linux/printk.h
include/linux/profile.h
include/linux/ptrace.h
include/linux/rbtree_augmented.h
include/linux/rcupdate.h
include/linux/regmap.h
include/linux/regulator/driver.h
include/linux/ring_buffer.h
include/linux/rtc.h
include/linux/rwsem.h
include/linux/sched.h
include/linux/sched/rt.h [new file with mode: 0644]
include/linux/sched/sysctl.h [new file with mode: 0644]
include/linux/security.h
include/linux/smpboot.h
include/linux/srcu.h
include/linux/tick.h
include/linux/time.h
include/linux/tsacct_kern.h
include/linux/uprobes.h
include/linux/usb.h
include/linux/usb/hcd.h
include/linux/usb/usbnet.h
include/linux/vtime.h
include/linux/workqueue.h
include/net/ip.h
include/net/netfilter/nf_conntrack_core.h
include/net/transp_v6.h
include/sound/cs4271.h
include/sound/soc.h
include/target/target_core_base.h
include/trace/events/ras.h [new file with mode: 0644]
include/trace/events/rcu.h
include/trace/events/workqueue.h
include/uapi/linux/audit.h
include/uapi/linux/auto_fs.h
include/uapi/linux/perf_event.h
include/uapi/linux/serial_core.h
include/uapi/linux/usb/ch9.h
init/Kconfig
init/do_mounts_initrd.c
init/init_task.c
init/initramfs.c
init/main.c
kernel/acct.c
kernel/async.c
kernel/audit.c
kernel/audit_tree.c
kernel/audit_watch.c
kernel/auditfilter.c
kernel/auditsc.c
kernel/cgroup.c
kernel/compat.c
kernel/context_tracking.c
kernel/cpu.c
kernel/cpuset.c
kernel/debug/kdb/kdb_main.c
kernel/delayacct.c
kernel/events/core.c
kernel/events/hw_breakpoint.c
kernel/events/uprobes.c
kernel/exit.c
kernel/fork.c
kernel/futex.c
kernel/hrtimer.c
kernel/irq/chip.c
kernel/irq/manage.c
kernel/irq/spurious.c
kernel/irq_work.c
kernel/kmod.c
kernel/kprobes.c
kernel/module.c
kernel/mutex.c
kernel/pid.c
kernel/posix-cpu-timers.c
kernel/posix-timers.c
kernel/printk.c
kernel/profile.c
kernel/ptrace.c
kernel/rcu.h
kernel/rcupdate.c
kernel/rcutiny.c
kernel/rcutiny_plugin.h
kernel/rcutorture.c
kernel/rcutree.c
kernel/rcutree.h
kernel/rcutree_plugin.h
kernel/rtmutex-debug.c
kernel/rtmutex-tester.c
kernel/rtmutex.c
kernel/rwsem.c
kernel/sched/auto_group.c
kernel/sched/core.c
kernel/sched/cpupri.c
kernel/sched/cputime.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/signal.c
kernel/smp.c
kernel/smpboot.c
kernel/softirq.c
kernel/srcu.c
kernel/stop_machine.c
kernel/sysctl.c
kernel/time.c
kernel/time/Kconfig
kernel/time/ntp.c
kernel/time/tick-broadcast.c
kernel/time/tick-sched.c
kernel/time/timekeeping.c
kernel/timeconst.pl
kernel/timer.c
kernel/trace/Kconfig
kernel/trace/blktrace.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_clock.c
kernel/trace/trace_events.c
kernel/trace/trace_functions.c
kernel/trace/trace_functions_graph.c
kernel/trace/trace_probe.h
kernel/trace/trace_sched_wakeup.c
kernel/trace/trace_selftest.c
kernel/trace/trace_syscalls.c
kernel/trace/trace_uprobe.c
kernel/tsacct.c
kernel/watchdog.c
kernel/workqueue.c
kernel/workqueue_internal.h [new file with mode: 0644]
kernel/workqueue_sched.h [deleted file]
lib/Kconfig.debug
lib/bug.c
lib/cpu_rmap.c
lib/digsig.c
lib/rbtree.c
mm/bootmem.c
mm/compaction.c
mm/huge_memory.c
mm/hugetlb.c
mm/internal.h
mm/memblock.c
mm/memcontrol.c
mm/migrate.c
mm/mlock.c
mm/mmap.c
mm/mremap.c
mm/nommu.c
mm/page-writeback.c
mm/page_alloc.c
net/batman-adv/distributed-arp-table.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hidp/core.c
net/bluetooth/l2cap_core.c
net/bluetooth/sco.c
net/bluetooth/smp.c
net/bridge/br_stp_bpdu.c
net/core/datagram.c
net/core/dev.c
net/core/pktgen.c
net/core/request_sock.c
net/core/scm.c
net/core/skbuff.c
net/ipv4/ah4.c
net/ipv4/arp.c
net/ipv4/datagram.c
net/ipv4/esp4.c
net/ipv4/ip_gre.c
net/ipv4/ip_sockglue.c
net/ipv4/ipcomp.c
net/ipv4/ping.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/ah6.c
net/ipv6/datagram.c
net/ipv6/esp6.c
net/ipv6/icmp.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6mr.c
net/ipv6/ipv6_sockglue.c
net/ipv6/netfilter/ip6t_NPT.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/iucv/iucv.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_ppp.c
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/mesh.c
net/mac80211/mesh.h
net/mac80211/mesh_hwmp.c
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/tx.c
net/netfilter/ipvs/ip_vs_proto_sctp.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/x_tables.c
net/netfilter/xt_CT.c
net/openvswitch/vport-netdev.c
net/packet/af_packet.c
net/rfkill/input.c
net/sched/sch_htb.c
net/sched/sch_netem.c
net/sctp/Kconfig
net/sctp/auth.c
net/sctp/endpointola.c
net/sctp/ipv6.c
net/sctp/outqueue.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
net/sctp/sysctl.c
net/sunrpc/clnt.c
net/sunrpc/sched.c
net/sunrpc/svcsock.c
net/sunrpc/xprt.c
net/wireless/core.c
net/wireless/scan.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_replay.c
samples/Kconfig
samples/Makefile
samples/seccomp/Makefile
samples/tracepoints/Makefile [deleted file]
samples/tracepoints/tp-samples-trace.h [deleted file]
samples/tracepoints/tracepoint-probe-sample.c [deleted file]
samples/tracepoints/tracepoint-probe-sample2.c [deleted file]
samples/tracepoints/tracepoint-sample.c [deleted file]
scripts/checkpatch.pl
security/capability.c
security/device_cgroup.c
security/integrity/evm/evm_crypto.c
security/security.c
security/selinux/hooks.c
security/selinux/include/classmap.h
security/selinux/include/objsec.h
sound/arm/pxa2xx-ac97-lib.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/rme9652/hdspm.c
sound/soc/codecs/arizona.c
sound/soc/codecs/arizona.h
sound/soc/codecs/cs4271.c
sound/soc/codecs/cs42l52.c
sound/soc/codecs/lm49453.c
sound/soc/codecs/sgtl5000.c
sound/soc/codecs/sta529.c
sound/soc/codecs/wm2000.c
sound/soc/codecs/wm2200.c
sound/soc/codecs/wm5100.c
sound/soc/codecs/wm5102.c
sound/soc/codecs/wm5110.c
sound/soc/codecs/wm_adsp.c
sound/soc/fsl/imx-pcm-dma.c
sound/soc/fsl/imx-pcm-fiq.c
sound/soc/fsl/imx-pcm.c
sound/soc/fsl/imx-pcm.h
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/soc/soc-pcm.c
sound/usb/mixer.c
sound/usb/mixer_maps.c
sound/usb/mixer_quirks.c
sound/usb/pcm.c
sound/usb/quirks-table.h
sound/usb/quirks.c
tools/Makefile
tools/cgroup/.gitignore [new file with mode: 0644]
tools/cgroup/Makefile [new file with mode: 0644]
tools/cgroup/cgroup_event_listener.c [moved from Documentation/cgroups/cgroup_event_listener.c with 54% similarity]
tools/lib/traceevent/event-parse.c
tools/lib/traceevent/event-parse.h
tools/lib/traceevent/event-utils.h
tools/lib/traceevent/parse-filter.c
tools/lib/traceevent/parse-utils.c
tools/lib/traceevent/trace-seq.c
tools/perf/Documentation/Makefile
tools/perf/Documentation/perf-annotate.txt
tools/perf/Documentation/perf-buildid-cache.txt
tools/perf/Documentation/perf-diff.txt
tools/perf/Documentation/perf-evlist.txt
tools/perf/Documentation/perf-report.txt
tools/perf/Documentation/perf-script-python.txt
tools/perf/Documentation/perf-stat.txt
tools/perf/Documentation/perf-test.txt
tools/perf/Documentation/perf-top.txt
tools/perf/MANIFEST
tools/perf/Makefile
tools/perf/arch/common.c
tools/perf/bench/bench.h
tools/perf/bench/numa.c [new file with mode: 0644]
tools/perf/builtin-annotate.c
tools/perf/builtin-bench.c
tools/perf/builtin-buildid-cache.c
tools/perf/builtin-buildid-list.c
tools/perf/builtin-diff.c
tools/perf/builtin-evlist.c
tools/perf/builtin-kmem.c
tools/perf/builtin-kvm.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-sched.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/config/feature-tests.mak
tools/perf/config/utilities.mak
tools/perf/perf.c
tools/perf/perf.h
tools/perf/scripts/perl/bin/workqueue-stats-record [deleted file]
tools/perf/scripts/perl/bin/workqueue-stats-report [deleted file]
tools/perf/scripts/perl/rwtop.pl
tools/perf/scripts/perl/workqueue-stats.pl [deleted file]
tools/perf/tests/attr.c
tools/perf/tests/attr.py
tools/perf/tests/attr/base-record
tools/perf/tests/attr/test-record-group
tools/perf/tests/attr/test-record-group1
tools/perf/tests/builtin-test.c
tools/perf/tests/evsel-roundtrip-name.c
tools/perf/tests/hists_link.c [new file with mode: 0644]
tools/perf/tests/mmap-basic.c
tools/perf/tests/open-syscall-all-cpus.c
tools/perf/tests/open-syscall.c
tools/perf/tests/parse-events.c
tools/perf/tests/perf-record.c
tools/perf/tests/pmu.c
tools/perf/tests/python-use.c [new file with mode: 0644]
tools/perf/tests/tests.h
tools/perf/tests/util.c [deleted file]
tools/perf/tests/vmlinux-kallsyms.c
tools/perf/ui/browser.c
tools/perf/ui/browsers/annotate.c
tools/perf/ui/browsers/hists.c
tools/perf/ui/gtk/annotate.c [new file with mode: 0644]
tools/perf/ui/gtk/browser.c
tools/perf/ui/gtk/gtk.h
tools/perf/ui/gtk/helpline.c
tools/perf/ui/gtk/hists.c [new file with mode: 0644]
tools/perf/ui/helpline.c
tools/perf/ui/helpline.h
tools/perf/ui/hist.c
tools/perf/ui/keysyms.h
tools/perf/ui/setup.c
tools/perf/ui/stdio/hist.c
tools/perf/ui/tui/helpline.c
tools/perf/ui/util.c
tools/perf/util/PERF-VERSION-GEN
tools/perf/util/annotate.c
tools/perf/util/annotate.h
tools/perf/util/callchain.c
tools/perf/util/callchain.h
tools/perf/util/cpumap.c
tools/perf/util/cpumap.h
tools/perf/util/debug.c
tools/perf/util/debug.h
tools/perf/util/dso.c
tools/perf/util/dso.h
tools/perf/util/event.c
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/header.c
tools/perf/util/header.h
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/include/linux/bitops.h
tools/perf/util/intlist.c
tools/perf/util/intlist.h
tools/perf/util/machine.c
tools/perf/util/machine.h
tools/perf/util/map.c
tools/perf/util/map.h
tools/perf/util/parse-events.c
tools/perf/util/parse-events.h
tools/perf/util/parse-events.y
tools/perf/util/pmu.c
tools/perf/util/pmu.h
tools/perf/util/pmu.y
tools/perf/util/probe-finder.c
tools/perf/util/python-ext-sources
tools/perf/util/python.c
tools/perf/util/scripting-engines/trace-event-perl.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/session.c
tools/perf/util/session.h
tools/perf/util/sort.c
tools/perf/util/sort.h
tools/perf/util/string.c
tools/perf/util/strlist.c
tools/perf/util/strlist.h
tools/perf/util/symbol-elf.c
tools/perf/util/symbol-minimal.c
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/sysfs.c
tools/perf/util/thread.c
tools/perf/util/thread.h
tools/perf/util/top.c
tools/perf/util/top.h
tools/perf/util/util.c
tools/perf/util/util.h
tools/vm/.gitignore [new file with mode: 0644]

diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-events b/Documentation/ABI/testing/sysfs-bus-event_source-devices-events
new file mode 100644 (file)
index 0000000..0adeb52
--- /dev/null
@@ -0,0 +1,62 @@
+What:          /sys/devices/cpu/events/
+               /sys/devices/cpu/events/branch-misses
+               /sys/devices/cpu/events/cache-references
+               /sys/devices/cpu/events/cache-misses
+               /sys/devices/cpu/events/stalled-cycles-frontend
+               /sys/devices/cpu/events/branch-instructions
+               /sys/devices/cpu/events/stalled-cycles-backend
+               /sys/devices/cpu/events/instructions
+               /sys/devices/cpu/events/cpu-cycles
+
+Date:          2013/01/08
+
+Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
+
+Description:   Generic performance monitoring events
+
+               A collection of performance monitoring events that may be
+               supported by many/most CPUs. These events can be monitored
+               using the 'perf(1)' tool.
+
+               The contents of each file would look like:
+
+                       event=0xNNNN
+
+               where 'N' is a hex digit and the number '0xNNNN' shows the
+               "raw code" for the perf event identified by the file's
+               "basename".
+
+
+What:          /sys/devices/cpu/events/PM_LD_MISS_L1
+               /sys/devices/cpu/events/PM_LD_REF_L1
+               /sys/devices/cpu/events/PM_CYC
+               /sys/devices/cpu/events/PM_BRU_FIN
+               /sys/devices/cpu/events/PM_GCT_NOSLOT_CYC
+               /sys/devices/cpu/events/PM_BRU_MPRED
+               /sys/devices/cpu/events/PM_INST_CMPL
+               /sys/devices/cpu/events/PM_CMPLU_STALL
+
+Date:          2013/01/08
+
+Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
+               Linux Powerpc mailing list <linuxppc-dev@ozlabs.org>
+
+Description:   POWER-systems specific performance monitoring events
+
+               A collection of performance monitoring events that may be
+               supported by the POWER CPU. These events can be monitored
+               using the 'perf(1)' tool.
+
+               These events may not be supported by other CPUs.
+
+               The contents of each file would look like:
+
+                       event=0xNNNN
+
+               where 'N' is a hex digit and the number '0xNNNN' shows the
+               "raw code" for the perf event identified by the file's
+               "basename".
+
+               Further, multiple terms like 'event=0xNNNN' can be specified
+               and separated with comma. All available terms are defined in
+               the /sys/bus/event_source/devices/<dev>/format file.
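
(Illustrative only, not part of the patch above: a minimal userspace reader
for the "event=0xNNNN" format documented here.  It assumes a CPU PMU that
actually exposes /sys/devices/cpu/events/cpu-cycles; the parsed value is
roughly what perf uses as the raw event code.)

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/devices/cpu/events/cpu-cycles", "r");
        unsigned long long code;
        char buf[128];

        if (!f)
                return 1;
        /* File contents look like "event=0xNNNN" as described above. */
        if (fgets(buf, sizeof(buf), f) && sscanf(buf, "event=%llx", &code) == 1)
                printf("raw code for cpu-cycles: %#llx\n", code);
        fclose(f);
        return 0;
}
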
diff --git a/Documentation/ABI/testing/sysfs-platform-ts5500 b/Documentation/ABI/testing/sysfs-platform-ts5500
new file mode 100644 (file)
index 0000000..c88375a
--- /dev/null
@@ -0,0 +1,47 @@
+What:          /sys/devices/platform/ts5500/adc
+Date:          January 2013
+KernelVersion: 3.7
+Contact:       "Savoir-faire Linux Inc." <kernel@savoirfairelinux.com>
+Description:
+               Indicates the presence of an A/D Converter. If it is present,
+               it will display "1", otherwise "0".
+
+What:          /sys/devices/platform/ts5500/ereset
+Date:          January 2013
+KernelVersion: 3.7
+Contact:       "Savoir-faire Linux Inc." <kernel@savoirfairelinux.com>
+Description:
+               Indicates the presence of an external reset. If it is present,
+               it will display "1", otherwise "0".
+
+What:          /sys/devices/platform/ts5500/id
+Date:          January 2013
+KernelVersion: 3.7
+Contact:       "Savoir-faire Linux Inc." <kernel@savoirfairelinux.com>
+Description:
+               Product ID of the TS board. TS-5500 ID is 0x60.
+
+What:          /sys/devices/platform/ts5500/jumpers
+Date:          January 2013
+KernelVersion: 3.7
+Contact:       "Savoir-faire Linux Inc." <kernel@savoirfairelinux.com>
+Description:
+               Bitfield showing the jumpers' state. If a jumper is present,
+               the corresponding bit is set. For instance, 0x0e means jumpers
+               2, 3 and 4 are set.
+
+What:          /sys/devices/platform/ts5500/rs485
+Date:          January 2013
+KernelVersion: 3.7
+Contact:       "Savoir-faire Linux Inc." <kernel@savoirfairelinux.com>
+Description:
+               Indicates the presence of the RS485 option. If it is present,
+               it will display "1", otherwise "0".
+
+What:          /sys/devices/platform/ts5500/sram
+Date:          January 2013
+KernelVersion: 3.7
+Contact:       "Savoir-faire Linux Inc." <kernel@savoirfairelinux.com>
+Description:
+               Indicates the presence of the SRAM option. If it is present,
+               it will display "1", otherwise "0".
index 53e6fca146d73919e9b7dbc4fd32631a18141795..a09178086c309fc4457104f2515884573d46af94 100644 (file)
@@ -127,15 +127,42 @@ on the number of vectors that can be allocated; pci_enable_msi_block()
 returns as soon as it finds any constraint that doesn't allow the
 call to succeed.
 
-4.2.3 pci_disable_msi
+4.2.3 pci_enable_msi_block_auto
+
+int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *count)
+
+This variation on pci_enable_msi() call allows a device driver to request
+the maximum possible number of MSIs.  The MSI specification only allows
+interrupts to be allocated in powers of two, up to a maximum of 2^5 (32).
+
+If this function returns a positive number, it indicates that it has
+succeeded and the returned value is the number of allocated interrupts. In
+this case, the function enables MSI on this device and updates dev->irq to
+be the lowest of the new interrupts assigned to it.  The other interrupts
+assigned to the device are in the range dev->irq to dev->irq + returned
+value - 1.
+
+If this function returns a negative number, it indicates an error and
+the driver should not attempt to request any more MSI interrupts for
+this device.
+
+If the device driver needs to know the number of interrupts the device
+supports, it can pass the pointer 'count' where that number is stored. The
+device driver must decide what action to take if pci_enable_msi_block_auto()
+succeeds, but returns a value less than the number of interrupts supported.
+If the device driver does not need to know the number of interrupts
+supported, it can set the pointer 'count' to NULL.
+
+4.2.4 pci_disable_msi
 
 void pci_disable_msi(struct pci_dev *dev)
 
 This function should be used to undo the effect of pci_enable_msi() or
-pci_enable_msi_block().  Calling it restores dev->irq to the pin-based
-interrupt number and frees the previously allocated message signaled
-interrupt(s).  The interrupt may subsequently be assigned to another
-device, so drivers should not cache the value of dev->irq.
+pci_enable_msi_block() or pci_enable_msi_block_auto().  Calling it restores
+dev->irq to the pin-based interrupt number and frees the previously
+allocated message signaled interrupt(s).  The interrupt may subsequently be
+assigned to another device, so drivers should not cache the value of
+dev->irq.
 
 Before calling this function, a device driver must always call free_irq()
 on any interrupt for which it previously called request_irq().
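For illustration, a driver's MSI setup path using the call described in
section 4.2.3 might look roughly like the sketch below; the foo_* names and
the overall structure are assumptions, not taken from this document.

    #include <linux/pci.h>
    #include <linux/interrupt.h>

    static irqreturn_t foo_isr(int irq, void *data)
    {
            /* handle one of the device's MSI vectors */
            return IRQ_HANDLED;
    }

    static int foo_setup_msi(struct pci_dev *pdev)
    {
            unsigned int supported;
            int nvec, i, ret;

            /* Ask for as many MSIs as possible; 'supported' receives the
             * number of interrupts the device can support. */
            nvec = pci_enable_msi_block_auto(pdev, &supported);
            if (nvec < 0)
                    return nvec;    /* fall back to legacy interrupts */

            /* Vectors are consecutive: pdev->irq .. pdev->irq + nvec - 1 */
            for (i = 0; i < nvec; i++) {
                    ret = request_irq(pdev->irq + i, foo_isr, 0, "foo", pdev);
                    if (ret)
                            goto err_free;
            }
            return 0;

    err_free:
            while (--i >= 0)
                    free_irq(pdev->irq + i, pdev);
            pci_disable_msi(pdev);
            return ret;
    }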
index 27f2b21a9d5cd5e040da0ab0e9cb9abbb69296a6..d9ca5be9b471d18d7c2640625d4845f9da8ff23a 100644 (file)
@@ -253,6 +253,8 @@ This performs an atomic exchange operation on the atomic variable v, setting
 the given new value.  It returns the old value that the atomic variable v had
 just before the operation.
 
+atomic_xchg requires explicit memory barriers around the operation.
+
        int atomic_cmpxchg(atomic_t *v, int old, int new);
 
 This performs an atomic compare exchange operation on the atomic value v,
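For reference, a common pattern built on atomic_cmpxchg() is a
compare-and-swap retry loop. The helper below is a hypothetical sketch (the
name and the bounded-increment semantics are illustrative assumptions), with
any additional barrier requirements left to the caller as discussed above.

    #include <linux/atomic.h>

    /* Hypothetical helper: increment *v, but never beyond 'limit'.
     * Returns the value observed before the increment. */
    static int bounded_inc(atomic_t *v, int limit)
    {
            int old, val;

            do {
                    old = atomic_read(v);
                    if (old >= limit)
                            return old;
                    val = old + 1;
                    /* retry if *v changed between the read and the cmpxchg */
            } while (atomic_cmpxchg(v, old, val) != old);

            return old;
    }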
index f78b90a35ad09a6bde9be02aa39dc5ca86c313f4..f5635a09c3f68e9e99c4446ac6efcd9b8b6b773a 100644 (file)
@@ -4,8 +4,6 @@ blkio-controller.txt
        - Description for Block IO Controller, implementation and usage details.
 cgroups.txt
        - Control Groups definition, implementation details, examples and API.
-cgroup_event_listener.c
-       - A user program for cgroup listener.
 cpuacct.txt
        - CPU Accounting Controller; account CPU usage for groups of tasks.
 cpusets.txt
index fc8fa97a09acd8124725351153cec5c0b9e283f6..ce94a83a7d9ae54e11bcc8483ac3a607516e0639 100644 (file)
@@ -399,8 +399,7 @@ Under below explanation, we assume CONFIG_MEM_RES_CTRL_SWAP=y.
 
  9.10 Memory thresholds
        Memory controller implements memory thresholds using cgroups notification
-       API. You can use Documentation/cgroups/cgroup_event_listener.c to test
-       it.
+       API. You can use tools/cgroup/cgroup_event_listener.c to test it.
 
        (Shell-A) Create cgroup and run event listener
        # mkdir /cgroup/A
index 728c38c242d631b88117958360c6f8441a72be4b..56fb62b09fc59ad757fc81de6ad6e478b3b0184b 100644 (file)
@@ -141,3 +141,4 @@ Version History
 1.2.0  Handle creation of arrays that contain failed devices.
 1.3.0  Added support for RAID 10
 1.3.1  Allow device replacement/rebuild for RAID 10
+1.3.2   Fix/improve redundancy checking for RAID10
diff --git a/Documentation/devicetree/bindings/i2c/ina209.txt b/Documentation/devicetree/bindings/i2c/ina209.txt
new file mode 100644 (file)
index 0000000..9dd2bee
--- /dev/null
@@ -0,0 +1,18 @@
+ina209 properties
+
+Required properties:
+- compatible: Must be "ti,ina209"
+- reg: I2C address
+
+Optional properties:
+
+- shunt-resistor
+       Shunt resistor value in micro-Ohm
+
+Example:
+
+temp-sensor@4c {
+       compatible = "ti,ina209";
+       reg = <0x4c>;
+       shunt-resistor = <5000>;
+};
diff --git a/Documentation/devicetree/bindings/i2c/max6697.txt b/Documentation/devicetree/bindings/i2c/max6697.txt
new file mode 100644 (file)
index 0000000..5f79399
--- /dev/null
@@ -0,0 +1,64 @@
+max6697 properties
+
+Required properties:
+- compatible:
+       Should be one of
+               maxim,max6581
+               maxim,max6602
+               maxim,max6622
+               maxim,max6636
+               maxim,max6689
+               maxim,max6693
+               maxim,max6694
+               maxim,max6697
+               maxim,max6698
+               maxim,max6699
+- reg: I2C address
+
+Optional properties:
+
+- smbus-timeout-disable
+       Set to disable SMBus timeout. If not specified, SMBus timeout will be
+       enabled.
+- extended-range-enable
+       Only valid for MAX6581. Set to enable extended temperature range.
+       Extended temperature will be disabled if not specified.
+- beta-compensation-enable
+       Only valid for MAX6693 and MAX6694. Set to enable beta compensation on
+       remote temperature channel 1.
+       Beta compensation will be disabled if not specified.
+- alert-mask
+       Alert bit mask. Alert disabled for bits set.
+       Select bit 0 for local temperature, bit 1..7 for remote temperatures.
+       If not specified, alert will be enabled for all channels.
+- over-temperature-mask
+       Over-temperature bit mask. Over-temperature reporting disabled for
+       bits set.
+       Select bit 0 for local temperature, bit 1..7 for remote temperatures.
+       If not specified, over-temperature reporting will be enabled for all
+       channels.
+- resistance-cancellation
+       Boolean for all chips other than MAX6581. Set to enable resistance
+       cancellation on remote temperature channel 1.
+       For MAX6581, resistance cancellation is enabled for all channels if
+       specified as a boolean, otherwise it is enabled as per the specified
+       bit mask.
+       Only supported for remote temperatures (bit 1..7).
+       If not specified, resistance cancellation will be disabled for all
+       channels.
+- transistor-ideality
+       For MAX6581 only. Two values; first is bit mask, second is ideality
+       select value as per MAX6581 data sheet. Select bit 1..7 for remote
+       channels.
+       Transistor ideality will be initialized to default (1.008) if not
+       specified.
+
+Example:
+
+temp-sensor@1a {
+       compatible = "maxim,max6697";
+       reg = <0x1a>;
+       smbus-timeout-disable;
+       resistance-cancellation;
+       alert-mask = <0x72>;
+       over-temperature-mask = <0x7f>;
+};
diff --git a/Documentation/devicetree/bindings/input/imx-keypad.txt b/Documentation/devicetree/bindings/input/imx-keypad.txt
new file mode 100644 (file)
index 0000000..2ebaf7d
--- /dev/null
@@ -0,0 +1,53 @@
+* Freescale i.MX Keypad Port(KPP) device tree bindings
+
+The KPP is designed to interface with a keypad matrix with 2-point contact
+or 3-point contact keys. The KPP is designed to simplify the software task
+of scanning a keypad matrix. The KPP is capable of detecting, debouncing,
+and decoding one or multiple keys pressed simultaneously on a keypad.
+
+Required SoC Specific Properties:
+- compatible: Should be "fsl,<soc>-kpp".
+
+- reg: Physical base address of the KPP and length of memory mapped
+  region.
+
+- interrupts: The KPP interrupt number to the CPU(s).
+
+- clocks: The clock provided by the SoC to the KPP. Some SoCs use a dummy
+clock (the clock for the KPP is provided by the SoC automatically).
+
+Required Board Specific Properties:
+- pinctrl-names: The definition can be found at
+pinctrl/pinctrl-bindings.txt.
+
+- pinctrl-0: The definition can be found at
+pinctrl/pinctrl-bindings.txt.
+
+- linux,keymap: The definition can be found at
+bindings/input/matrix-keymap.txt.
+
+Example:
+kpp: kpp@73f94000 {
+       compatible = "fsl,imx51-kpp", "fsl,imx21-kpp";
+       reg = <0x73f94000 0x4000>;
+       interrupts = <60>;
+       clocks = <&clks 0>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_kpp_1>;
+       linux,keymap = <0x00000067      /* KEY_UP */
+                       0x0001006c      /* KEY_DOWN */
+                       0x00020072      /* KEY_VOLUMEDOWN */
+                       0x00030066      /* KEY_HOME */
+                       0x0100006a      /* KEY_RIGHT */
+                       0x01010069      /* KEY_LEFT */
+                       0x0102001c      /* KEY_ENTER */
+                       0x01030073      /* KEY_VOLUMEUP */
+                       0x02000040      /* KEY_F6 */
+                       0x02010042      /* KEY_F8 */
+                       0x02020043      /* KEY_F9 */
+                       0x02030044      /* KEY_F10 */
+                       0x0300003b      /* KEY_F1 */
+                       0x0301003c      /* KEY_F2 */
+                       0x0302003d      /* KEY_F3 */
+                       0x03030074>;    /* KEY_POWER */
+};
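For reference, each linux,keymap cell in the example above packs the row,
column and Linux keycode into a single 32-bit value; the helper below is a
small sketch of that encoding (the function name is hypothetical).

    #include <stdint.h>

    /* Bits 31..24 hold the row, bits 23..16 the column, and bits 15..0 the
     * Linux keycode, as in the example cells above. */
    static uint32_t keymap_cell(uint8_t row, uint8_t col, uint16_t keycode)
    {
            return ((uint32_t)row << 24) | ((uint32_t)col << 16) | keycode;
    }

    /* keymap_cell(0, 0, 0x67) == 0x00000067, i.e. row 0, column 0, KEY_UP */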
index 72683be6de35ea18f17f6214e30087003c87c587..2995fae7ee474ce81b1a4fdeca3e195d77154ded 100644 (file)
@@ -1,7 +1,18 @@
 * Tegra keyboard controller
+The keyboard controller provides up to 24 pins for the matrix keypad. Any pin
+can be configured as a row or a column. For Tegra20/Tegra30, at most 8 pins
+can be used as columns and at most 16 pins as rows.
 
 Required properties:
 - compatible: "nvidia,tegra20-kbc"
+- reg: Register base address of KBC.
+- interrupts: Interrupt number for the KBC.
+- nvidia,kbc-row-pins: The KBC pins which are configured as rows. This is an
+  array of the pin numbers used as rows.
+- nvidia,kbc-col-pins: The KBC pins which are configured as columns. This is an
+  array of the pin numbers used as columns.
+- linux,keymap: The keymap for keys as described in the binding document
+  devicetree/bindings/input/matrix-keymap.txt.
 
 Optional properties, in addition to those specified by the shared
 matrix-keyboard bindings:
@@ -19,5 +30,16 @@ Example:
 keyboard: keyboard {
        compatible = "nvidia,tegra20-kbc";
        reg = <0x7000e200 0x100>;
+       interrupts = <0 85 0x04>;
        nvidia,ghost-filter;
+       nvidia,debounce-delay-ms = <640>;
+       nvidia,kbc-row-pins = <0 1 2>;    /* pin 0, 1, 2 as rows */
+       nvidia,kbc-col-pins = <11 12 13>; /* pin 11, 12, 13 as columns */
+       linux,keymap = <0x00000074
+                       0x00010067
+                       0x00020066
+                       0x01010068
+                       0x02000069
+                       0x02010070
+                       0x02020071>;
 };
diff --git a/Documentation/devicetree/bindings/mfd/tps6507x.txt b/Documentation/devicetree/bindings/mfd/tps6507x.txt
new file mode 100755 (executable)
index 0000000..8fffa3c
--- /dev/null
@@ -0,0 +1,91 @@
+TPS6507x Power Management Integrated Circuit
+
+Required properties:
+- compatible: "ti,tps6507x"
+- reg: I2C slave address
+- regulators: This is the list of child nodes that specify the regulator
+  initialization data for defined regulators. Not all regulators for the
+  given device need to be present. The definition for each of these nodes
+  is defined using the standard binding for regulators found at
+  Documentation/devicetree/bindings/regulator/regulator.txt.
+  The regulator is matched with the regulator-compatible.
+
+  The valid regulator-compatible values are:
+  tps6507x: vdcdc1, vdcdc2, vdcdc3, vldo1, vldo2
+- xxx-supply: Input voltage supply regulator.
+  These entries are required if regulators are enabled for a device.
+  If these properties are missing, the regulator registration may fail.
+  If an input supply is powered from a battery or an always-on supply,
+  these properties are still required and must point to the node handle
+  of that always-on power supply.
+  tps6507x:
+       vindcdc1_2-supply: VDCDC1 and VDCDC2 input.
+       vindcdc3-supply  : VDCDC3 input.
+       vldo1_2-supply   : VLDO1 and VLDO2 input.
+
+Regulator Optional properties:
+- defdcdc_default: A property of the DCDC2 and DCDC3 regulators.
+                       0: If defdcdc pin of DCDC2/DCDC3 is pulled to GND.
+                       1: If defdcdc pin of DCDC2/DCDC3 is driven HIGH.
+  If this property is not defined, it defaults to 0 (not enabled).
+
+Example:
+
+       pmu: tps6507x@48 {
+               compatible = "ti,tps6507x";
+               reg = <0x48>;
+
+               vindcdc1_2-supply = <&vbat>;
+               vindcdc3-supply = <...>;
+               vinldo1_2-supply = <...>;
+
+               regulators {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       vdcdc1_reg: regulator@0 {
+                               regulator-compatible = "VDCDC1";
+                               reg = <0>;
+                               regulator-min-microvolt = <3150000>;
+                               regulator-max-microvolt = <3450000>;
+                               regulator-always-on;
+                               regulator-boot-on;
+                       };
+                       vdcdc2_reg: regulator@1 {
+                               regulator-compatible = "VDCDC2";
+                               reg = <1>;
+                               regulator-min-microvolt = <1710000>;
+                               regulator-max-microvolt = <3450000>;
+                               regulator-always-on;
+                               regulator-boot-on;
+                               defdcdc_default = <1>;
+                       };
+                       vdcdc3_reg: regulator@2 {
+                               regulator-compatible = "VDCDC3";
+                               reg = <2>;
+                               regulator-min-microvolt = <950000>;
+                               regulator-max-microvolt = <1350000>;
+                               regulator-always-on;
+                               regulator-boot-on;
+                               defdcdc_default = <1>;
+                       };
+                       ldo1_reg: regulator@3 {
+                               regulator-compatible = "LDO1";
+                               reg = <3>;
+                               regulator-min-microvolt = <1710000>;
+                               regulator-max-microvolt = <1890000>;
+                               regulator-always-on;
+                               regulator-boot-on;
+                       };
+                       ldo2_reg: regulator@4 {
+                               regulator-compatible = "LDO2";
+                               reg = <4>;
+                               regulator-min-microvolt = <1140000>;
+                               regulator-max-microvolt = <1320000>;
+                               regulator-always-on;
+                               regulator-boot-on;
+                       };
+               };
+
+       };
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
new file mode 100644 (file)
index 0000000..dff0e5f
--- /dev/null
@@ -0,0 +1,60 @@
+* Allwinner A1X Pin Controller
+
+The pins controlled by sunXi pin controller are organized in banks,
+each bank has 32 pins.  Each pin has 7 multiplexing functions, with
+the first two functions being GPIO in and out. The configuration on
+the pins includes drive strength and pull-up.
+
+Required properties:
+- compatible: "allwinner,<soc>-pinctrl". Supported SoCs for now are:
+  sun5i-a13.
+- reg: Should contain the register physical address and length for the
+  pin controller.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices.
+
+A pinctrl node should contain at least one subnode representing the
+pinctrl groups available on the machine. Each subnode will list the
+pins it needs, and how they should be configured, with regard to muxer
+configuration, drive strength and pullups. If one of these options is
+not set, its actual value will be unspecified.
+
+Required subnode-properties:
+
+- allwinner,pins: List of strings containing the pin name.
+- allwinner,function: Function to mux the pins listed above to.
+
+Optional subnode-properties:
+- allwinner,drive: Integer. Represents the current sent to the pin
+    0: 10 mA
+    1: 20 mA
+    2: 30 mA
+    3: 40 mA
+- allwinner,pull: Integer.
+    0: No resistor
+    1: Pull-up resistor
+    2: Pull-down resistor
+
+Examples:
+
+pinctrl@01c20800 {
+       compatible = "allwinner,sun5i-a13-pinctrl";
+       reg = <0x01c20800 0x400>;
+       #address-cells = <1>;
+       #size-cells = <0>;
+
+       uart1_pins_a: uart1@0 {
+               allwinner,pins = "PE10", "PE11";
+               allwinner,function = "uart1";
+               allwinner,drive = <0>;
+               allwinner,pull = <0>;
+       };
+
+       uart1_pins_b: uart1@1 {
+               allwinner,pins = "PG3", "PG4";
+               allwinner,function = "uart1";
+               allwinner,drive = <0>;
+               allwinner,pull = <0>;
+       };
+};
index 3a268127b0547e08f167635d2b14a4a04e9a3ac8..bc50899e0c81f8afcd7888f5200bcb7ef5dc32d2 100644 (file)
@@ -81,7 +81,8 @@ PA31  TXD4
 Required properties for pin configuration node:
 - atmel,pins: 4 integers array, represents a group of pins mux and config
   setting. The format is atmel,pins = <PIN_BANK PIN_BANK_NUM PERIPH CONFIG>.
-  The PERIPH 0 means gpio.
+  The PERIPH 0 means gpio, PERIPH 1 is periph A, PERIPH 2 is periph B...
+  PIN_BANK 0 is pioA, PIN_BANK 1 is pioB...
 
 Bits used for CONFIG:
 PULL_UP                (1 << 0): indicate this pin need a pull up.
@@ -126,7 +127,7 @@ pinctrl@fffff400 {
                pinctrl_dbgu: dbgu-0 {
                        atmel,pins =
                                <1 14 0x1 0x0   /* PB14 periph A */
-                                1 15 0x1 0x1>; /* PB15 periph with pullup */
+                                1 15 0x1 0x1>; /* PB15 periph with pullup */
                };
        };
 };
diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra114-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra114-pinmux.txt
new file mode 100644 (file)
index 0000000..e204d00
--- /dev/null
@@ -0,0 +1,120 @@
+NVIDIA Tegra114 pinmux controller
+
+The Tegra114 pinctrl binding is very similar to the Tegra20 and Tegra30
+pinctrl bindings, as described in nvidia,tegra20-pinmux.txt and
+nvidia,tegra30-pinmux.txt. In fact, this document assumes those bindings as
+a baseline, and only documents the differences from them.
+
+Required properties:
+- compatible: "nvidia,tegra114-pinmux"
+- reg: Should contain the register physical address and length for each of
+  the pad control and mux registers. The first address range must be the
+  drive strength pad control registers and the second must be the pinmux
+  registers.
+
+Tegra114 adds the following optional properties for pin configuration subnodes:
+- nvidia,enable-input: Integer. Enable the pin's input path. 0: no, 1: yes.
+- nvidia,open-drain: Integer. Enable open drain mode. 0: no, 1: yes.
+- nvidia,lock: Integer. Lock the pin configuration against further changes
+    until reset. 0: no, 1: yes.
+- nvidia,io-reset: Integer. Reset the IO path. 0: no, 1: yes.
+- nvidia,rcv-sel: Integer. Select VIL/VIH receivers. 0: normal, 1: high.
+- nvidia,drive-type: Integer. Valid range 0...3.
+
+As with Tegra20 and Tegra30, see the Tegra TRM for complete details regarding
+which groups support which functionality.
+
+Valid values for pin and group names are:
+
+  per-pin mux groups:
+
+    These all support nvidia,function, nvidia,tristate, nvidia,pull,
+    nvidia,enable-input, nvidia,lock. Some support nvidia,open-drain,
+    nvidia,io-reset and nvidia,rcv-sel.
+
+    ulpi_data0_po1, ulpi_data1_po2, ulpi_data2_po3, ulpi_data3_po4,
+    ulpi_data4_po5, ulpi_data5_po6, ulpi_data6_po7, ulpi_data7_po0,
+    ulpi_clk_py0, ulpi_dir_py1, ulpi_nxt_py2, ulpi_stp_py3, dap3_fs_pp0,
+    dap3_din_pp1, dap3_dout_pp2, dap3_sclk_pp3, pv0, pv1, sdmmc1_clk_pz0,
+    sdmmc1_cmd_pz1, sdmmc1_dat3_py4, sdmmc1_dat2_py5, sdmmc1_dat1_py6,
+    sdmmc1_dat0_py7, clk2_out_pw5, clk2_req_pcc5, hdmi_int_pn7, ddc_scl_pv4,
+    ddc_sda_pv5, uart2_rxd_pc3, uart2_txd_pc2, uart2_rts_n_pj6,
+    uart2_cts_n_pj5, uart3_txd_pw6, uart3_rxd_pw7, uart3_cts_n_pa1,
+    uart3_rts_n_pc0, pu0, pu1, pu2, pu3, pu4, pu5, pu6, gen1_i2c_sda_pc5,
+    gen1_i2c_scl_pc4, dap4_fs_pp4, dap4_din_pp5, dap4_dout_pp6, dap4_sclk_pp7,
+    clk3_out_pee0, clk3_req_pee1, gmi_wp_n_pc7, gmi_iordy_pi5, gmi_wait_pi7,
+    gmi_adv_n_pk0, gmi_clk_pk1, gmi_cs0_n_pj0, gmi_cs1_n_pj2, gmi_cs2_n_pk3,
+    gmi_cs3_n_pk4, gmi_cs4_n_pk2, gmi_cs6_n_pi3, gmi_cs7_n_pi6, gmi_ad0_pg0,
+    gmi_ad1_pg1, gmi_ad2_pg2, gmi_ad3_pg3, gmi_ad4_pg4, gmi_ad5_pg5,
+    gmi_ad6_pg6, gmi_ad7_pg7, gmi_ad8_ph0, gmi_ad9_ph1, gmi_ad10_ph2,
+    gmi_ad11_ph3, gmi_ad12_ph4, gmi_ad13_ph5, gmi_ad14_ph6, gmi_ad15_ph7,
+    gmi_a16_pj7, gmi_a17_pb0, gmi_a18_pb1, gmi_a19_pk7, gmi_wr_n_pi0,
+    gmi_oe_n_pi1, gmi_dqs_p_pj3, gmi_rst_n_pi4, gen2_i2c_scl_pt5,
+    gen2_i2c_sda_pt6, sdmmc4_clk_pcc4, sdmmc4_cmd_pt7, sdmmc4_dat0_paa0,
+    sdmmc4_dat1_paa1, sdmmc4_dat2_paa2, sdmmc4_dat3_paa3, sdmmc4_dat4_paa4,
+    sdmmc4_dat5_paa5, sdmmc4_dat6_paa6, sdmmc4_dat7_paa7, cam_mclk_pcc0,
+    pcc1, pbb0, cam_i2c_scl_pbb1, cam_i2c_sda_pbb2, pbb3, pbb4, pbb5, pbb6,
+    pbb7, pcc2, pwr_i2c_scl_pz6, pwr_i2c_sda_pz7, kb_row0_pr0, kb_row1_pr1,
+    kb_row2_pr2, kb_row3_pr3, kb_row4_pr4, kb_row5_pr5, kb_row6_pr6,
+    kb_row7_pr7, kb_row8_ps0, kb_row9_ps1, kb_row10_ps2, kb_col0_pq0,
+    kb_col1_pq1, kb_col2_pq2, kb_col3_pq3, kb_col4_pq4, kb_col5_pq5,
+    kb_col6_pq6, kb_col7_pq7, clk_32k_out_pa0, sys_clk_req_pz5, core_pwr_req,
+    cpu_pwr_req, pwr_int_n, owr, dap1_fs_pn0, dap1_din_pn1, dap1_dout_pn2,
+    dap1_sclk_pn3, clk1_req_pee2, clk1_out_pw4, spdif_in_pk6, spdif_out_pk5,
+    dap2_fs_pa2, dap2_din_pa4, dap2_dout_pa5, dap2_sclk_pa3, dvfs_pwm_px0,
+    gpio_x1_aud_px1, gpio_x3_aud_px3, dvfs_clk_px2, gpio_x4_aud_px4,
+    gpio_x5_aud_px5, gpio_x6_aud_px6, gpio_x7_aud_px7, sdmmc3_clk_pa6,
+    sdmmc3_cmd_pa7, sdmmc3_dat0_pb7, sdmmc3_dat1_pb6, sdmmc3_dat2_pb5,
+    sdmmc3_dat3_pb4, hdmi_cec_pee3, sdmmc1_wp_n_pv3, sdmmc3_cd_n_pv2,
+    gpio_w2_aud_pw2, gpio_w3_aud_pw3, usb_vbus_en0_pn4, usb_vbus_en1_pn5,
+    sdmmc3_clk_lb_in_pee5, sdmmc3_clk_lb_out_pee4, reset_out_n.
+
+  drive groups:
+
+    These all support nvidia,pull-down-strength, nvidia,pull-up-strength,
+    nvidia,slew-rate-rising, nvidia,slew-rate-falling. Most but not all
+    support nvidia,high-speed-mode, nvidia,schmitt, nvidia,low-power-mode
+    and nvidia,drive-type.
+
+    ao1, ao2, at1, at2, at3, at4, at5, cdev1, cdev2, dap1, dap2, dap3, dap4,
+    dbg, sdio3, spi, uaa, uab, uart2, uart3, sdio1, ddc, gma, gme, gmf, gmg,
+    gmh, owr, uda.
+
+Example:
+
+       pinmux: pinmux {
+               compatible = "nvidia,tegra114-pinmux";
+               reg = <0x70000868 0x148         /* Pad control registers */
+                      0x70003000 0x40c>;       /* PinMux registers */
+       };
+
+Example board file extract:
+
+       pinctrl {
+               sdmmc4_default: pinmux {
+                       sdmmc4_clk_pcc4 {
+                               nvidia,pins = "sdmmc4_clk_pcc4";
+                               nvidia,function = "sdmmc4";
+                               nvidia,pull = <0>;
+                               nvidia,tristate = <0>;
+                       };
+                       sdmmc4_dat0_paa0 {
+                               nvidia,pins = "sdmmc4_dat0_paa0",
+                                               "sdmmc4_dat1_paa1",
+                                               "sdmmc4_dat2_paa2",
+                                               "sdmmc4_dat3_paa3",
+                                               "sdmmc4_dat4_paa4",
+                                               "sdmmc4_dat5_paa5",
+                                               "sdmmc4_dat6_paa6",
+                                               "sdmmc4_dat7_paa7";
+                               nvidia,function = "sdmmc4";
+                               nvidia,pull = <2>;
+                               nvidia,tristate = <0>;
+                       };
+               };
+       };
+
+       sdhci@78000400 {
+               pinctrl-names = "default";
+               pinctrl-0 = <&sdmmc4_default>;
+       };
diff --git a/Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt b/Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt
new file mode 100644 (file)
index 0000000..9a2f3f4
--- /dev/null
@@ -0,0 +1,140 @@
+ST Ericsson Nomadik pinmux controller
+
+Required properties:
+- compatible: "stericsson,nmk-pinctrl", "stericsson,nmk-pinctrl-db8540",
+              "stericsson,nmk-pinctrl-stn8815"
+- reg: Should contain the register physical address and length of the PRCMU.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+ST Ericsson's pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters, such as input, output, pull up, pull down...
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Required subnode-properties:
+- ste,pins : An array of strings. Each string contains the name of a pin or
+    group.
+
+Optional subnode-properties:
+- ste,function: A string containing the name of the function to mux to the
+  pin or group.
+
+- ste,config: Handle of pin configuration node (e.g. ste,config = <&slpm_in_wkup_pdis>)
+
+- ste,input : <0/1/2>
+       0: input with no pull
+       1: input with pull up,
+       2: input with pull down,
+
+- ste,output: <0/1/2>
+       0: output low,
+       1: output high,
+       2: output (value is not specified).
+
+- ste,sleep: <0/1>
+       0: sleep mode disable,
+       1: sleep mode enable.
+
+- ste,sleep-input: <0/1/2/3>
+       0: sleep input with no pull,
+       1: sleep input with pull up,
+       2: sleep input with pull down.
+       3: sleep input and keep last input configuration (no pull, pull up or pull down).
+
+- ste,sleep-output: <0/1/2>
+       0: sleep output low,
+       1: sleep output high,
+       2: sleep output (value is not specified).
+
+- ste,sleep-gpio: <0/1>
+       0: disable sleep gpio mode,
+       1: enable sleep gpio mode.
+
+- ste,sleep-wakeup: <0/1>
+       0: wake-up detection enabled,
+       1: wake-up detection disabled.
+
+- ste,sleep-pull-disable: <0/1>
+       0: GPIO pull-up or pull-down resistor is enabled, when pin is an input,
+       1: GPIO pull-up and pull-down resistor are disabled.
+
+Example board file extract:
+
+       pinctrl@80157000 {
+               compatible = "stericsson,nmk-pinctrl";
+               reg = <0x80157000 0x2000>;
+
+               pinctrl-names = "default";
+
+               slpm_in_wkup_pdis: slpm_in_wkup_pdis {
+                       ste,sleep = <1>;
+                       ste,sleep-input = <3>;
+                       ste,sleep-wakeup = <1>;
+                       ste,sleep-pull-disable = <0>;
+               };
+
+               slpm_out_hi_wkup_pdis: slpm_out_hi_wkup_pdis {
+                       ste,sleep = <1>;
+                       ste,sleep-output = <1>;
+                       ste,sleep-wakeup = <1>;
+                       ste,sleep-pull-disable = <0>;
+               };
+
+               slpm_out_wkup_pdis: slpm_out_wkup_pdis {
+                       ste,sleep = <1>;
+                       ste,sleep-output = <2>;
+                       ste,sleep-wakeup = <1>;
+                       ste,sleep-pull-disable = <0>;
+               };
+
+               uart0 {
+                       uart0_default_mux: uart0_mux {
+                               u0_default_mux {
+                                       ste,function = "u0";
+                                       ste,pins = "u0_a_1";
+                               };
+                       };
+                       uart0_default_mode: uart0_default {
+                               uart0_default_cfg1 {
+                                       ste,pins = "GPIO0", "GPIO2";
+                                       ste,input = <1>;
+                               };
+
+                               uart0_default_cfg2 {
+                                       ste,pins = "GPIO1", "GPIO3";
+                                       ste,output = <1>;
+                               };
+                       };
+                       uart0_sleep_mode: uart0_sleep {
+                               uart0_sleep_cfg1 {
+                                       ste,pins = "GPIO0", "GPIO2";
+                                       ste,config = <&slpm_in_wkup_pdis>;
+                               };
+                               uart0_sleep_cfg2 {
+                                       ste,pins = "GPIO1";
+                                       ste,config = <&slpm_out_hi_wkup_pdis>;
+                               };
+                               uart0_sleep_cfg3 {
+                                       ste,pins = "GPIO3";
+                                       ste,config = <&slpm_out_wkup_pdis>;
+                               };
+                       };
+               };
+       };
+
+       uart@80120000 {
+               compatible = "arm,pl011", "arm,primecell";
+               reg = <0x80120000 0x1000>;
+               interrupts = <0 11 0x4>;
+
+               pinctrl-names = "default","sleep";
+               pinctrl-0 = <&uart0_default_mux>, <&uart0_default_mode>;
+               pinctrl-1 = <&uart0_sleep_mode>;
+       };
diff --git a/Documentation/devicetree/bindings/power_supply/qnap-poweroff.txt b/Documentation/devicetree/bindings/power_supply/qnap-poweroff.txt
new file mode 100644 (file)
index 0000000..9a599d2
--- /dev/null
@@ -0,0 +1,13 @@
+* QNAP Power Off
+
+QNAP NAS devices have a microcontroller controlling the main power
+supply. This microcontroller is connected to UART1 of the Kirkwood and
+Orion5x SoCs. Sending the character 'A', at 19200 baud, tells the
+microcontroller to turn the power off. This driver adds a handler to
+pm_power_off which is called to turn the power off.
+
+Required Properties:
+- compatible: Should be "qnap,power-off"
+
+- reg: Address and length of the register set for UART1
+- clocks: tclk clock
diff --git a/Documentation/devicetree/bindings/power_supply/restart-poweroff.txt b/Documentation/devicetree/bindings/power_supply/restart-poweroff.txt
new file mode 100644 (file)
index 0000000..5776e68
--- /dev/null
@@ -0,0 +1,8 @@
+* Restart Power Off
+
+Buffalo Linkstation LS-XHL and LS-CHLv2, and other devices power off
+by restarting and letting u-boot keep hold of the machine until the
+user presses a button.
+
+Required Properties:
+- compatible: Should be "restart-poweroff"
index 357758cb6e92f6078da84aa8bd516f8a87af21bc..758eae24082a56567fbad2c9069a66606f7c4708 100644 (file)
@@ -9,6 +9,11 @@ Required properties:
 - anatop-min-voltage: Minimum voltage of this regulator
 - anatop-max-voltage: Maximum voltage of this regulator
 
+Optional properties:
+- anatop-delay-reg-offset: Anatop MFD step time register offset
+- anatop-delay-bit-shift: Bit shift for the step time register
+- anatop-delay-bit-width: Number of bits used in the step time register
+
 Any property defined as part of the core regulator
 binding, defined in regulator.txt, can also be used.
 
@@ -23,6 +28,9 @@ Example:
                anatop-reg-offset = <0x140>;
                anatop-vol-bit-shift = <9>;
                anatop-vol-bit-width = <5>;
+               anatop-delay-reg-offset = <0x170>;
+               anatop-delay-bit-shift = <24>;
+               anatop-delay-bit-width = <2>;
                anatop-min-bit-val = <1>;
                anatop-min-voltage = <725000>;
                anatop-max-voltage = <1300000>;
diff --git a/Documentation/devicetree/bindings/regulator/s5m8767-regulator.txt b/Documentation/devicetree/bindings/regulator/s5m8767-regulator.txt
new file mode 100644 (file)
index 0000000..a35ff99
--- /dev/null
@@ -0,0 +1,152 @@
+* Samsung S5M8767 Voltage and Current Regulator
+
+The Samsung S5M8767 is a multi-function device which includes voltage and
+current regulators, an RTC, a charger controller and other sub-blocks. It is
+interfaced to the host controller using an i2c interface. Each sub-block is
+addressed by the host system using a different i2c slave address. This
+document describes the bindings for the 'pmic' sub-block of the s5m8767.
+
+Required properties:
+- compatible: Should be "samsung,s5m8767-pmic".
+- reg: Specifies the i2c slave address of the pmic block. It should be 0x66.
+
+- s5m8767,pmic-buck2-dvs-voltage: A set of 8 voltage values in micro-volt (uV)
+  units for buck2 when changing voltage using gpio dvs. Refer to [1] below
+  for additional information.
+
+- s5m8767,pmic-buck3-dvs-voltage: A set of 8 voltage values in micro-volt (uV)
+  units for buck3 when changing voltage using gpio dvs. Refer to [1] below
+  for additional information.
+
+- s5m8767,pmic-buck4-dvs-voltage: A set of 8 voltage values in micro-volt (uV)
+  units for buck4 when changing voltage using gpio dvs. Refer to [1] below
+  for additional information.
+
+- s5m8767,pmic-buck-ds-gpios: GPIO specifiers for three host gpio's used
+  for selecting GPIO DVS lines. It is one-to-one mapped to dvs gpio lines.
+
+[1] If none of the 's5m8767,pmic-buck[2/3/4]-uses-gpio-dvs' optional
+    properties is specified, the 's5m8767,pmic-buck[2/3/4]-dvs-voltage'
+    property should specify at least one voltage level (which would be a
+    safe operating voltage).
+
+    If any of the 's5m8767,pmic-buck[2/3/4]-uses-gpio-dvs' optional
+    properties is specified, then all eight voltage values for the
+    's5m8767,pmic-buck[2/3/4]-dvs-voltage' should be specified.
+
+Optional properties:
+- interrupt-parent: Specifies the phandle of the interrupt controller to which
+  the interrupts from s5m8767 are delivered to.
+- interrupts: Interrupt specifiers for two interrupt sources.
+  - First interrupt specifier is for 'irq1' interrupt.
+  - Second interrupt specifier is for 'alert' interrupt.
+- s5m8767,pmic-buck2-uses-gpio-dvs: 'buck2' can be controlled by gpio dvs.
+- s5m8767,pmic-buck3-uses-gpio-dvs: 'buck3' can be controlled by gpio dvs.
+- s5m8767,pmic-buck4-uses-gpio-dvs: 'buck4' can be controlled by gpio dvs.
+
+Additional properties required if any of the optional properties are used:
+
+- s5m8767,pmic-buck234-default-dvs-idx: Default voltage setting selected from
+  the possible 8 options selectable by the dvs gpios. The value of this
+  property should be between 0 and 7. If not specified or if out of range, the
+  default value of this property is set to 0.
+
+- s5m8767,pmic-buck-dvs-gpios: GPIO specifiers for three host gpio's used
+  for dvs. The format of the gpio specifier depends on the gpio controller.
+
+Regulators: The regulators of s5m8767 that have to be instantiated should be
+included in a sub-node named 'regulators'. Regulator nodes included in this
+sub-node should be of the format as listed below.
+
+       regulator_name {
+               ldo1_reg: LDO1 {
+                       regulator-name = "VDD_ALIVE_1.0V";
+                       regulator-min-microvolt = <1100000>;
+                       regulator-max-microvolt = <1100000>;
+                       regulator-always-on;
+                       regulator-boot-on;
+                       op_mode = <1>; /* Normal Mode */
+               };
+       };
+The above regulator entries are described in the regulator bindings
+documentation, except for the op_mode description below.
+       - op_mode: describes the different operating modes of the LDO's with
+               power mode change in SOC. The different possible values are,
+               0 - always off mode
+               1 - on in normal mode
+               2 - low power mode
+               3 - suspend mode
+
+The following are the names of the regulators that the s5m8767 pmic block
+supports. Note: The 'n' in LDOn and BUCKn represents the LDO or BUCK number
+as per the datasheet of s5m8767.
+
+       - LDOn
+                 - valid values for n are 1 to 28
+                 - Example: LDO1, LDO2, LDO28
+       - BUCKn
+                 - valid values for n are 1 to 9.
+                 - Example: BUCK1, BUCK2, BUCK9
+
+The bindings inside the regulator nodes use the standard regulator bindings
+which are documented elsewhere.
+
+Example:
+
+       s5m8767_pmic@66 {
+               compatible = "samsung,s5m8767-pmic";
+               reg = <0x66>;
+
+               s5m8767,pmic-buck2-uses-gpio-dvs;
+               s5m8767,pmic-buck3-uses-gpio-dvs;
+               s5m8767,pmic-buck4-uses-gpio-dvs;
+
+               s5m8767,pmic-buck-default-dvs-idx = <0>;
+
+               s5m8767,pmic-buck-dvs-gpios = <&gpx0 0 1 0 0>, /* DVS1 */
+                                                <&gpx0 1 1 0 0>, /* DVS2 */
+                                                <&gpx0 2 1 0 0>; /* DVS3 */
+
+               s5m8767,pmic-buck-ds-gpios = <&gpx2 3 1 0 0>, /* SET1 */
+                                               <&gpx2 4 1 0 0>, /* SET2 */
+                                               <&gpx2 5 1 0 0>; /* SET3 */
+
+               s5m8767,pmic-buck2-dvs-voltage = <1350000>, <1300000>,
+                                                <1250000>, <1200000>,
+                                                <1150000>, <1100000>,
+                                                <1000000>, <950000>;
+
+               s5m8767,pmic-buck3-dvs-voltage = <1100000>, <1100000>,
+                                                <1100000>, <1100000>,
+                                                <1000000>, <1000000>,
+                                                <1000000>, <1000000>;
+
+               s5m8767,pmic-buck4-dvs-voltage = <1200000>, <1200000>,
+                                                <1200000>, <1200000>,
+                                                <1200000>, <1200000>,
+                                                <1200000>, <1200000>;
+
+               regulators {
+                       ldo1_reg: LDO1 {
+                               regulator-name = "VDD_ABB_3.3V";
+                               regulator-min-microvolt = <3300000>;
+                               regulator-max-microvolt = <3300000>;
+                               op_mode = <1>; /* Normal Mode */
+                       };
+
+                       ldo2_reg: LDO2 {
+                               regulator-name = "VDD_ALIVE_1.1V";
+                               regulator-min-microvolt = <1100000>;
+                               regulator-max-microvolt = <1100000>;
+                               regulator-always-on;
+                       };
+
+                       buck1_reg: BUCK1 {
+                               regulator-name = "VDD_MIF_1.2V";
+                               regulator-min-microvolt = <950000>;
+                               regulator-max-microvolt = <1350000>;
+                               regulator-always-on;
+                               regulator-boot-on;
+                       };
+               };
+       };
diff --git a/Documentation/devicetree/bindings/regulator/tps51632-regulator.txt b/Documentation/devicetree/bindings/regulator/tps51632-regulator.txt
new file mode 100644 (file)
index 0000000..2f7e44a
--- /dev/null
@@ -0,0 +1,27 @@
+TPS51632 Voltage regulators
+
+Required properties:
+- compatible: Must be "ti,tps51632"
+- reg: I2C slave address
+
+Optional properties:
+- ti,enable-pwm-dvfs: Enable the DVFS voltage control through the PWM interface.
+- ti,dvfs-step-20mV: Use a 20mV voltage step when PWM DVFS is enabled. If this
+       is missing, a 10mV step voltage is used in PWM DVFS mode. In normal
+       mode, the voltage step is 10mV as per the datasheet.
+
+Any property defined as part of the core regulator binding, defined in
+regulator.txt, can also be used.
+
+Example:
+
+       tps51632 {
+               compatible = "ti,tps51632";
+               reg =  <0x43>;
+               regulator-name = "tps51632-vout";
+               regulator-min-microvolt = <500000>;
+               regulator-max-microvolt = <1500000>;
+               regulator-boot-on;
+               ti,enable-pwm-dvfs;
+               ti,dvfs-step-20mV;
+       };
index 902b1b1f568e39b16a26e0ca6278f9225ea1fd9e..15321373ec8d4113acfba3f4917aa30fe96bb53b 100644 (file)
@@ -50,6 +50,7 @@ simtek
 sirf   SiRF Technology, Inc.
 snps   Synopsys, Inc.
 st     STMicroelectronics
+ste    ST-Ericsson
 stericsson     ST-Ericsson
 ti     Texas Instruments
 via    VIA Technologies, Inc.
index 8fbd8b46ee342c502b441d769b7334cdf954ec13..dcf338e62b71108a3a02e0198edf0eb564930121 100644 (file)
@@ -175,9 +175,9 @@ consists of multiple segments as described below.
                                             align with the zone size <-|
                  |-> align with the segment size
      _________________________________________________________________________
-    |            |            |    Node     |   Segment   |   Segment  |      |
-    | Superblock | Checkpoint |   Address   |    Info.    |   Summary  | Main |
-    |    (SB)    |   (CP)     | Table (NAT) | Table (SIT) | Area (SSA) |      |
+    |            |            |   Segment   |    Node     |   Segment  |      |
+    | Superblock | Checkpoint |    Info.    |   Address   |   Summary  | Main |
+    |    (SB)    |   (CP)     | Table (SIT) | Table (NAT) | Area (SSA) |      |
     |____________|_____2______|______N______|______N______|______N_____|__N___|
                                                                        .      .
                                                              .                .
@@ -200,14 +200,14 @@ consists of multiple segments as described below.
  : It contains file system information, bitmaps for valid NAT/SIT sets, orphan
    inode lists, and summary entries of current active segments.
 
-- Node Address Table (NAT)
- : It is composed of a block address table for all the node blocks stored in
-   Main area.
-
 - Segment Information Table (SIT)
  : It contains segment information such as valid block count and bitmap for the
    validity of all the blocks.
 
+- Node Address Table (NAT)
+ : It is composed of a block address table for all the node blocks stored in
+   Main area.
+
 - Segment Summary Area (SSA)
  : It contains summary entries which contains the owner information of all the
    data and node blocks stored in Main area.
@@ -236,13 +236,13 @@ For file system consistency, each CP points to which NAT and SIT copies are
 valid, as shown as below.
 
   +--------+----------+---------+
-  |   CP   |    NAT   |   SIT   |
+  |   CP   |    SIT   |   NAT   |
   +--------+----------+---------+
   .         .          .          .
   .            .              .              .
   .               .                 .                 .
   +-------+-------+--------+--------+--------+--------+
-  | CP #0 | CP #1 | NAT #0 | NAT #1 | SIT #0 | SIT #1 |
+  | CP #0 | CP #1 | SIT #0 | SIT #1 | NAT #0 | NAT #1 |
   +-------+-------+--------+--------+--------+--------+
      |             ^                          ^
      |             |                          |
old mode 100755 (executable)
new mode 100644 (file)
index 3374c085678d694e450dbbab29c94d7c928ddde9..fec5a9bf755fb33b201dba540e02b4475a33b4a7 100644 (file)
@@ -66,6 +66,7 @@ Process               Processor                                       TjMax(C)
                i5 3470T                                        91
 
 32nm           Core i3/i5/i7 Processors
+               i7 2600                                         98
                i7 660UM/640/620, 640LM/620, 620M, 610E         105
                i5 540UM/520/430, 540M/520/450/430              105
                i3 330E, 370M/350/330                           90 rPGA, 105 BGA
@@ -79,7 +80,10 @@ Process              Processor                                       TjMax(C)
                P4505/P4500                                     90
 
 32nm           Atom Processors
+               S1260/1220                                      95
+               S1240                                           102
                Z2460                                           90
+               Z2760                                           90
                D2700/2550/2500                                 100
                N2850/2800/2650/2600                            100
 
@@ -98,6 +102,7 @@ Process              Processor                                       TjMax(C)
 
 45nm           Atom Processors
                D525/510/425/410                                100
+               K525/510/425/410                                100
                Z670/650                                        90
                Z560/550/540/530P/530/520PT/520/515/510PT/510P  90
                Z510/500                                        90
@@ -107,7 +112,11 @@ Process            Processor                                       TjMax(C)
                330/230                                         125
                E680/660/640/620                                90
                E680T/660T/640T/620T                            110
+               E665C/645C                                      90
+               E665CT/645CT                                    110
                CE4170/4150/4110                                110
+               CE4200 series                                   unknown
+               CE5300 series                                   unknown
 
 45nm           Core2 Processors
                Solo ULV SU3500/3300                            100
diff --git a/Documentation/hwmon/ina209 b/Documentation/hwmon/ina209
new file mode 100644 (file)
index 0000000..672501d
--- /dev/null
@@ -0,0 +1,93 @@
+Kernel driver ina209
+=====================
+
+Supported chips:
+  * Burr-Brown / Texas Instruments INA209
+    Prefix: 'ina209'
+    Addresses scanned: -
+    Datasheet:
+        http://www.ti.com/lit/gpn/ina209
+
+Author: Paul Hays <Paul.Hays@cattail.ca>
+Author: Ira W. Snyder <iws@ovro.caltech.edu>
+Author: Guenter Roeck <linux@roeck-us.net>
+
+
+Description
+-----------
+
+The TI / Burr-Brown INA209 monitors voltage, current, and power on the high side
+of a D.C. power supply. It can perform measurements and calculations in the
+background to supply readings at any time. It includes a programmable
+calibration multiplier to scale the displayed current and power values.
+
+
+Sysfs entries
+-------------
+
+The INA209 chip is highly configurable both via hardwiring and via
+the I2C bus. See the datasheet for details.
+
+This driver tries to expose most monitoring features of the hardware via
+sysfs. It does not support every feature of this chip.
+
+
+in0_input              shunt voltage (mV)
+in0_input_highest      shunt voltage historical maximum reading (mV)
+in0_input_lowest       shunt voltage historical minimum reading (mV)
+in0_reset_history      reset shunt voltage history
+in0_max                        shunt voltage max alarm limit (mV)
+in0_min                        shunt voltage min alarm limit (mV)
+in0_crit_max           shunt voltage crit max alarm limit (mV)
+in0_crit_min           shunt voltage crit min alarm limit (mV)
+in0_max_alarm          shunt voltage max alarm limit exceeded
+in0_min_alarm          shunt voltage min alarm limit exceeded
+in0_crit_max_alarm     shunt voltage crit max alarm limit exceeded
+in0_crit_min_alarm     shunt voltage crit min alarm limit exceeded
+
+in1_input              bus voltage (mV)
+in1_input_highest      bus voltage historical maximum reading (mV)
+in1_input_lowest       bus voltage historical minimum reading (mV)
+in1_reset_history      reset bus voltage history
+in1_max                        bus voltage max alarm limit (mV)
+in1_min                        bus voltage min alarm limit (mV)
+in1_crit_max           bus voltage crit max alarm limit (mV)
+in1_crit_min           bus voltage crit min alarm limit (mV)
+in1_max_alarm          bus voltage max alarm limit exceeded
+in1_min_alarm          bus voltage min alarm limit exceeded
+in1_crit_max_alarm     bus voltage crit max alarm limit exceeded
+in1_crit_min_alarm     bus voltage crit min alarm limit exceeded
+
+power1_input           power measurement (uW)
+power1_input_highest   power historical maximum reading (uW)
+power1_reset_history   reset power history
+power1_max             power max alarm limit (uW)
+power1_crit            power crit alarm limit (uW)
+power1_max_alarm       power max alarm limit exceeded
+power1_crit_alarm      power crit alarm limit exceeded
+
+curr1_input            current measurement (mA)
+
+update_interval                data conversion time; affects number of samples used
+                       to average results for shunt and bus voltages.
+
+General Remarks
+---------------
+
+The power and current registers in this chip require that the calibration
+register is programmed correctly before they are used. Normally this is expected
+to be done in the BIOS. In the absence of BIOS programming, the shunt resistor
+value can be provided using platform data. The driver uses platform data from
+the ina2xx driver for this purpose. If calibration register data is not provided
+via platform data, the driver checks if the calibration register has been
+programmed (i.e. has a value not equal to zero). If so, this value is retained.
+Otherwise, a default value reflecting a shunt resistor value of 10 mOhm is
+programmed into the calibration register.
+
+
+Output Pins
+-----------
+
+Output pin programming is a board feature which depends on the BIOS. It is
+outside the scope of a hardware monitoring driver to enable or disable output
+pins.
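For illustration, the attributes listed above can be read from userspace like
any other sysfs file. The sketch below assumes the device is registered as
hwmon0; the actual hwmon index and path vary from system to system.

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            /* Assumed path; the hwmon index depends on the system. */
            const char *path = "/sys/class/hwmon/hwmon0/in1_input";
            char buf[32];
            FILE *f = fopen(path, "r");

            if (!f) {
                    perror(path);
                    return 1;
            }
            if (!fgets(buf, sizeof(buf), f)) {
                    perror(path);
                    fclose(f);
                    return 1;
            }
            fclose(f);

            /* in1_input reports the bus voltage in millivolts */
            printf("bus voltage: %ld mV\n", strtol(buf, NULL, 10));
            return 0;
    }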
index 8386aadc0a8214df209d49c1c325b630c8392c5c..c263740f0cba83b5c3f17e95ecae83326da06458 100644 (file)
@@ -30,6 +30,14 @@ Supported chips:
     Prefix: 'it8728'
     Addresses scanned: from Super I/O config space (8 I/O ports)
     Datasheet: Not publicly available
+  * IT8771E
+    Prefix: 'it8771'
+    Addresses scanned: from Super I/O config space (8 I/O ports)
+    Datasheet: Not publicly available
+  * IT8772E
+    Prefix: 'it8772'
+    Addresses scanned: from Super I/O config space (8 I/O ports)
+    Datasheet: Not publicly available
   * IT8782F
     Prefix: 'it8782'
     Addresses scanned: from Super I/O config space (8 I/O ports)
@@ -83,8 +91,8 @@ Description
 -----------
 
 This driver implements support for the IT8705F, IT8712F, IT8716F,
-IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8758E, IT8781F, IT8782F,
-IT8783E/F, and SiS950 chips.
+IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8758E, IT8771E, IT8772E,
+IT8782F, IT8783E/F, and SiS950 chips.
 
 These chips are 'Super I/O chips', supporting floppy disks, infrared ports,
 joysticks and other miscellaneous stuff. For hardware monitoring, they
@@ -118,8 +126,8 @@ The IT8726F is just bit enhanced IT8716F with additional hardware
 for AMD power sequencing. Therefore the chip will appear as IT8716F
 to userspace applications.
 
-The IT8728F is considered compatible with the IT8721F, until a datasheet
-becomes available (hopefully.)
+The IT8728F, IT8771E, and IT8772E are considered compatible with the IT8721F,
+until a datasheet becomes available (hopefully.)
 
 Temperatures are measured in degrees Celsius. An alarm is triggered once
 when the Overtemperature Shutdown limit is crossed.
index 66ecb9fc82468c46232c2744af6012fd2707906c..1650771212382296f6a63b321fce6b322dcba27e 100644 (file)
@@ -17,12 +17,13 @@ Supported chips:
   * Maxim MAX6604
     Datasheets:
        http://datasheets.maxim-ic.com/en/ds/MAX6604.pdf
-  * Microchip MCP9804, MCP9805, MCP98242, MCP98243, MCP9843
+  * Microchip MCP9804, MCP9805, MCP98242, MCP98243, MCP98244, MCP9843
     Datasheets:
        http://ww1.microchip.com/downloads/en/DeviceDoc/22203C.pdf
        http://ww1.microchip.com/downloads/en/DeviceDoc/21977b.pdf
        http://ww1.microchip.com/downloads/en/DeviceDoc/21996a.pdf
        http://ww1.microchip.com/downloads/en/DeviceDoc/22153c.pdf
+       http://ww1.microchip.com/downloads/en/DeviceDoc/22327A.pdf
   * NXP Semiconductors SE97, SE97B, SE98, SE98A
     Datasheets:
        http://www.nxp.com/documents/data_sheet/SE97.pdf
diff --git a/Documentation/hwmon/lm73 b/Documentation/hwmon/lm73
new file mode 100644 (file)
index 0000000..8af059d
--- /dev/null
@@ -0,0 +1,90 @@
+Kernel driver lm73
+==================
+
+Supported chips:
+  * Texas Instruments LM73
+    Prefix: 'lm73'
+    Addresses scanned: I2C 0x48, 0x49, 0x4a, 0x4c, 0x4d, and 0x4e
+    Datasheet: Publicly available at the Texas Instruments website
+               http://www.ti.com/product/lm73
+
+Author: Guillaume Ligneul <guillaume.ligneul@gmail.com>
+Documentation: Chris Verges <kg4ysn@gmail.com>
+
+
+Description
+-----------
+
+The LM73 is a digital temperature sensor.  All temperature values are
+given in degrees Celsius.
+
+Measurement Resolution Support
+------------------------------
+
+The LM73 supports four resolutions, defined in terms of degrees C per
+LSB: 0.25, 0.125, 0.0625, and 0.03125.  Changing the resolution mode
+affects the conversion time of the LM73's analog-to-digital converter.
+From userspace, the desired resolution can be specified as a function of
+conversion time via the 'update_interval' sysfs attribute for the
+device.  This attribute will normalize ranges of input values to the
+maximum times defined for the resolution in the datasheet.
+
+    Resolution    Conv. Time    Input Range
+    (C/LSB)       (msec)        (msec)
+    --------------------------------------
+    0.25          14             0..14
+    0.125         28            15..28
+    0.0625        56            29..56
+    0.03125       112           57..infinity
+    --------------------------------------
+
+The following examples show how the 'update_interval' attribute can be
+used to change the conversion time:
+
+    $ echo 0 > update_interval
+    $ cat update_interval
+    14
+    $ cat temp1_input
+    24250
+
+    $ echo 22 > update_interval
+    $ cat update_interval
+    28
+    $ cat temp1_input
+    24125
+
+    $ echo 56 > update_interval
+    $ cat update_interval
+    56
+    $ cat temp1_input
+    24062
+
+    $ echo 85 > update_interval
+    $ cat update_interval
+    112
+    $ cat temp1_input
+    24031
+
+As shown here, the lm73 driver automatically adjusts any user input for
+'update_interval' via a step function.  Reading back the
+'update_interval' value after a write operation will confirm the
+conversion time actively in use.
+
+Mathematically, the resolution can be derived from the conversion time
+via the following function:
+
+   g(x) = 0.250 / 2^[log(x/14) / log(2)]
+
+where 'x' is the output from 'update_interval' and 'g(x)' is the
+resolution in degrees C per LSB.
+
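For a quick sanity check against the table above, the relation can be
evaluated in a few lines of standalone C (an illustration only, not part
of the driver):

    #include <math.h>
    #include <stdio.h>

    /* Degrees C per LSB for a given conversion time in msec
     * (14, 28, 56 or 112, as reported by 'update_interval'). */
    static double lm73_resolution(double msec)
    {
            return 0.250 / pow(2.0, log(msec / 14.0) / log(2.0));
    }

    int main(void)
    {
            const double times[] = { 14.0, 28.0, 56.0, 112.0 };
            unsigned int i;

            for (i = 0; i < sizeof(times) / sizeof(times[0]); i++)
                    printf("%3.0f msec -> %.5f C/LSB\n",
                           times[i], lm73_resolution(times[i]));
            return 0;   /* prints 0.25000, 0.12500, 0.06250, 0.03125 */
    }
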
+Alarm Support
+-------------
+
+The LM73 features a simple over-temperature alarm mechanism.  This
+feature is exposed via the sysfs attributes.
+
+The attributes 'temp1_max_alarm' and 'temp1_min_alarm' are flags
+provided by the LM73 that indicate whether the measured temperature has
+passed the 'temp1_max' and 'temp1_min' thresholds, respectively.  These
+values _must_ be read to clear the registers on the LM73.
index 04482226db208698564ef281de08ff31b883dd97..47651ff341aed4cc7ad95d20c052c57250e5c827 100644 (file)
@@ -16,6 +16,16 @@ Supported chips:
     Prefixes: 'max34446'
     Addresses scanned: -
     Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34446.pdf
+  * Maxim MAX34460
+    PMBus 12-Channel Voltage Monitor & Sequencer
+    Prefix: 'max34460'
+    Addresses scanned: -
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34460.pdf
+  * Maxim MAX34461
+    PMBus 16-Channel Voltage Monitor & Sequencer
+    Prefix: 'max34461'
+    Addresses scanned: -
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34461.pdf
 
 Author: Guenter Roeck <guenter.roeck@ericsson.com>
 
@@ -26,6 +36,9 @@ Description
 This driver supports hardware monitoring for Maxim MAX34440 PMBus 6-Channel
 Power-Supply Manager, MAX34441 PMBus 5-Channel Power-Supply Manager
 and Intelligent Fan Controller, and MAX34446 PMBus Power-Supply Data Logger.
+It also supports the MAX34460 and MAX34461 PMBus Voltage Monitor & Sequencers.
+The MAX34460 supports 12 voltage channels, and the MAX34461 supports 16 voltage
+channels.
 
 The driver is a client driver to the core PMBus driver. Please see
 Documentation/hwmon/pmbus for details on PMBus client drivers.
@@ -109,3 +122,6 @@ temp[1-8]_reset_history     Write any value to reset history.
 
                        temp7 and temp8 attributes only exist for MAX34440.
                        MAX34446 only supports temp[1-3].
+
+MAX34460 supports attribute groups in[1-12] and temp[1-5].
+MAX34461 supports attribute groups in[1-16] and temp[1-5].
diff --git a/Documentation/hwmon/max6697 b/Documentation/hwmon/max6697
new file mode 100644 (file)
index 0000000..6594177
--- /dev/null
@@ -0,0 +1,58 @@
+Kernel driver max6697
+=====================
+
+Supported chips:
+  * Maxim MAX6581
+    Prefix: 'max6581'
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6581.pdf
+  * Maxim MAX6602
+    Prefix: 'max6602'
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6602.pdf
+  * Maxim MAX6622
+    Prefix: 'max6622'
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6622.pdf
+  * Maxim MAX6636
+    Prefix: 'max6636'
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6636.pdf
+  * Maxim MAX6689
+    Prefix: 'max6689'
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6689.pdf
+  * Maxim MAX6693
+    Prefix: 'max6693'
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6693.pdf
+  * Maxim MAX6694
+    Prefix: 'max6694'
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6694.pdf
+  * Maxim MAX6697
+    Prefix: 'max6697'
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6697.pdf
+  * Maxim MAX6698
+    Prefix: 'max6698'
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6698.pdf
+  * Maxim MAX6699
+    Prefix: 'max6699'
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6699.pdf
+
+Author:
+    Guenter Roeck <linux@roeck-us.net>
+
+Description
+-----------
+
+This driver implements support for several MAX6697-compatible temperature sensor
+chips. The chips support one local temperature sensor plus four, six, or seven
+remote temperature sensors. Remote temperature sensors are diode-connected
+thermal transistors, except for the MAX6698, which supports three diode-connected
+thermal transistors plus three thermistors in addition to the local temperature
+sensor.
+
+The driver provides the following sysfs attributes. temp1 is the local (chip)
+temperature; temp[2..n] are remote temperatures. The per-channel attributes
+actually supported depend on the chip type and channel.
+
+tempX_input      RO temperature
+tempX_max        RW temperature maximum threshold
+tempX_max_alarm  RO temperature maximum threshold alarm
+tempX_crit       RW temperature critical threshold
+tempX_crit_alarm RO temperature critical threshold alarm
+tempX_fault      RO temperature diode fault (remote sensors only)
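
As a purely illustrative userspace example (the hwmon path is an assumption
and differs per system), a temperature attribute can be read and converted
from millidegrees like this:

    #include <stdio.h>

    /* Hypothetical path; look up the real one under /sys/class/hwmon/. */
    #define TEMP2_INPUT "/sys/class/hwmon/hwmon0/temp2_input"

    int main(void)
    {
            FILE *f = fopen(TEMP2_INPUT, "r");
            long millideg;

            if (!f) {
                    perror(TEMP2_INPUT);
                    return 1;
            }
            if (fscanf(f, "%ld", &millideg) != 1) {
                    fclose(f);
                    return 1;
            }
            fclose(f);
            printf("remote sensor 1: %.3f degrees C\n", millideg / 1000.0);
            return 0;
    }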
index 1f4dd855a299350ec707fc9ec34630baef29c689..79f8257dd790703f8a9285e7b498f585df87e97b 100644 (file)
@@ -722,14 +722,14 @@ add/subtract if it has been divided before the add/subtract.
 What to do if a value is found to be invalid, depends on the type of the
 sysfs attribute that is being set. If it is a continuous setting like a
 tempX_max or inX_max attribute, then the value should be clamped to its
-limits using SENSORS_LIMIT(value, min_limit, max_limit). If it is not
-continuous like for example a tempX_type, then when an invalid value is
-written, -EINVAL should be returned.
+limits using clamp_val(value, min_limit, max_limit). If it is not continuous
+like for example a tempX_type, then when an invalid value is written,
+-EINVAL should be returned.
 
 Example1, temp1_max, register is a signed 8 bit value (-128 - 127 degrees):
 
        long v = simple_strtol(buf, NULL, 10) / 1000;
-       v = SENSORS_LIMIT(v, -128, 127);
+       v = clamp_val(v, -128, 127);
        /* write v to register */
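
For illustration only, a complete store callback built around Example1 might
look roughly as follows; the register helper and its name are hypothetical
stand-ins for whatever the driver actually uses:

        /* Sketch only: example_write_reg() and EXAMPLE_REG_TEMP1_MAX are
         * invented; needs <linux/device.h> and <linux/kernel.h>. */
        static ssize_t set_temp1_max(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
        {
                long v = simple_strtol(buf, NULL, 10) / 1000;

                /* continuous attribute: clamp to the register's valid range */
                v = clamp_val(v, -128, 127);
                example_write_reg(dev, EXAMPLE_REG_TEMP1_MAX, v);

                return count;
        }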
 
 Example2, fan divider setting, valid values 2, 4 and 8:
index a995b41724fd8e98bf4b3b1eceacf6e2a2146ab2..3d924b6b59e915ff043e933209d7d449b6f889ce 100644 (file)
@@ -121,12 +121,26 @@ in1_max_alarm             Input voltage high alarm.
 in1_lcrit_alarm                Input voltage critical low alarm.
 in1_crit_alarm         Input voltage critical high alarm.
 
-in2_label              "vout1"
-in2_input              Measured output voltage.
-in2_lcrit              Critical minimum output Voltage.
-in2_crit               Critical maximum output voltage.
-in2_lcrit_alarm                Critical output voltage critical low alarm.
-in2_crit_alarm         Critical output voltage critical high alarm.
+in2_label              "vmon"
+in2_input              Measured voltage on VMON (ZL2004) or VDRV (ZL9101M,
+                       ZL9117M) pin. Reported voltage is 16x the voltage on the
+                       pin (adjusted internally by the chip).
+in2_lcrit              Critical minimum VMON/VDRV voltage.
+in2_crit               Critical maximum VMON/VDRV voltage.
+in2_lcrit_alarm                VMON/VDRV voltage critical low alarm.
+in2_crit_alarm         VMON/VDRV voltage critical high alarm.
+
+                       vmon attributes are supported on ZL2004, ZL9101M,
+                       and ZL9117M only.
+
+inX_label              "vout1"
+inX_input              Measured output voltage.
+inX_lcrit              Critical minimum output voltage.
+inX_crit               Critical maximum output voltage.
+inX_lcrit_alarm                Output voltage critical low alarm.
+inX_crit_alarm         Output voltage critical high alarm.
+
+                       X is 3 for ZL2004, ZL9101M, and ZL9117M, 2 otherwise.
 
 curr1_label            "iout1"
 curr1_input            Measured output current.
index 363e348bff9b93598587710de952a22c89f12bbe..6c723811c0a09f3ad5b2b18fa32ef12aa55cd82a 100644 (file)
@@ -2438,7 +2438,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        real-time workloads.  It can also improve energy
                        efficiency for asymmetric multiprocessors.
 
-       rcu_nocbs_poll  [KNL,BOOT]
+       rcu_nocb_poll   [KNL,BOOT]
                        Rather than requiring that offloaded CPUs
                        (specified by rcu_nocbs= above) explicitly
                        awaken the corresponding "rcuoN" kthreads,
index 3c4e1b3b80a1f8036a8b220ddcd0210d167c62b6..fa5d8a9ae2051184ddba6ce3d1dd769414d2e59f 100644 (file)
@@ -1685,6 +1685,7 @@ explicit lock operations, described later).  These include:
 
        xchg();
        cmpxchg();
+       atomic_xchg();
        atomic_cmpxchg();
        atomic_inc_return();
        atomic_dec_return();
index da40efbef6ec5599406a1e086f7b11aaaa06e018..a2b57e0a1db04d6454253d8f8fa7b383fcb974ed 100644 (file)
@@ -972,6 +972,18 @@ pinmux core.
 Pin control requests from drivers
 =================================
 
+When a device driver is about to probe, the device core will automatically
+attempt to issue pinctrl_get_select_default() on the device.
+This way driver writers do not need to add any of the boilerplate code
+of the type found below. However, when doing fine-grained state selection
+and not using the "default" state, you may have to do some device driver
+handling of the pinctrl handles and states.
+
+So if you just want to put the pins for a certain device into the default
+state and be done with it, there is nothing you need to do besides
+providing the proper mapping table. The device core will take care of
+the rest.
+
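As a rough sketch of what that saved boilerplate amounts to (function name
hypothetical, error handling trimmed), a driver doing it by hand would
essentially call:

    #include <linux/err.h>
    #include <linux/pinctrl/consumer.h>

    /* Sketch only: grab and apply the "default" pinctrl state by hand,
     * which is what the device core now does automatically. */
    static int foo_apply_default_pins(struct device *dev)
    {
            struct pinctrl *p;

            p = pinctrl_get_select_default(dev);
            if (IS_ERR(p))
                    return PTR_ERR(p);

            /* pins are now muxed and configured for the "default" state */
            return 0;
    }
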
 Generally it is discouraged to let individual drivers get and enable pin
 control. So if possible, handle the pin control in platform code or some other
 place where you have access to all the affected struct device * pointers. In
@@ -1097,9 +1109,9 @@ situations that can be electrically unpleasant, you will certainly want to
 mux in and bias pins in a certain way before the GPIO subsystem starts to
 deal with them.
 
-The above can be hidden: using pinctrl hogs, the pin control driver may be
-setting up the config and muxing for the pins when it is probing,
-nevertheless orthogonal to the GPIO subsystem.
+The above can be hidden: using the device core, the pinctrl core may set up
+the config and muxing for the pins right before the device is probed,
+while remaining orthogonal to the GPIO subsystem.
 
 But there are also situations where it makes sense for the GPIO subsystem
 to communicate directly with the pinctrl subsystem, using the latter
index 6f51fed45f2d2f83ca6fcaaeb83dc1e5ee64fb6c..53d6a3c51d875771fc2966c9917aec13b17f53c6 100644 (file)
@@ -1842,6 +1842,89 @@ an error.
  # cat buffer_size_kb
 85
 
+Snapshot
+--------
+CONFIG_TRACER_SNAPSHOT makes a generic snapshot feature
+available to all non-latency tracers. (Latency tracers which
+record max latency, such as "irqsoff" or "wakeup", can't use
+this feature, since those are already using the snapshot
+mechanism internally.)
+
+Snapshot preserves a current trace buffer at a particular point
+in time without stopping tracing. Ftrace swaps the current
+buffer with a spare buffer, and tracing continues in the new
+current (=previous spare) buffer.
+
+The following debugfs files in "tracing" are related to this
+feature:
+
+  snapshot:
+
+       This is used to take a snapshot and to read the output
+       of the snapshot. Echo 1 into this file to allocate a
+       spare buffer and to take a snapshot (swap), then read
+       the snapshot from this file in the same format as
+       "trace" (described above in the section "The File
+       System"). Reading the snapshot and tracing can run in
+       parallel. When the spare buffer is allocated, echoing
+       0 frees it, and echoing other (positive) values clears
+       the snapshot contents.
+       More details are shown in the table below.
+
+       status\input  |     0      |     1      |    else    |
+       --------------+------------+------------+------------+
+       not allocated |(do nothing)| alloc+swap |   EINVAL   |
+       --------------+------------+------------+------------+
+       allocated     |    free    |    swap    |   clear    |
+       --------------+------------+------------+------------+
+
+Here is an example of using the snapshot feature.
+
+ # echo 1 > events/sched/enable
+ # echo 1 > snapshot
+ # cat snapshot
+# tracer: nop
+#
+# entries-in-buffer/entries-written: 71/71   #P:8
+#
+#                              _-----=> irqs-off
+#                             / _----=> need-resched
+#                            | / _---=> hardirq/softirq
+#                            || / _--=> preempt-depth
+#                            ||| /     delay
+#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION
+#              | |       |   ||||       |         |
+          <idle>-0     [005] d...  2440.603828: sched_switch: prev_comm=swapper/5 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2242 next_prio=120
+           sleep-2242  [005] d...  2440.603846: sched_switch: prev_comm=snapshot-test-2 prev_pid=2242 prev_prio=120 prev_state=R ==> next_comm=kworker/5:1 next_pid=60 next_prio=120
+[...]
+          <idle>-0     [002] d...  2440.707230: sched_switch: prev_comm=swapper/2 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2229 next_prio=120
+
+ # cat trace
+# tracer: nop
+#
+# entries-in-buffer/entries-written: 77/77   #P:8
+#
+#                              _-----=> irqs-off
+#                             / _----=> need-resched
+#                            | / _---=> hardirq/softirq
+#                            || / _--=> preempt-depth
+#                            ||| /     delay
+#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION
+#              | |       |   ||||       |         |
+          <idle>-0     [007] d...  2440.707395: sched_switch: prev_comm=swapper/7 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2243 next_prio=120
+ snapshot-test-2-2229  [002] d...  2440.707438: sched_switch: prev_comm=snapshot-test-2 prev_pid=2229 prev_prio=120 prev_state=S ==> next_comm=swapper/2 next_pid=0 next_prio=120
+[...]
+
+
+If you try to use this snapshot feature when the current tracer is
+one of the latency tracers, you will get the following results.
+
+ # echo wakeup > current_tracer
+ # echo 1 > snapshot
+bash: echo: write error: Device or resource busy
+ # cat snapshot
+cat: snapshot: Device or resource busy
+
 -----------
 
 More details can be found in the source code, in the
index 406d82d5d2bb1e08a8cb9e4c548ce937f448b376..b443f1de0e5af37987b4be6282d57e09e285d42e 100644 (file)
@@ -57,6 +57,10 @@ Protocol 2.10:       (Kernel 2.6.31) Added a protocol for relaxed alignment
 Protocol 2.11: (Kernel 3.6) Added a field for offset of EFI handover
                protocol entry point.
 
+Protocol 2.12: (Kernel 3.8) Added the xloadflags field and extension fields
+               to struct boot_params for loading bzImage and ramdisk
+               above 4G in 64-bit mode.
+
 **** MEMORY LAYOUT
 
 The traditional memory map for the kernel loader, used for Image or
@@ -182,7 +186,7 @@ Offset      Proto   Name            Meaning
 0230/4 2.05+   kernel_alignment Physical addr alignment required for kernel
 0234/1 2.05+   relocatable_kernel Whether kernel is relocatable or not
 0235/1 2.10+   min_alignment   Minimum alignment, as a power of two
-0236/2 N/A     pad3            Unused
+0236/2 2.12+   xloadflags      Boot protocol option flags
 0238/4 2.06+   cmdline_size    Maximum size of the kernel command line
 023C/4 2.07+   hardware_subarch Hardware subarchitecture
 0240/8 2.07+   hardware_subarch_data Subarchitecture-specific data
@@ -386,6 +390,7 @@ Protocol:   2.00+
        F  Special              (0xFF = undefined)
        10  Reserved
        11  Minimal Linux Bootloader <http://sebastian-plotz.blogspot.de>
+       12  OVMF UEFI virtualization stack
 
   Please contact <hpa@zytor.com> if you need a bootloader ID
   value assigned.
@@ -582,6 +587,27 @@ Protocol:  2.10+
   misaligned kernel.  Therefore, a loader should typically try each
   power-of-two alignment from kernel_alignment down to this alignment.
 
+Field name:     xloadflags
+Type:           read
+Offset/size:    0x236/2
+Protocol:       2.12+
+
+  This field is a bitmask.
+
+  Bit 0 (read):        XLF_KERNEL_64
+       - If 1, this kernel has the legacy 64-bit entry point at 0x200.
+
+  Bit 1 (read): XLF_CAN_BE_LOADED_ABOVE_4G
+        - If 1, kernel/boot_params/cmdline/ramdisk can be above 4G.
+
+  Bit 2 (read):        XLF_EFI_HANDOVER_32
+       - If 1, the kernel supports the 32-bit EFI handoff entry point
+          given at handover_offset.
+
+  Bit 3 (read): XLF_EFI_HANDOVER_64
+       - If 1, the kernel supports the 64-bit EFI handoff entry point
+          given at handover_offset + 0x200.
+
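As a hedged illustration (helper name and layout assumptions are invented
here, not taken from any particular boot loader), a loader might test these
bits roughly like this:

    #include <stdint.h>

    /* Bit values as documented above for the 16-bit xloadflags field. */
    #define XLF_KERNEL_64                   (1 << 0)
    #define XLF_CAN_BE_LOADED_ABOVE_4G      (1 << 1)
    #define XLF_EFI_HANDOVER_32             (1 << 2)
    #define XLF_EFI_HANDOVER_64             (1 << 3)

    /* Hypothetical helper: 'image' points at the start of the real-mode
     * kernel image, so xloadflags lives at offset 0x236 (little endian). */
    static int can_load_above_4g(const uint8_t *image)
    {
            uint16_t xloadflags = image[0x236] | (image[0x237] << 8);

            return (xloadflags & XLF_CAN_BE_LOADED_ABOVE_4G) != 0;
    }
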
 Field name:    cmdline_size
 Type:          read
 Offset/size:   0x238/4
index cf5437deda81a003864f9e8a5f4e533d276a189d..199f453cb4de10016030c2dd230fb9a3a3125cee 100644 (file)
@@ -19,6 +19,9 @@ Offset        Proto   Name            Meaning
 090/010        ALL     hd1_info        hd1 disk parameter, OBSOLETE!!
 0A0/010        ALL     sys_desc_table  System description table (struct sys_desc_table)
 0B0/010        ALL     olpc_ofw_header OLPC's OpenFirmware CIF and friends
+0C0/004        ALL     ext_ramdisk_image ramdisk_image high 32bits
+0C4/004        ALL     ext_ramdisk_size  ramdisk_size high 32bits
+0C8/004        ALL     ext_cmd_line_ptr  cmd_line_ptr high 32bits
 140/080        ALL     edid_info       Video mode setup (struct edid_info)
 1C0/020        ALL     efi_info        EFI 32 information (struct efi_info)
 1E0/004        ALL     alk_mem_k       Alternative mem check, in KB
@@ -27,6 +30,7 @@ Offset        Proto   Name            Meaning
 1E9/001        ALL     eddbuf_entries  Number of entries in eddbuf (below)
 1EA/001        ALL     edd_mbr_sig_buf_entries Number of entries in edd_mbr_sig_buffer
                                (below)
+1EF/001        ALL     sentinel        Used to detect broken bootloaders
 290/040        ALL     edd_mbr_sig_buffer EDD MBR signatures
 2D0/A00        ALL     e820_map        E820 memory map table
                                (array of struct e820entry)
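
The ext_ramdisk_image, ext_ramdisk_size and ext_cmd_line_ptr fields above
only carry the high 32 bits; as a minimal sketch (struct and function names
invented for illustration), a consumer combines the two halves like this:

    #include <stdint.h>

    /* Illustration only: just the two halves discussed above; the real
     * layout is in arch/x86/include/uapi/asm/bootparam.h. */
    struct ramdisk_addr_fields {
            uint32_t ramdisk_image;         /* low 32 bits (setup header) */
            uint32_t ext_ramdisk_image;     /* high 32 bits, offset 0x0C0 */
    };

    static uint64_t ramdisk_phys_addr(const struct ramdisk_addr_fields *f)
    {
            return ((uint64_t)f->ext_ramdisk_image << 32) | f->ramdisk_image;
    }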
index 915564eda14551b7b78bbf71666a72092ebf12ea..b1f98503ebda09506beadad33a74b9799e2f7cbf 100644 (file)
@@ -228,7 +228,7 @@ S:  Maintained
 F:     drivers/platform/x86/acerhdf.c
 
 ACER WMI LAPTOP EXTRAS
-M:     Joey Lee <jlee@novell.com>
+M:     "Lee, Chun-Yi" <jlee@suse.com>
 L:     platform-driver-x86@vger.kernel.org
 S:     Maintained
 F:     drivers/platform/x86/acer-wmi.c
@@ -648,7 +648,7 @@ F:  arch/arm/
 
 ARM SUB-ARCHITECTURES
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:     MAINTAINED
+S:     Maintained
 F:     arch/arm/mach-*/
 F:     arch/arm/plat-*/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git
@@ -1303,7 +1303,7 @@ F:        include/linux/dmaengine.h
 F:     include/linux/async_tx.h
 
 AT24 EEPROM DRIVER
-M:     Wolfram Sang <w.sang@pengutronix.de>
+M:     Wolfram Sang <wsa@the-dreams.de>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     drivers/misc/eeprom/at24.c
@@ -1351,6 +1351,14 @@ W:       http://wireless.kernel.org/en/users/Drivers/ath9k
 S:     Supported
 F:     drivers/net/wireless/ath/ath9k/
 
+WILOCITY WIL6210 WIRELESS DRIVER
+M:     Vladimir Kondratiev <qca_vkondrat@qca.qualcomm.com>
+L:     linux-wireless@vger.kernel.org
+L:     wil6210@qca.qualcomm.com
+S:     Supported
+W:     http://wireless.kernel.org/en/users/Drivers/wil6210
+F:     drivers/net/wireless/ath/wil6210/
+
 CARL9170 LINUX COMMUNITY WIRELESS DRIVER
 M:     Christian Lamparter <chunkeey@googlemail.com>
 L:     linux-wireless@vger.kernel.org
@@ -1481,7 +1489,7 @@ AVR32 ARCHITECTURE
 M:     Haavard Skinnemoen <hskinnemoen@gmail.com>
 M:     Hans-Christian Egtvedt <egtvedt@samfundet.no>
 W:     http://www.atmel.com/products/AVR32/
-W:     http://avr32linux.org/
+W:     http://mirror.egtvedt.no/avr32linux.org/
 W:     http://avrfreaks.net/
 S:     Maintained
 F:     arch/avr32/
@@ -1964,9 +1972,9 @@ S:        Maintained
 F:     drivers/usb/host/ohci-ep93xx.c
 
 CIRRUS LOGIC CS4270 SOUND DRIVER
-M:     Timur Tabi <timur@freescale.com>
+M:     Timur Tabi <timur@tabi.org>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
-S:     Supported
+S:     Odd Fixes
 F:     sound/soc/codecs/cs4270*
 
 CLEANCACHE API
@@ -2132,10 +2140,10 @@ S:      Maintained
 F:     tools/power/cpupower
 
 CPUSETS
-M:     Paul Menage <paul@paulmenage.org>
+M:     Li Zefan <lizefan@huawei.com>
 W:     http://www.bullopensource.org/cpuset/
 W:     http://oss.sgi.com/projects/cpusets/
-S:     Supported
+S:     Maintained
 F:     Documentation/cgroups/cpusets.txt
 F:     include/linux/cpuset.h
 F:     kernel/cpuset.c
@@ -2958,7 +2966,7 @@ S:        Maintained
 F:     drivers/net/ethernet/i825xx/eexpress.*
 
 ETHERNET BRIDGE
-M:     Stephen Hemminger <shemminger@vyatta.com>
+M:     Stephen Hemminger <stephen@networkplumber.org>
 L:     bridge@lists.linux-foundation.org
 L:     netdev@vger.kernel.org
 W:     http://www.linuxfoundation.org/en/Net:Bridge
@@ -3183,9 +3191,9 @@ F:        include/uapi/video/
 F:     include/uapi/linux/fb.h
 
 FREESCALE DIU FRAMEBUFFER DRIVER
-M:     Timur Tabi <timur@freescale.com>
+M:     Timur Tabi <timur@tabi.org>
 L:     linux-fbdev@vger.kernel.org
-S:     Supported
+S:     Maintained
 F:     drivers/video/fsl-diu-fb.*
 
 FREESCALE DMA DRIVER
@@ -3220,9 +3228,8 @@ F:        drivers/net/ethernet/freescale/fs_enet/
 F:     include/linux/fs_enet_pd.h
 
 FREESCALE QUICC ENGINE LIBRARY
-M:     Timur Tabi <timur@freescale.com>
 L:     linuxppc-dev@lists.ozlabs.org
-S:     Supported
+S:     Orphan
 F:     arch/powerpc/sysdev/qe_lib/
 F:     arch/powerpc/include/asm/*qe.h
 
@@ -3241,16 +3248,16 @@ S:      Maintained
 F:     drivers/net/ethernet/freescale/ucc_geth*
 
 FREESCALE QUICC ENGINE UCC UART DRIVER
-M:     Timur Tabi <timur@freescale.com>
+M:     Timur Tabi <timur@tabi.org>
 L:     linuxppc-dev@lists.ozlabs.org
-S:     Supported
+S:     Maintained
 F:     drivers/tty/serial/ucc_uart.c
 
 FREESCALE SOC SOUND DRIVERS
-M:     Timur Tabi <timur@freescale.com>
+M:     Timur Tabi <timur@tabi.org>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 L:     linuxppc-dev@lists.ozlabs.org
-S:     Supported
+S:     Maintained
 F:     sound/soc/fsl/fsl*
 F:     sound/soc/fsl/mpc8610_hpcd.c
 
@@ -3750,12 +3757,11 @@ S:      Maintained
 F:     drivers/i2c/i2c-stub.c
 
 I2C SUBSYSTEM
-M:     Wolfram Sang <w.sang@pengutronix.de>
+M:     Wolfram Sang <wsa@the-dreams.de>
 M:     "Ben Dooks (embedded platforms)" <ben-linux@fluff.org>
 L:     linux-i2c@vger.kernel.org
 W:     http://i2c.wiki.kernel.org/
-T:     quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-i2c/
-T:     git git://git.pengutronix.de/git/wsa/linux.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux.git
 S:     Maintained
 F:     Documentation/i2c/
 F:     drivers/i2c/
@@ -4898,7 +4904,7 @@ S:        Maintained
 
 MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
 M:     Mirko Lindner <mlindner@marvell.com>
-M:     Stephen Hemminger <shemminger@vyatta.com>
+M:     Stephen Hemminger <stephen@networkplumber.org>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/marvell/sk*
@@ -5077,7 +5083,7 @@ S:        Maintained
 F:     drivers/media/radio/radio-mr800.c
 
 MSI LAPTOP SUPPORT
-M:     "Lee, Chun-Yi" <jlee@novell.com>
+M:     "Lee, Chun-Yi" <jlee@suse.com>
 L:     platform-driver-x86@vger.kernel.org
 S:     Maintained
 F:     drivers/platform/x86/msi-laptop.c
@@ -5173,7 +5179,7 @@ S:        Supported
 F:     drivers/infiniband/hw/nes/
 
 NETEM NETWORK EMULATOR
-M:     Stephen Hemminger <shemminger@vyatta.com>
+M:     Stephen Hemminger <stephen@networkplumber.org>
 L:     netem@lists.linux-foundation.org
 S:     Maintained
 F:     net/sched/sch_netem.c
@@ -5507,8 +5513,7 @@ M:        Benoît Cousson <b-cousson@ti.com>
 M:     Paul Walmsley <paul@pwsan.com>
 L:     linux-omap@vger.kernel.org
 S:     Maintained
-F:     arch/arm/mach-omap2/omap_hwmod.c
-F:     arch/arm/plat-omap/include/plat/omap_hwmod.h
+F:     arch/arm/mach-omap2/omap_hwmod.*
 
 OMAP HWMOD DATA FOR OMAP4-BASED DEVICES
 M:     Benoît Cousson <b-cousson@ti.com>
@@ -5772,15 +5777,6 @@ L:       linux-i2c@vger.kernel.org
 S:     Maintained
 F:     drivers/i2c/muxes/i2c-mux-pca9541.c
 
-PCA9564/PCA9665 I2C BUS DRIVER
-M:     Wolfram Sang <w.sang@pengutronix.de>
-L:     linux-i2c@vger.kernel.org
-S:     Maintained
-F:     drivers/i2c/algos/i2c-algo-pca.c
-F:     drivers/i2c/busses/i2c-pca-*
-F:     include/linux/i2c-algo-pca.h
-F:     include/linux/i2c-pca-platform.h
-
 PCDP - PRIMARY CONSOLE AND DEBUG PORT
 M:     Khalid Aziz <khalid@gonehiking.org>
 S:     Maintained
@@ -6579,7 +6575,7 @@ F:        drivers/media/platform/s3c-camif/
 F:     include/media/s3c_camif.h
 
 SERIAL DRIVERS
-M:     Alan Cox <alan@linux.intel.com>
+M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:     linux-serial@vger.kernel.org
 S:     Maintained
 F:     drivers/tty/serial
@@ -6592,7 +6588,7 @@ F:        drivers/dma/dw_dmac_regs.h
 F:     drivers/dma/dw_dmac.c
 
 TIMEKEEPING, NTP
-M:     John Stultz <johnstul@us.ibm.com>
+M:     John Stultz <john.stultz@linaro.org>
 M:     Thomas Gleixner <tglx@linutronix.de>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:     Supported
@@ -7082,7 +7078,7 @@ F:        include/uapi/sound/
 F:     sound/
 
 SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
-M:     Liam Girdwood <lrg@ti.com>
+M:     Liam Girdwood <lgirdwood@gmail.com>
 M:     Mark Brown <broonie@opensource.wolfsonmicro.com>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -7334,7 +7330,7 @@ S:        Odd Fixes
 F:     drivers/staging/speakup/
 
 STAGING - TI DSP BRIDGE DRIVERS
-M:     Omar Ramirez Luna <omar.ramirez@ti.com>
+M:     Omar Ramirez Luna <omar.ramirez@copitl.com>
 S:     Odd Fixes
 F:     drivers/staging/tidspbridge/
 
@@ -7537,6 +7533,11 @@ F:       drivers/net/team/
 F:     include/linux/if_team.h
 F:     include/uapi/linux/if_team.h
 
+TECHNOLOGIC SYSTEMS TS-5500 PLATFORM SUPPORT
+M:     Savoir-faire Linux Inc. <kernel@savoirfairelinux.com>
+S:     Maintained
+F:     arch/x86/platform/ts5500/
+
 TECHNOTREND USB IR RECEIVER
 M:     Sean Young <sean@mess.org>
 L:     linux-media@vger.kernel.org
@@ -7611,6 +7612,22 @@ F:       Documentation/backlight/lp855x-driver.txt
 F:     drivers/video/backlight/lp855x_bl.c
 F:     include/linux/platform_data/lp855x.h
 
+TI LP8727 CHARGER DRIVER
+M:     Milo Kim <milo.kim@ti.com>
+S:     Maintained
+F:     drivers/power/lp8727_charger.c
+F:     include/linux/platform_data/lp8727.h
+
+TI LP8788 MFD DRIVER
+M:     Milo Kim <milo.kim@ti.com>
+S:     Maintained
+F:     drivers/iio/adc/lp8788_adc.c
+F:     drivers/leds/leds-lp8788.c
+F:     drivers/mfd/lp8788*.c
+F:     drivers/power/lp8788-charger.c
+F:     drivers/regulator/lp8788-*.c
+F:     include/linux/mfd/lp8788*.h
+
 TI TWL4030 SERIES SOC CODEC DRIVER
 M:     Peter Ujfalusi <peter.ujfalusi@ti.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -8526,7 +8543,7 @@ F:        Documentation/x86/
 F:     arch/x86/
 
 X86 PLATFORM DRIVERS
-M:     Matthew Garrett <mjg@redhat.com>
+M:     Matthew Garrett <matthew.garrett@nebula.com>
 L:     platform-driver-x86@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.git
 S:     Maintained
index a1667c4bcce580a25e141672daf44b550f4ee5e9..6fccf653177055cb4e250096d33df5eeb21a8671 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 3
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
-NAME = Terrified Chipmunk
+EXTRAVERSION =
+NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -165,11 +165,12 @@ export srctree objtree VPATH
 # then ARCH is assigned, getting whatever value it gets normally, and 
 # SUBARCH is subsequently ignored.
 
-SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
+SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
+                                 -e s/sun4u/sparc64/ \
                                  -e s/arm.*/arm/ -e s/sa110/arm/ \
                                  -e s/s390x/s390/ -e s/parisc64/parisc/ \
                                  -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
-                                 -e s/sh[234].*/sh/ )
+                                 -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ )
 
 # Cross compiling and selecting different set of gcc/bin-utils
 # ---------------------------------------------------------------------------
index 7f8f281f2585e25ee74853bfb88183514ce3d5ca..97fb7d0365d15505d500a126588f889a7647fcd8 100644 (file)
@@ -76,6 +76,15 @@ config OPTPROBES
        depends on KPROBES && HAVE_OPTPROBES
        depends on !PREEMPT
 
+config KPROBES_ON_FTRACE
+       def_bool y
+       depends on KPROBES && HAVE_KPROBES_ON_FTRACE
+       depends on DYNAMIC_FTRACE_WITH_REGS
+       help
+        If function tracer is enabled and the arch supports full
+        passing of pt_regs to function tracing, then kprobes can
+        optimize on top of function tracing.
+
 config UPROBES
        bool "Transparent user-space probes (EXPERIMENTAL)"
        depends on UPROBE_EVENT && PERF_EVENTS
@@ -158,6 +167,9 @@ config HAVE_KRETPROBES
 config HAVE_OPTPROBES
        bool
 
+config HAVE_KPROBES_ON_FTRACE
+       bool
+
 config HAVE_NMI_WATCHDOG
        bool
 #
index 9d5904cc7712fbdd64ce3d682617d5f31be6b268..9b504af2e9667168a0ed47143bcaac4c2517745f 100644 (file)
@@ -5,7 +5,6 @@ config ALPHA
        select HAVE_IDE
        select HAVE_OPROFILE
        select HAVE_SYSCALL_WRAPPERS
-       select HAVE_IRQ_WORK
        select HAVE_PCSPKR_PLATFORM
        select HAVE_PERF_EVENTS
        select HAVE_DMA_ATTRS
index 14db93e4c8a83662d459a6ff1a818318dbc61eeb..dbc1760f418b4c26914a0bd6a05456d112f76d8b 100644 (file)
@@ -1139,6 +1139,7 @@ struct rusage32 {
 SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
 {
        struct rusage32 r;
+       cputime_t utime, stime;
 
        if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
                return -EINVAL;
@@ -1146,8 +1147,9 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
        memset(&r, 0, sizeof(r));
        switch (who) {
        case RUSAGE_SELF:
-               jiffies_to_timeval32(current->utime, &r.ru_utime);
-               jiffies_to_timeval32(current->stime, &r.ru_stime);
+               task_cputime(current, &utime, &stime);
+               jiffies_to_timeval32(utime, &r.ru_utime);
+               jiffies_to_timeval32(stime, &r.ru_stime);
                r.ru_minflt = current->min_flt;
                r.ru_majflt = current->maj_flt;
                break;
index 67874b82a4edf318ae3718ae6137393140405586..6c0900a9bf5c30039bf852ea42bd3612b522244e 100644 (file)
@@ -36,7 +36,6 @@ config ARM
        select HAVE_GENERIC_HARDIRQS
        select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
        select HAVE_IDE if PCI || ISA || PCMCIA
-       select HAVE_IRQ_WORK
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_LZMA
        select HAVE_KERNEL_LZO
@@ -1637,7 +1636,7 @@ config ARCH_NR_GPIO
        default 355 if ARCH_U8500
        default 264 if MACH_H4700
        default 512 if SOC_OMAP5
-       default 288 if ARCH_VT8500
+       default 288 if ARCH_VT8500 || ARCH_SUNXI
        default 0
        help
          Maximum number of GPIOs in the system.
index e44da40d984f7faa18bb8a32f51f5f1fe233f20a..5ebb44fe826a9b0b36d051272910a26775e5a16f 100644 (file)
@@ -155,6 +155,7 @@ dtb-$(CONFIG_ARCH_VT8500) += vt8500-bv07.dtb \
 dtb-$(CONFIG_ARCH_ZYNQ) += zynq-zc702.dtb
 
 targets += dtbs
+targets += $(dtb-y)
 endif
 
 # *.dtb used to be generated in the directory above. Clean out the
index 00044026ef1f470bd6bc6eb85b3409d2c0bf6556..9b82facb2561cfdb1cbc9cda25f5021668752e4a 100644 (file)
@@ -26,7 +26,7 @@
 
        memory {
                device_type = "memory";
-               reg = <0x00000000 0x20000000>; /* 512 MB */
+               reg = <0x00000000 0x40000000>; /* 1 GB */
        };
 
        soc {
index 271855a6e224758800f1146f02dd992786dab801..e041f42ed711b7e31fe93f2601fc153b2db6c2c0 100644 (file)
                };
 
                gpio0: gpio@d0018100 {
-                       compatible = "marvell,armadaxp-gpio";
-                       reg = <0xd0018100 0x40>,
-                           <0xd0018800 0x30>;
+                       compatible = "marvell,orion-gpio";
+                       reg = <0xd0018100 0x40>;
                        ngpios = <32>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        interrupt-controller;
                        #interrupts-cells = <2>;
-                       interrupts = <16>, <17>, <18>, <19>;
+                       interrupts = <82>, <83>, <84>, <85>;
                };
 
                gpio1: gpio@d0018140 {
-                       compatible = "marvell,armadaxp-gpio";
-                       reg = <0xd0018140 0x40>,
-                           <0xd0018840 0x30>;
+                       compatible = "marvell,orion-gpio";
+                       reg = <0xd0018140 0x40>;
                        ngpios = <17>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        interrupt-controller;
                        #interrupts-cells = <2>;
-                       interrupts = <20>, <21>, <22>;
+                       interrupts = <87>, <88>, <89>;
                };
        };
 };
index 1c1937dbce73c1d320f9689fd62fe9a05c851fd2..9e23bd8c9536d9b3d02c14a275efad5111d03f4a 100644 (file)
                };
 
                gpio0: gpio@d0018100 {
-                       compatible = "marvell,armadaxp-gpio";
-                       reg = <0xd0018100 0x40>,
-                           <0xd0018800 0x30>;
+                       compatible = "marvell,orion-gpio";
+                       reg = <0xd0018100 0x40>;
                        ngpios = <32>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        interrupt-controller;
                        #interrupts-cells = <2>;
-                       interrupts = <16>, <17>, <18>, <19>;
+                       interrupts = <82>, <83>, <84>, <85>;
                };
 
                gpio1: gpio@d0018140 {
-                       compatible = "marvell,armadaxp-gpio";
-                       reg = <0xd0018140 0x40>,
-                           <0xd0018840 0x30>;
+                       compatible = "marvell,orion-gpio";
+                       reg = <0xd0018140 0x40>;
                        ngpios = <32>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        interrupt-controller;
                        #interrupts-cells = <2>;
-                       interrupts = <20>, <21>, <22>, <23>;
+                       interrupts = <87>, <88>, <89>, <90>;
                };
 
                gpio2: gpio@d0018180 {
-                       compatible = "marvell,armadaxp-gpio";
-                       reg = <0xd0018180 0x40>,
-                           <0xd0018870 0x30>;
+                       compatible = "marvell,orion-gpio";
+                       reg = <0xd0018180 0x40>;
                        ngpios = <3>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        interrupt-controller;
                        #interrupts-cells = <2>;
-                       interrupts = <24>;
+                       interrupts = <91>;
                };
 
                ethernet@d0034000 {
index 4905cf3a5ef85edc82dee68e0fafe978bd919f5c..965966110e3850a46eb247ca115c28ef827aa984 100644 (file)
                };
 
                gpio0: gpio@d0018100 {
-                       compatible = "marvell,armadaxp-gpio";
-                       reg = <0xd0018100 0x40>,
-                           <0xd0018800 0x30>;
+                       compatible = "marvell,orion-gpio";
+                       reg = <0xd0018100 0x40>;
                        ngpios = <32>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        interrupt-controller;
                        #interrupts-cells = <2>;
-                       interrupts = <16>, <17>, <18>, <19>;
+                       interrupts = <82>, <83>, <84>, <85>;
                };
 
                gpio1: gpio@d0018140 {
-                       compatible = "marvell,armadaxp-gpio";
-                       reg = <0xd0018140 0x40>,
-                           <0xd0018840 0x30>;
+                       compatible = "marvell,orion-gpio";
+                       reg = <0xd0018140 0x40>;
                        ngpios = <32>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        interrupt-controller;
                        #interrupts-cells = <2>;
-                       interrupts = <20>, <21>, <22>, <23>;
+                       interrupts = <87>, <88>, <89>, <90>;
                };
 
                gpio2: gpio@d0018180 {
-                       compatible = "marvell,armadaxp-gpio";
-                       reg = <0xd0018180 0x40>,
-                           <0xd0018870 0x30>;
+                       compatible = "marvell,orion-gpio";
+                       reg = <0xd0018180 0x40>;
                        ngpios = <3>;
                        gpio-controller;
                        #gpio-cells = <2>;
                        interrupt-controller;
                        #interrupts-cells = <2>;
-                       interrupts = <24>;
+                       interrupts = <91>;
                };
 
                ethernet@d0034000 {
index e154f242c680e0ca3db90aabf823490dcb146607..222047f1ece9bfac7ed0562540bfaff483dc89e3 100644 (file)
 
        i2c@0 {
                compatible = "i2c-gpio";
-               gpios = <&pioA 23 0 /* sda */
-                        &pioA 24 0 /* scl */
+               gpios = <&pioA 25 0 /* sda */
+                        &pioA 26 0 /* scl */
                        >;
                i2c-gpio,sda-open-drain;
                i2c-gpio,scl-open-drain;
index 68bccf41a2c6a1855c6dfd87555faabf81306cb7..cb7bcc51608d81cd51bba98ec29455973ca9daf0 100644 (file)
                                        };
                                };
 
+                               ssc0 {
+                                       pinctrl_ssc0_tx: ssc0_tx-0 {
+                                               atmel,pins =
+                                                       <1 16 0x1 0x0   /* PB16 periph A */
+                                                        1 17 0x1 0x0   /* PB17 periph A */
+                                                        1 18 0x1 0x0>; /* PB18 periph A */
+                                       };
+
+                                       pinctrl_ssc0_rx: ssc0_rx-0 {
+                                               atmel,pins =
+                                                       <1 19 0x1 0x0   /* PB19 periph A */
+                                                        1 20 0x1 0x0   /* PB20 periph A */
+                                                        1 21 0x1 0x0>; /* PB21 periph A */
+                                       };
+                               };
+
                                pioA: gpio@fffff400 {
                                        compatible = "atmel,at91rm9200-gpio";
                                        reg = <0xfffff400 0x200>;
                                compatible = "atmel,at91rm9200-ssc";
                                reg = <0xfffbc000 0x4000>;
                                interrupts = <14 4 5>;
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
                                status = "disabled";
                        };
 
index 32ec62cf538585bf496187e0076afabd67c7be27..271d4de026e9df4ca1fc0114209e22f4da4d8cb5 100644 (file)
                                        };
                                };
 
+                               ssc0 {
+                                       pinctrl_ssc0_tx: ssc0_tx-0 {
+                                               atmel,pins =
+                                                       <1 0 0x2 0x0    /* PB0 periph B */
+                                                        1 1 0x2 0x0    /* PB1 periph B */
+                                                        1 2 0x2 0x0>;  /* PB2 periph B */
+                                       };
+
+                                       pinctrl_ssc0_rx: ssc0_rx-0 {
+                                               atmel,pins =
+                                                       <1 3 0x2 0x0    /* PB3 periph B */
+                                                        1 4 0x2 0x0    /* PB4 periph B */
+                                                        1 5 0x2 0x0>;  /* PB5 periph B */
+                                       };
+                               };
+
+                               ssc1 {
+                                       pinctrl_ssc1_tx: ssc1_tx-0 {
+                                               atmel,pins =
+                                                       <1 6 0x1 0x0    /* PB6 periph A */
+                                                        1 7 0x1 0x0    /* PB7 periph A */
+                                                        1 8 0x1 0x0>;  /* PB8 periph A */
+                                       };
+
+                                       pinctrl_ssc1_rx: ssc1_rx-0 {
+                                               atmel,pins =
+                                                       <1 9 0x1 0x0    /* PB9 periph A */
+                                                        1 10 0x1 0x0   /* PB10 periph A */
+                                                        1 11 0x1 0x0>; /* PB11 periph A */
+                                       };
+                               };
+
                                pioA: gpio@fffff200 {
                                        compatible = "atmel,at91rm9200-gpio";
                                        reg = <0xfffff200 0x200>;
                                compatible = "atmel,at91rm9200-ssc";
                                reg = <0xfff98000 0x4000>;
                                interrupts = <16 4 5>;
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
                                status = "disabled";
                        };
 
                                compatible = "atmel,at91rm9200-ssc";
                                reg = <0xfff9c000 0x4000>;
                                interrupts = <17 4 5>;
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_ssc1_tx &pinctrl_ssc1_rx>;
                                status = "disabled";
                        };
 
index 231858ffd850fec57eee5a91f06aee53f4c22f7f..6b1d4cab24c2a9e62537515991f109287dbc0091 100644 (file)
                                        };
                                };
 
+                               ssc0 {
+                                       pinctrl_ssc0_tx: ssc0_tx-0 {
+                                               atmel,pins =
+                                                       <3 0 0x1 0x0    /* PD0 periph A */
+                                                        3 1 0x1 0x0    /* PD1 periph A */
+                                                        3 2 0x1 0x0>;  /* PD2 periph A */
+                                       };
+
+                                       pinctrl_ssc0_rx: ssc0_rx-0 {
+                                               atmel,pins =
+                                                       <3 3 0x1 0x0    /* PD3 periph A */
+                                                        3 4 0x1 0x0    /* PD4 periph A */
+                                                        3 5 0x1 0x0>;  /* PD5 periph A */
+                                       };
+                               };
+
+                               ssc1 {
+                                       pinctrl_ssc1_tx: ssc1_tx-0 {
+                                               atmel,pins =
+                                                       <3 10 0x1 0x0   /* PD10 periph A */
+                                                        3 11 0x1 0x0   /* PD11 periph A */
+                                                        3 12 0x1 0x0>; /* PD12 periph A */
+                                       };
+
+                                       pinctrl_ssc1_rx: ssc1_rx-0 {
+                                               atmel,pins =
+                                                       <3 13 0x1 0x0   /* PD13 periph A */
+                                                        3 14 0x1 0x0   /* PD14 periph A */
+                                                        3 15 0x1 0x0>; /* PD15 periph A */
+                                       };
+                               };
+
                                pioA: gpio@fffff200 {
                                        compatible = "atmel,at91rm9200-gpio";
                                        reg = <0xfffff200 0x200>;
                                compatible = "atmel,at91sam9g45-ssc";
                                reg = <0xfff9c000 0x4000>;
                                interrupts = <16 4 5>;
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
                                status = "disabled";
                        };
 
                                compatible = "atmel,at91sam9g45-ssc";
                                reg = <0xfffa0000 0x4000>;
                                interrupts = <17 4 5>;
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_ssc1_tx &pinctrl_ssc1_rx>;
                                status = "disabled";
                        };
 
index e9efb34f437983370649fa096c006fe1638c1e01..80e29c605d4e53de1a8f948b5acb0eb975aee641 100644 (file)
@@ -28,6 +28,7 @@
                tcb1 = &tcb1;
                i2c0 = &i2c0;
                i2c1 = &i2c1;
+               ssc0 = &ssc0;
        };
        cpus {
                cpu@0 {
                                        };
                                };
 
+                               ssc0 {
+                                       pinctrl_ssc0_tx: ssc0_tx-0 {
+                                               atmel,pins =
+                                                       <0 24 0x2 0x0   /* PA24 periph B */
+                                                        0 25 0x2 0x0   /* PA25 periph B */
+                                                        0 26 0x2 0x0>; /* PA26 periph B */
+                                       };
+
+                                       pinctrl_ssc0_rx: ssc0_rx-0 {
+                                               atmel,pins =
+                                                       <0 27 0x2 0x0   /* PA27 periph B */
+                                                        0 28 0x2 0x0   /* PA28 periph B */
+                                                        0 29 0x2 0x0>; /* PA29 periph B */
+                                       };
+                               };
+
                                pioA: gpio@fffff400 {
                                        compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
                                        reg = <0xfffff400 0x200>;
                                status = "disabled";
                        };
 
+                       ssc0: ssc@f0010000 {
+                               compatible = "atmel,at91sam9g45-ssc";
+                               reg = <0xf0010000 0x4000>;
+                               interrupts = <28 4 5>;
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
+                               status = "disabled";
+                       };
+
                        usart0: serial@f801c000 {
                                compatible = "atmel,at91sam9260-usart";
                                reg = <0xf801c000 0x4000>;
index 40ac3a4eb1abc80a3560fbf6e33183bdd9dd5e72..8ecca6948d811f827e623e48f039238dad9fe6bf 100644 (file)
                                interrupts = <1 4 7>;
                        };
 
-                       ssc0: ssc@f0010000 {
-                               compatible = "atmel,at91sam9g45-ssc";
-                               reg = <0xf0010000 0x4000>;
-                               interrupts = <28 4 5>;
-                               status = "disabled";
-                       };
-
                        tcb0: timer@f8008000 {
                                compatible = "atmel,at91sam9x5-tcb";
                                reg = <0xf8008000 0x100>;
                                                atmel,pins =
                                                        <0 3 0x1 0x0>;  /* PA3 periph A */
                                        };
+
+                                       pinctrl_usart0_sck: usart0_sck-0 {
+                                               atmel,pins =
+                                                       <0 4 0x1 0x0>;  /* PA4 periph A */
+                                       };
                                };
 
                                usart1 {
 
                                        pinctrl_usart1_rts: usart1_rts-0 {
                                                atmel,pins =
-                                                       <3 27 0x3 0x0>; /* PC27 periph C */
+                                                       <2 27 0x3 0x0>; /* PC27 periph C */
                                        };
 
                                        pinctrl_usart1_cts: usart1_cts-0 {
                                                atmel,pins =
-                                                       <3 28 0x3 0x0>; /* PC28 periph C */
+                                                       <2 28 0x3 0x0>; /* PC28 periph C */
+                                       };
+
+                                       pinctrl_usart1_sck: usart1_sck-0 {
+                                               atmel,pins =
+                                                       <2 28 0x3 0x0>; /* PC29 periph C */
                                        };
                                };
 
 
                                        pinctrl_uart2_rts: uart2_rts-0 {
                                                atmel,pins =
-                                                       <0 0 0x2 0x0>;  /* PB0 periph B */
+                                                       <1 0 0x2 0x0>;  /* PB0 periph B */
                                        };
 
                                        pinctrl_uart2_cts: uart2_cts-0 {
                                                atmel,pins =
-                                                       <0 1 0x2 0x0>;  /* PB1 periph B */
+                                                       <1 1 0x2 0x0>;  /* PB1 periph B */
+                                       };
+
+                                       pinctrl_usart2_sck: usart2_sck-0 {
+                                               atmel,pins =
+                                                       <1 2 0x2 0x0>;  /* PB2 periph B */
                                        };
                                };
 
                                usart3 {
                                        pinctrl_uart3: usart3-0 {
                                                atmel,pins =
-                                                       <3 23 0x2 0x1   /* PC22 periph B with pullup */
-                                                        3 23 0x2 0x0>; /* PC23 periph B */
+                                                       <2 23 0x2 0x1   /* PC22 periph B with pullup */
+                                                        2 23 0x2 0x0>; /* PC23 periph B */
                                        };
 
                                        pinctrl_usart3_rts: usart3_rts-0 {
                                                atmel,pins =
-                                                       <3 24 0x2 0x0>; /* PC24 periph B */
+                                                       <2 24 0x2 0x0>; /* PC24 periph B */
                                        };
 
                                        pinctrl_usart3_cts: usart3_cts-0 {
                                                atmel,pins =
-                                                       <3 25 0x2 0x0>; /* PC25 periph B */
+                                                       <2 25 0x2 0x0>; /* PC25 periph B */
+                                       };
+
+                                       pinctrl_usart3_sck: usart3_sck-0 {
+                                               atmel,pins =
+                                                       <2 26 0x2 0x0>; /* PC26 periph B */
                                        };
                                };
 
                                uart0 {
                                        pinctrl_uart0: uart0-0 {
                                                atmel,pins =
-                                                       <3 8 0x3 0x0    /* PC8 periph C */
-                                                        3 9 0x3 0x1>;  /* PC9 periph C with pullup */
+                                                       <2 8 0x3 0x0    /* PC8 periph C */
+                                                        2 9 0x3 0x1>;  /* PC9 periph C with pullup */
                                        };
                                };
 
                                uart1 {
                                        pinctrl_uart1: uart1-0 {
                                                atmel,pins =
-                                                       <3 16 0x3 0x0   /* PC16 periph C */
-                                                        3 17 0x3 0x1>; /* PC17 periph C with pullup */
+                                                       <2 16 0x3 0x0   /* PC16 periph C */
+                                                        2 17 0x3 0x1>; /* PC17 periph C with pullup */
                                        };
                                };
 
 
                                        pinctrl_macb0_rmii_mii: macb0_rmii_mii-0 {
                                                atmel,pins =
-                                                       <1 8 0x1 0x0    /* PA8 periph A */
-                                                        1 11 0x1 0x0   /* PA11 periph A */
-                                                        1 12 0x1 0x0   /* PA12 periph A */
-                                                        1 13 0x1 0x0   /* PA13 periph A */
-                                                        1 14 0x1 0x0   /* PA14 periph A */
-                                                        1 15 0x1 0x0   /* PA15 periph A */
-                                                        1 16 0x1 0x0   /* PA16 periph A */
-                                                        1 17 0x1 0x0>; /* PA17 periph A */
+                                                       <1 8 0x1 0x0    /* PB8 periph A */
+                                                        1 11 0x1 0x0   /* PB11 periph A */
+                                                        1 12 0x1 0x0   /* PB12 periph A */
+                                                        1 13 0x1 0x0   /* PB13 periph A */
+                                                        1 14 0x1 0x0   /* PB14 periph A */
+                                                        1 15 0x1 0x0   /* PB15 periph A */
+                                                        1 16 0x1 0x0   /* PB16 periph A */
+                                                        1 17 0x1 0x0>; /* PB17 periph A */
                                        };
                                };
 
                                        };
                                };
 
+                               ssc0 {
+                                       pinctrl_ssc0_tx: ssc0_tx-0 {
+                                               atmel,pins =
+                                                       <0 24 0x2 0x0   /* PA24 periph B */
+                                                        0 25 0x2 0x0   /* PA25 periph B */
+                                                        0 26 0x2 0x0>; /* PA26 periph B */
+                                       };
+
+                                       pinctrl_ssc0_rx: ssc0_rx-0 {
+                                               atmel,pins =
+                                                       <0 27 0x2 0x0   /* PA27 periph B */
+                                                        0 28 0x2 0x0   /* PA28 periph B */
+                                                        0 29 0x2 0x0>; /* PA29 periph B */
+                                       };
+                               };
+
                                pioA: gpio@fffff400 {
                                        compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
                                        reg = <0xfffff400 0x200>;
                                };
                        };
 
+                       ssc0: ssc@f0010000 {
+                               compatible = "atmel,at91sam9g45-ssc";
+                               reg = <0xf0010000 0x4000>;
+                               interrupts = <28 4 5>;
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
+                               status = "disabled";
+                       };
+
                        mmc0: mmc@f0008000 {
                                compatible = "atmel,hsmci";
                                reg = <0xf0008000 0x600>;
index fddd1741743320fbdd29afbdb75133ae46673fa1..46c09801703655eb0a8fb28b8332cafa674f9b72 100644 (file)
@@ -96,8 +96,8 @@
                fifo-depth = <0x80>;
                card-detect-delay = <200>;
                samsung,dw-mshc-ciu-div = <3>;
-               samsung,dw-mshc-sdr-timing = <2 3 3>;
-               samsung,dw-mshc-ddr-timing = <1 2 3>;
+               samsung,dw-mshc-sdr-timing = <2 3>;
+               samsung,dw-mshc-ddr-timing = <1 2>;
 
                slot@0 {
                        reg = <0>;
                fifo-depth = <0x80>;
                card-detect-delay = <200>;
                samsung,dw-mshc-ciu-div = <3>;
-               samsung,dw-mshc-sdr-timing = <2 3 3>;
-               samsung,dw-mshc-ddr-timing = <1 2 3>;
+               samsung,dw-mshc-sdr-timing = <2 3>;
+               samsung,dw-mshc-ddr-timing = <1 2>;
 
                slot@0 {
                        reg = <0>;
                fifo-depth = <0x80>;
                card-detect-delay = <200>;
                samsung,dw-mshc-ciu-div = <3>;
-               samsung,dw-mshc-sdr-timing = <2 3 3>;
-               samsung,dw-mshc-ddr-timing = <1 2 3>;
+               samsung,dw-mshc-sdr-timing = <2 3>;
+               samsung,dw-mshc-ddr-timing = <1 2>;
 
                slot@0 {
                        reg = <0>;
index 63f2fbcfe8196823a93ac5f74f2eadebd3ae52f7..69140ba99f465ea357980c84cc232924a69071c0 100644 (file)
                        gpio-bank = <8>;
                };
 
-               pinctrl@80157000 {
-                       // This is actually the PRCMU base address
-                       reg = <0x80157000 0x2000>;
-                       compatible = "stericsson,nmk_pinctrl";
+               pinctrl {
+                       compatible = "stericsson,nmk-pinctrl";
+                       prcm = <&prcmu>;
                };
 
                usb@a03e0000 {
                        interrupts = <0 25 0x4>;
                };
 
-               prcmu@80157000 {
+               prcmu: prcmu@80157000 {
                        compatible = "stericsson,db8500-prcmu";
                        reg = <0x80157000 0x1000>;
+                       reg-names = "prcmu";
                        interrupts = <0 47 0x4>;
                        #address-cells = <1>;
                        #size-cells = <1>;
index fed7d3f9f431071af027d72a12c92dcb6f365943..cdee96fca6e25fec9a0e24208344c03621298721 100644 (file)
 };
 
 &uart0 { status = "okay"; };
-&sdio0 { status = "okay"; };
 &sata0 { status = "okay"; };
 &i2c0 { status = "okay"; };
 
+&sdio0 {
+       status = "okay";
+       /* sdio0 card detect is connected to the wrong pin on CuBox */
+       cd-gpios = <&gpio0 12 1>;
+};
+
 &spi0 {
        status = "okay";
 
 };
 
 &pinctrl {
-       pinctrl-0 = <&pmx_gpio_18>;
+       pinctrl-0 = <&pmx_gpio_12 &pmx_gpio_18>;
        pinctrl-names = "default";
 
+       pmx_gpio_12: pmx-gpio-12 {
+               marvell,pins = "mpp12";
+               marvell,function = "gpio";
+       };
+
        pmx_gpio_18: pmx-gpio-18 {
                marvell,pins = "mpp18";
                marvell,function = "gpio";
index 942d5761ca971dd7317c8398d5ddaddd0b0fce98..e05b18f3c33d67d6e5202083bf60f8aa65b4344f 100644 (file)
                fifo-depth = <0x80>;
                card-detect-delay = <200>;
                samsung,dw-mshc-ciu-div = <3>;
-               samsung,dw-mshc-sdr-timing = <2 3 3>;
-               samsung,dw-mshc-ddr-timing = <1 2 3>;
+               samsung,dw-mshc-sdr-timing = <2 3>;
+               samsung,dw-mshc-ddr-timing = <1 2>;
 
                slot@0 {
                        reg = <0>;
                fifo-depth = <0x80>;
                card-detect-delay = <200>;
                samsung,dw-mshc-ciu-div = <3>;
-               samsung,dw-mshc-sdr-timing = <2 3 3>;
-               samsung,dw-mshc-ddr-timing = <1 2 3>;
+               samsung,dw-mshc-sdr-timing = <2 3>;
+               samsung,dw-mshc-ddr-timing = <1 2>;
 
                slot@0 {
                        reg = <0>;
index 9bc6785ad228f9522c73ad0df60befa33ef798a4..77d21abfcdf73a3430f529be076cb4f17cd9e13d 100644 (file)
@@ -1,4 +1,5 @@
 /include/ "kirkwood.dtsi"
+/include/ "kirkwood-6281.dtsi"
 
 / {
        chosen {
@@ -6,6 +7,21 @@
        };
 
        ocp@f1000000 {
+               pinctrl: pinctrl@10000 {
+                       pinctrl-0 = < &pmx_spi &pmx_twsi0 &pmx_uart0
+                                       &pmx_ns2_sata0 &pmx_ns2_sata1>;
+                       pinctrl-names = "default";
+
+                       pmx_ns2_sata0: pmx-ns2-sata0 {
+                               marvell,pins = "mpp21";
+                               marvell,function = "sata0";
+                       };
+                       pmx_ns2_sata1: pmx-ns2-sata1 {
+                               marvell,pins = "mpp20";
+                               marvell,function = "sata1";
+                       };
+               };
+
                serial@12000 {
                        clock-frequency = <166666667>;
                        status = "okay";
index 110d6cbb795b384aa330c422d6409057839bb3ee..d6ab442b7011e8777efa036b551aa8e8ef438e49 100644 (file)
@@ -36,6 +36,7 @@
                        reg = <0x10100 0x40>;
                        ngpios = <32>;
                        interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupts = <35>, <36>, <37>, <38>;
                };
 
@@ -46,6 +47,7 @@
                        reg = <0x10140 0x40>;
                        ngpios = <18>;
                        interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupts = <39>, <40>, <41>;
                };
 
index e8814fe0e27760b475b0c850880e44b0923a8b50..b4dc3ed9a3ecc95949d07e79889e53a8600d66ee 100644 (file)
@@ -48,6 +48,8 @@
 
                        macb0: ethernet@fffc4000 {
                                phy-mode = "mii";
+                               pinctrl-0 = <&pinctrl_macb_rmii
+                                            &pinctrl_macb_rmii_mii_alt>;
                                status = "okay";
                        };
 
index e61fdd47bd01d31492412be871da11db61502b41..f99f60dadf5dcde62d2e3a6bcccfdc2c3b4fed8c 100644 (file)
        memory {
                reg = <0x40000000 0x80000000>;
        };
+
+       soc {
+               pinctrl@01c20800 {
+                       compatible = "allwinner,sun4i-a10-pinctrl";
+                       reg = <0x01c20800 0x400>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       uart0_pins_a: uart0@0 {
+                               allwinner,pins = "PB22", "PB23";
+                               allwinner,function = "uart0";
+                               allwinner,drive = <0>;
+                               allwinner,pull = <0>;
+                       };
+
+                       uart0_pins_b: uart0@1 {
+                               allwinner,pins = "PF2", "PF4";
+                               allwinner,function = "uart0";
+                               allwinner,drive = <0>;
+                               allwinner,pull = <0>;
+                       };
+
+                       uart1_pins_a: uart1@0 {
+                               allwinner,pins = "PA10", "PA11";
+                               allwinner,function = "uart1";
+                               allwinner,drive = <0>;
+                               allwinner,pull = <0>;
+                       };
+               };
+       };
 };
index 498a091a4ea20c9eba86cf61287d7be24f68612b..4a1e45d4aacef5da48e4d8c6a0da0d3f135f4e3f 100644 (file)
@@ -24,6 +24,8 @@
 
        soc {
                uart1: uart@01c28400 {
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&uart1_pins_b>;
                        status = "okay";
                };
        };
index 59a2d265a98eee4960a74c4a29a49bfe8a7aa31d..e1121890fb29db8d76538a3b482f155d70fdfb61 100644 (file)
        memory {
                reg = <0x40000000 0x20000000>;
        };
+
+       soc {
+               pinctrl@01c20800 {
+                       compatible = "allwinner,sun5i-a13-pinctrl";
+                       reg = <0x01c20800 0x400>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       uart1_pins_a: uart1@0 {
+                               allwinner,pins = "PE10", "PE11";
+                               allwinner,function = "uart1";
+                               allwinner,drive = <0>;
+                               allwinner,pull = <0>;
+                       };
+
+                       uart1_pins_b: uart1@1 {
+                               allwinner,pins = "PG3", "PG4";
+                               allwinner,function = "uart1";
+                               allwinner,drive = <0>;
+                               allwinner,pull = <0>;
+                       };
+               };
+       };
 };
index 8bbc2bfef221a2449da1211b2b880aeb07364f94..8b36abea9f2edba7cc4390693324c829142b8a76 100644 (file)
                };
 
                uart0: uart@01c28000 {
-                       compatible = "ns8250";
+                       compatible = "snps,dw-apb-uart";
                        reg = <0x01c28000 0x400>;
                        interrupts = <1>;
                        reg-shift = <2>;
+                       reg-io-width = <4>;
                        clock-frequency = <24000000>;
                        status = "disabled";
                };
 
                uart1: uart@01c28400 {
-                       compatible = "ns8250";
+                       compatible = "snps,dw-apb-uart";
                        reg = <0x01c28400 0x400>;
                        interrupts = <2>;
                        reg-shift = <2>;
+                       reg-io-width = <4>;
                        clock-frequency = <24000000>;
                        status = "disabled";
                };
index 1fc405a9ecfb0a4e6cacd94ba9442d6ce75575fa..cf8071ad22d5fb5acfd1382f82d16e606c28e068 100644 (file)
@@ -45,7 +45,6 @@
                        reg = <1>;
                };
 
-/* A7s disabled till big.LITTLE patches are available...
                cpu2: cpu@2 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a7";
@@ -63,7 +62,6 @@
                        compatible = "arm,cortex-a7";
                        reg = <0x102>;
                };
-*/
        };
 
        memory@80000000 {
index 36ae03a3f5d1a07527145ff545ded90888ba0a7b..87dfa9026c5bdcb1ba9d57d718d00da671c8fe21 100644 (file)
@@ -351,6 +351,25 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
        irq_set_chained_handler(irq, gic_handle_cascade_irq);
 }
 
+static u8 gic_get_cpumask(struct gic_chip_data *gic)
+{
+       void __iomem *base = gic_data_dist_base(gic);
+       u32 mask, i;
+
+       for (i = mask = 0; i < 32; i += 4) {
+               mask = readl_relaxed(base + GIC_DIST_TARGET + i);
+               mask |= mask >> 16;
+               mask |= mask >> 8;
+               if (mask)
+                       break;
+       }
+
+       if (!mask)
+               pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
+
+       return mask;
+}
+
 static void __init gic_dist_init(struct gic_chip_data *gic)
 {
        unsigned int i;
@@ -369,7 +388,9 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
        /*
         * Set all global interrupts to this CPU only.
         */
-       cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0);
+       cpumask = gic_get_cpumask(gic);
+       cpumask |= cpumask << 8;
+       cpumask |= cpumask << 16;
        for (i = 32; i < gic_irqs; i += 4)
                writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
 
@@ -400,7 +421,7 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
         * Get what the GIC says our CPU mask is.
         */
        BUG_ON(cpu >= NR_GIC_CPU_IF);
-       cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0);
+       cpu_mask = gic_get_cpumask(gic);
        gic_cpu_map[cpu] = cpu_mask;
 
        /*
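
A minimal C sketch of the byte-lane replication performed in gic_dist_init() above, using the 8-bit CPU interface mask returned by gic_get_cpumask(); the helper name is hypothetical, the hunk itself open-codes the two shifts:

	#include <linux/types.h>

	/*
	 * Spread an 8-bit GIC CPU interface mask into all four byte lanes of a
	 * 32-bit GIC_DIST_TARGET word, so one register write covers four interrupts.
	 */
	static u32 gic_spread_cpumask(u8 mask)
	{
		u32 cpumask = mask;

		cpumask |= cpumask << 8;	/* byte lanes 0-1 */
		cpumask |= cpumask << 16;	/* byte lanes 0-3 */
		return cpumask;
	}
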
index b175577d7abb5aec9541461e35407f0f9059c04a..1ea959019fcd3c19295b8a27a458198100344fe6 100644 (file)
@@ -19,6 +19,7 @@ CONFIG_SOC_AT91SAM9260=y
 CONFIG_SOC_AT91SAM9263=y
 CONFIG_SOC_AT91SAM9G45=y
 CONFIG_SOC_AT91SAM9X5=y
+CONFIG_SOC_AT91SAM9N12=y
 CONFIG_MACH_AT91SAM_DT=y
 CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
 CONFIG_AT91_TIMER_HZ=128
@@ -31,7 +32,7 @@ CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_ARM_APPENDED_DTB=y
 CONFIG_ARM_ATAG_DTB_COMPAT=y
-CONFIG_CMDLINE="mem=128M console=ttyS0,115200 initrd=0x21100000,25165824 root=/dev/ram0 rw"
+CONFIG_CMDLINE="console=ttyS0,115200 initrd=0x21100000,25165824 root=/dev/ram0 rw"
 CONFIG_KEXEC=y
 CONFIG_AUTO_ZRELADDR=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
index 73cf03aa981e1b665d9a40f4f80847335ca49441..1c4df27f93322604398d70f9c585332e97374b10 100644 (file)
@@ -37,7 +37,7 @@
  */
 #define PAGE_OFFSET            UL(CONFIG_PAGE_OFFSET)
 #define TASK_SIZE              (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
-#define TASK_UNMAPPED_BASE     (UL(CONFIG_PAGE_OFFSET) / 3)
+#define TASK_UNMAPPED_BASE     ALIGN(TASK_SIZE / 3, SZ_16M)
 
 /*
  * The maximum size of a 26-bit user space task.
index 4eb6d005ffaa4b625c9f86fc8532b046d0cf1f6a..86dff32a073755f46c40c379d27a29f9aab5b829 100644 (file)
@@ -7,8 +7,14 @@
 
 #ifndef __ASSEMBLER__
 unsigned int scu_get_core_count(void __iomem *);
-void scu_enable(void __iomem *);
 int scu_power_mode(void __iomem *, unsigned int);
+
+#ifdef CONFIG_SMP
+void scu_enable(void __iomem *scu_base);
+#else
+static inline void scu_enable(void __iomem *scu_base) {}
+#endif
+
 #endif
 
 #endif
index 6809200c31fb73349df3af95cd737c0e70293ada..14f7c3b14632e22c02632faeaca9f07adcbc232f 100644 (file)
@@ -100,12 +100,14 @@ ENTRY(printch)
                b       1b
 ENDPROC(printch)
 
+#ifdef CONFIG_MMU
 ENTRY(debug_ll_addr)
                addruart r2, r3, ip
                str     r2, [r0]
                str     r3, [r1]
                mov     pc, lr
 ENDPROC(debug_ll_addr)
+#endif
 
 #else
 
index 4eee351f4668e2bbee54c3771a5389ce8900c10c..486a15ae901192fa3e26539b3d2b7003a1528e0f 100644 (file)
@@ -246,6 +246,7 @@ __create_page_tables:
 
        /*
         * Then map boot params address in r2 if specified.
+        * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
         */
        mov     r0, r2, lsr #SECTION_SHIFT
        movs    r0, r0, lsl #SECTION_SHIFT
@@ -253,6 +254,8 @@ __create_page_tables:
        addne   r3, r3, #PAGE_OFFSET
        addne   r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
        orrne   r6, r7, r0
+       strne   r6, [r3], #1 << PMD_ORDER
+       addne   r6, r6, #1 << SECTION_SHIFT
        strne   r6, [r3]
 
 #ifdef CONFIG_DEBUG_LL
@@ -331,7 +334,7 @@ ENTRY(secondary_startup)
         * as it has already been validated by the primary processor.
         */
 #ifdef CONFIG_ARM_VIRT_EXT
-       bl      __hyp_stub_install
+       bl      __hyp_stub_install_secondary
 #endif
        safe_svcmode_maskall r9
 
index 65b2417aebce0beed61a7510702168cad2d9c3dc..1315c4ccfa563a04928347ec560ed618dcd3f958 100644 (file)
@@ -99,7 +99,7 @@ ENTRY(__hyp_stub_install_secondary)
         * immediately.
         */
        compare_cpu_mode_with_primary   r4, r5, r6, r7
-       bxne    lr
+       movne   pc, lr
 
        /*
         * Once we have given up on one CPU, we do not try to install the
@@ -111,7 +111,7 @@ ENTRY(__hyp_stub_install_secondary)
         */
 
        cmp     r4, #HYP_MODE
-       bxne    lr                      @ give up if the CPU is not in HYP mode
+       movne   pc, lr                  @ give up if the CPU is not in HYP mode
 
 /*
  * Configure HSCTLR to set correct exception endianness/instruction set
@@ -120,7 +120,8 @@ ENTRY(__hyp_stub_install_secondary)
  * Eventually, CPU-specific code might be needed -- assume not for now
  *
  * This code relies on the "eret" instruction to synchronize the
- * various coprocessor accesses.
+ * various coprocessor accesses. This is done when we switch to SVC
+ * (see safe_svcmode_maskall).
  */
        @ Now install the hypervisor stub:
        adr     r7, __hyp_stub_vectors
@@ -155,14 +156,7 @@ THUMB(     orr     r7, #(1 << 30)  )       @ HSCTLR.TE
 1:
 #endif
 
-       bic     r7, r4, #MODE_MASK
-       orr     r7, r7, #SVC_MODE
-THUMB( orr     r7, r7, #PSR_T_BIT      )
-       msr     spsr_cxsf, r7           @ This is SPSR_hyp.
-
-       __MSR_ELR_HYP(14)               @ msr elr_hyp, lr
-       __ERET                          @ return, switching to SVC mode
-                                       @ The boot CPU mode is left in r4.
+       bx      lr                      @ The boot CPU mode is left in r4.
 ENDPROC(__hyp_stub_install_secondary)
 
 __hyp_stub_do_trap:
@@ -200,7 +194,7 @@ ENDPROC(__hyp_get_vectors)
        @ fall through
 ENTRY(__hyp_set_vectors)
        __HVC(0)
-       bx      lr
+       mov     pc, lr
 ENDPROC(__hyp_set_vectors)
 
 #ifndef ZIMAGE
index b9f015e843d8d10c461197c4399663942fb721e9..45eac87ed66a692859e7431af684f0b33b3ffeed 100644 (file)
@@ -75,7 +75,7 @@ void scu_enable(void __iomem *scu_base)
 int scu_power_mode(void __iomem *scu_base, unsigned int mode)
 {
        unsigned int val;
-       int cpu = cpu_logical_map(smp_processor_id());
+       int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
 
        if (mode > 3 || mode == 1 || cpu > 3)
                return -EINVAL;
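
This hunk (and the highbank ones further down) stops treating the raw cpu_logical_map() value as the SCU/jump-table index and instead extracts affinity level 0 from the MPIDR via MPIDR_AFFINITY_LEVEL(). A rough equivalent of that extraction, assuming the usual 8-bit affinity fields and a hypothetical helper name:

	#include <linux/types.h>

	/* Affinity level 0 lives in MPIDR bits [7:0]. */
	static inline u32 mpidr_aff0(u32 mpidr)
	{
		return mpidr & 0xff;
	}
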
index 9ee866ce0478027b72cb001ca4cfb1a80326da3f..4b678478cf95d9f60d6a4484b4a490ee46228d45 100644 (file)
@@ -105,6 +105,8 @@ static void __init soc_detect(u32 dbgu_base)
        switch (socid) {
        case ARCH_ID_AT91RM9200:
                at91_soc_initdata.type = AT91_SOC_RM9200;
+               if (at91_soc_initdata.subtype == AT91_SOC_SUBTYPE_NONE)
+                       at91_soc_initdata.subtype = AT91_SOC_RM9200_BGA;
                at91_boot_soc = at91rm9200_soc;
                break;
 
index e103c290bc9e37ca5871d8e8182493e9fa0447c5..85afb031b6766909354b88502c379598f2b1d37a 100644 (file)
@@ -414,7 +414,7 @@ config MACH_EXYNOS4_DT
        select CPU_EXYNOS4210
        select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD
        select PINCTRL
-       select PINCTRL_EXYNOS4
+       select PINCTRL_EXYNOS
        select USE_OF
        help
          Machine support for Samsung Exynos4 machine with device tree enabled.
index f710f163fb50056c199da83fb1295b4f2cbca759..65656ff0eb339c10d581196f0580420c8ba3c7bc 100644 (file)
@@ -29,6 +29,7 @@
 
 #include <asm/arch_timer.h>
 #include <asm/cacheflush.h>
+#include <asm/cputype.h>
 #include <asm/smp_plat.h>
 #include <asm/smp_twd.h>
 #include <asm/hardware/arm_timer.h>
@@ -60,7 +61,7 @@ static void __init highbank_scu_map_io(void)
 
 void highbank_set_cpu_jump(int cpu, void *jump_addr)
 {
-       cpu = cpu_logical_map(cpu);
+       cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 0);
        writel(virt_to_phys(jump_addr), HB_JUMP_TABLE_VIRT(cpu));
        __cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16);
        outer_clean_range(HB_JUMP_TABLE_PHYS(cpu),
index 70af9d13fcefefb1fad252c1a8c8a2bffa71ef3b..5995df7f2622eaa20382d7ad0e90483675a6b5eb 100644 (file)
@@ -37,7 +37,7 @@ extern void __iomem *sregs_base;
 
 static inline void highbank_set_core_pwr(void)
 {
-       int cpu = cpu_logical_map(smp_processor_id());
+       int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
        if (scu_base_addr)
                scu_power_mode(scu_base_addr, SCU_PM_POWEROFF);
        else
@@ -46,7 +46,7 @@ static inline void highbank_set_core_pwr(void)
 
 static inline void highbank_clear_core_pwr(void)
 {
-       int cpu = cpu_logical_map(smp_processor_id());
+       int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(smp_processor_id()), 0);
        if (scu_base_addr)
                scu_power_mode(scu_base_addr, SCU_PM_NORMAL);
        else
index 3e628fd7a674d3b5f6b4782b32ef03f7548c43a2..0a2349dc70184021e268db86ef9c3cb0e966f8a3 100644 (file)
@@ -851,6 +851,7 @@ config SOC_IMX6Q
        select HAVE_CAN_FLEXCAN if CAN
        select HAVE_IMX_GPC
        select HAVE_IMX_MMDC
+       select HAVE_IMX_SRC
        select HAVE_SMP
        select MFD_SYSCON
        select PINCTRL
index b197aa73dc4b448ad603256f8ea1ea5a9be8e30c..2c570cdaae7b1f9152f6cb1e0dd122882cfac028 100644 (file)
@@ -254,9 +254,9 @@ int __init mx25_clocks_init(void)
        clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
        clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.2");
        clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
-       clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc");
-       clk_register_clkdev(clk[usbotg_ahb], "ahb", "fsl-usb2-udc");
-       clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
+       clk_register_clkdev(clk[ipg], "ipg", "imx-udc-mx27");
+       clk_register_clkdev(clk[usbotg_ahb], "ahb", "imx-udc-mx27");
+       clk_register_clkdev(clk[usb_div], "per", "imx-udc-mx27");
        clk_register_clkdev(clk[nfc_ipg_per], NULL, "imx25-nand.0");
        /* i.mx25 has the i.mx35 type cspi */
        clk_register_clkdev(clk[cspi1_ipg], NULL, "imx35-cspi.0");
index 4c1d1e4efc74ed1694ef0f0eca2ab68396171426..1ffe3b534e51562aca68b587c383bb3df00b38a8 100644 (file)
@@ -236,9 +236,9 @@ int __init mx27_clocks_init(unsigned long fref)
        clk_register_clkdev(clk[lcdc_ahb_gate], "ahb", "imx21-fb.0");
        clk_register_clkdev(clk[csi_ahb_gate], "ahb", "imx27-camera.0");
        clk_register_clkdev(clk[per4_gate], "per", "imx27-camera.0");
-       clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
-       clk_register_clkdev(clk[usb_ipg_gate], "ipg", "fsl-usb2-udc");
-       clk_register_clkdev(clk[usb_ahb_gate], "ahb", "fsl-usb2-udc");
+       clk_register_clkdev(clk[usb_div], "per", "imx-udc-mx27");
+       clk_register_clkdev(clk[usb_ipg_gate], "ipg", "imx-udc-mx27");
+       clk_register_clkdev(clk[usb_ahb_gate], "ahb", "imx-udc-mx27");
        clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
        clk_register_clkdev(clk[usb_ipg_gate], "ipg", "mxc-ehci.0");
        clk_register_clkdev(clk[usb_ahb_gate], "ahb", "mxc-ehci.0");
index 8be64e0a4ace0bec7a33f64ad2c056dfd0ad6947..16ccbd41dea9da4b830787b11e099dff7a19a141 100644 (file)
@@ -139,9 +139,9 @@ int __init mx31_clocks_init(unsigned long fref)
        clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.2");
        clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.2");
        clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
-       clk_register_clkdev(clk[usb_div_post], "per", "fsl-usb2-udc");
-       clk_register_clkdev(clk[usb_gate], "ahb", "fsl-usb2-udc");
-       clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc");
+       clk_register_clkdev(clk[usb_div_post], "per", "imx-udc-mx27");
+       clk_register_clkdev(clk[usb_gate], "ahb", "imx-udc-mx27");
+       clk_register_clkdev(clk[ipg], "ipg", "imx-udc-mx27");
        clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
        /* i.mx31 has the i.mx21 type uart */
        clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
index 66f3d65ea2755f0a522f2fdea52f9b9351fe5beb..f0727e80815dc7adcb7ed2f1fbfcb43723fbd91a 100644 (file)
@@ -251,9 +251,9 @@ int __init mx35_clocks_init()
        clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
        clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
        clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.2");
-       clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
-       clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc");
-       clk_register_clkdev(clk[usbotg_gate], "ahb", "fsl-usb2-udc");
+       clk_register_clkdev(clk[usb_div], "per", "imx-udc-mx27");
+       clk_register_clkdev(clk[ipg], "ipg", "imx-udc-mx27");
+       clk_register_clkdev(clk[usbotg_gate], "ahb", "imx-udc-mx27");
        clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
        clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0");
        clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
index 579023f59dc1458690c91bf5059706e4dd99c6c6..fb7cb841b64c70aa8a411fd061cbf34ff6d7b480 100644 (file)
@@ -269,9 +269,9 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
        clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.2");
        clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.2");
        clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.2");
-       clk_register_clkdev(clk[usboh3_per_gate], "per", "fsl-usb2-udc");
-       clk_register_clkdev(clk[usboh3_gate], "ipg", "fsl-usb2-udc");
-       clk_register_clkdev(clk[usboh3_gate], "ahb", "fsl-usb2-udc");
+       clk_register_clkdev(clk[usboh3_per_gate], "per", "imx-udc-mx51");
+       clk_register_clkdev(clk[usboh3_gate], "ipg", "imx-udc-mx51");
+       clk_register_clkdev(clk[usboh3_gate], "ahb", "imx-udc-mx51");
        clk_register_clkdev(clk[nfc_gate], NULL, "imx51-nand");
        clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "imx-ssi.0");
        clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1");
index 7f2c10c7413abaf0786b45a29933bc60745bd12c..c0c4e723b7f5dee0a34526fd280cab2e8b3966b1 100644 (file)
@@ -436,6 +436,9 @@ int __init mx6q_clocks_init(void)
        for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
                clk_prepare_enable(clk[clks_init_on[i]]);
 
+       /* Set initial power mode */
+       imx6q_set_lpm(WAIT_CLOCKED);
+
        np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
        base = of_iomap(np, 0);
        WARN_ON(!base);
index 7191ab4434e52b5c881e170e9e7925e918ba42f5..fa36fb84ab193f2fb8480bf3cc70d68232824844 100644 (file)
@@ -142,6 +142,7 @@ extern int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode);
 extern void imx6q_clock_map_io(void);
 
 extern void imx_cpu_die(unsigned int cpu);
+extern int imx_cpu_kill(unsigned int cpu);
 
 #ifdef CONFIG_PM
 extern void imx6q_pm_init(void);
index 6277baf1b7be74496c4f000867bc9a899d7a7adb..9bd5777ff0e78aee55115a6255925ec0804a6593 100644 (file)
@@ -63,6 +63,7 @@ struct platform_device *__init imx_add_flexcan(
 
 #include <linux/fsl_devices.h>
 struct imx_fsl_usb2_udc_data {
+       const char *devid;
        resource_size_t iobase;
        resource_size_t irq;
 };
index 37e44398197b4703b7130734d166963f9e894e01..3c06bd96e9cc5e094a183213b1bdfd957ceda1b4 100644 (file)
 #include "../hardware.h"
 #include "devices-common.h"
 
-#define imx_fsl_usb2_udc_data_entry_single(soc)                                \
+#define imx_fsl_usb2_udc_data_entry_single(soc, _devid)                        \
        {                                                               \
+               .devid = _devid,                                        \
                .iobase = soc ## _USB_OTG_BASE_ADDR,                    \
                .irq = soc ## _INT_USB_OTG,                             \
        }
 
 #ifdef CONFIG_SOC_IMX25
 const struct imx_fsl_usb2_udc_data imx25_fsl_usb2_udc_data __initconst =
-       imx_fsl_usb2_udc_data_entry_single(MX25);
+       imx_fsl_usb2_udc_data_entry_single(MX25, "imx-udc-mx27");
 #endif /* ifdef CONFIG_SOC_IMX25 */
 
 #ifdef CONFIG_SOC_IMX27
 const struct imx_fsl_usb2_udc_data imx27_fsl_usb2_udc_data __initconst =
-       imx_fsl_usb2_udc_data_entry_single(MX27);
+       imx_fsl_usb2_udc_data_entry_single(MX27, "imx-udc-mx27");
 #endif /* ifdef CONFIG_SOC_IMX27 */
 
 #ifdef CONFIG_SOC_IMX31
 const struct imx_fsl_usb2_udc_data imx31_fsl_usb2_udc_data __initconst =
-       imx_fsl_usb2_udc_data_entry_single(MX31);
+       imx_fsl_usb2_udc_data_entry_single(MX31, "imx-udc-mx27");
 #endif /* ifdef CONFIG_SOC_IMX31 */
 
 #ifdef CONFIG_SOC_IMX35
 const struct imx_fsl_usb2_udc_data imx35_fsl_usb2_udc_data __initconst =
-       imx_fsl_usb2_udc_data_entry_single(MX35);
+       imx_fsl_usb2_udc_data_entry_single(MX35, "imx-udc-mx27");
 #endif /* ifdef CONFIG_SOC_IMX35 */
 
 #ifdef CONFIG_SOC_IMX51
 const struct imx_fsl_usb2_udc_data imx51_fsl_usb2_udc_data __initconst =
-       imx_fsl_usb2_udc_data_entry_single(MX51);
+       imx_fsl_usb2_udc_data_entry_single(MX51, "imx-udc-mx51");
 #endif
 
 struct platform_device *__init imx_add_fsl_usb2_udc(
@@ -57,7 +58,7 @@ struct platform_device *__init imx_add_fsl_usb2_udc(
                        .flags = IORESOURCE_IRQ,
                },
        };
-       return imx_add_platform_device_dmamask("fsl-usb2-udc", -1,
+       return imx_add_platform_device_dmamask(data->devid, -1,
                        res, ARRAY_SIZE(res),
                        pdata, sizeof(*pdata), DMA_BIT_MASK(32));
 }
index 10b0ed39f07f9ff5cea15ca7fd5359f96506bd34..25a47c616b2d7c2b855eaf510b84b4d466b51d62 100644 (file)
@@ -54,7 +54,7 @@ struct platform_device *__init imx_add_imx_fb(
                        .flags = IORESOURCE_IRQ,
                },
        };
-       return imx_add_platform_device_dmamask("imx-fb", 0,
+       return imx_add_platform_device_dmamask(data->devid, 0,
                        res, ARRAY_SIZE(res),
                        pdata, sizeof(*pdata), DMA_BIT_MASK(32));
 }
index 3dec962b0770aa9f088829da920c76a44a9d4113..7bc5fe15dda2a3cbc5eb43bd1c93d69830c4af70 100644 (file)
@@ -46,9 +46,11 @@ static inline void cpu_enter_lowpower(void)
 void imx_cpu_die(unsigned int cpu)
 {
        cpu_enter_lowpower();
-       imx_enable_cpu(cpu, false);
+       cpu_do_idle();
+}
 
-       /* spin here until hardware takes it down */
-       while (1)
-               ;
+int imx_cpu_kill(unsigned int cpu)
+{
+       imx_enable_cpu(cpu, false);
+       return 1;
 }
index 6c80424f678e3fad821aa8ea60685485db5ddb6e..e05cf407db659c834f6c2f4d500c26242c4e2d53 100644 (file)
@@ -22,8 +22,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/genalloc.h>
-
-#include "iram.h"
+#include "linux/platform_data/imx-iram.h"
 
 static unsigned long iram_phys_base;
 static void __iomem *iram_virt_base;
index 3777b805b76ba8645c50c41993f8ba06b550927d..66fae885c8429f3b9147d4a3c33cd6e739a0ae05 100644 (file)
@@ -92,5 +92,6 @@ struct smp_operations  imx_smp_ops __initdata = {
        .smp_boot_secondary     = imx_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
        .cpu_die                = imx_cpu_die,
+       .cpu_kill               = imx_cpu_kill,
 #endif
 };
index a17543da602da4d8c5504961434bba5bfa2551b2..ee42d20cba19f022d4a9ece8616e27b443d7e82e 100644 (file)
@@ -41,6 +41,7 @@ static int imx6q_pm_enter(suspend_state_t state)
                cpu_suspend(0, imx6q_suspend_finish);
                imx_smp_prepare();
                imx_gpc_post_resume();
+               imx6q_set_lpm(WAIT_CLOCKED);
                break;
        default:
                return -EINVAL;
index be50e795536d1aa2375df9957a057bac5d28e942..e7fcea7f33008b3e8ee5c3288475a726fd88c13d 100644 (file)
@@ -475,13 +475,12 @@ int __init pci_v3_setup(int nr, struct pci_sys_data *sys)
 {
        int ret = 0;
 
+       if (!ap_syscon_base)
+               return -EINVAL;
+
        if (nr == 0) {
                sys->mem_offset = PHYS_PCI_MEM_BASE;
                ret = pci_v3_setup_resources(sys);
-               /* Remap the Integrator system controller */
-               ap_syscon_base = ioremap(INTEGRATOR_SC_BASE, 0x100);
-               if (!ap_syscon_base)
-                       return -EINVAL;
        }
 
        return ret;
@@ -497,6 +496,13 @@ void __init pci_v3_preinit(void)
        unsigned int temp;
        int ret;
 
+       /* Remap the Integrator system controller */
+       ap_syscon_base = ioremap(INTEGRATOR_SC_BASE, 0x100);
+       if (!ap_syscon_base) {
+               pr_err("unable to remap the AP syscon for PCIv3\n");
+               return;
+       }
+
        pcibios_min_mem = 0x00100000;
 
        /*
index 8821720ab5a481ca048b0acb1ef01010055ffa7f..f4632a809f6895a0481ea1876e3e0fe25da36fe8 100644 (file)
 #include <linux/gpio.h>
 #include <linux/of.h>
 #include "common.h"
-#include "mpp.h"
 
 static struct mv643xx_eth_platform_data ns2_ge00_data = {
        .phy_addr       = MV643XX_ETH_PHY_ADDR(8),
 };
 
-static unsigned int ns2_mpp_config[] __initdata = {
-       MPP0_SPI_SCn,
-       MPP1_SPI_MOSI,
-       MPP2_SPI_SCK,
-       MPP3_SPI_MISO,
-       MPP4_NF_IO6,
-       MPP5_NF_IO7,
-       MPP6_SYSRST_OUTn,
-       MPP7_GPO,               /* Fan speed (bit 1) */
-       MPP8_TW0_SDA,
-       MPP9_TW0_SCK,
-       MPP10_UART0_TXD,
-       MPP11_UART0_RXD,
-       MPP12_GPO,              /* Red led */
-       MPP14_GPIO,             /* USB fuse */
-       MPP16_GPIO,             /* SATA 0 power */
-       MPP17_GPIO,             /* SATA 1 power */
-       MPP18_NF_IO0,
-       MPP19_NF_IO1,
-       MPP20_SATA1_ACTn,
-       MPP21_SATA0_ACTn,
-       MPP22_GPIO,             /* Fan speed (bit 0) */
-       MPP23_GPIO,             /* Fan power */
-       MPP24_GPIO,             /* USB mode select */
-       MPP25_GPIO,             /* Fan rotation fail */
-       MPP26_GPIO,             /* USB device vbus */
-       MPP28_GPIO,             /* USB enable host vbus */
-       MPP29_GPIO,             /* Blue led (slow register) */
-       MPP30_GPIO,             /* Blue led (command register) */
-       MPP31_GPIO,             /* Board power off */
-       MPP32_GPIO,             /* Power button (0 = Released, 1 = Pushed) */
-       MPP33_GPO,              /* Fan speed (bit 2) */
-       0
-};
-
 #define NS2_GPIO_POWER_OFF     31
 
 static void ns2_power_off(void)
@@ -71,8 +35,6 @@ void __init ns2_init(void)
        /*
         * Basic setup. Needs to be called early.
         */
-       kirkwood_mpp_conf(ns2_mpp_config);
-
        if (of_machine_is_compatible("lacie,netspace_lite_v2") ||
            of_machine_is_compatible("lacie,netspace_mini_v2"))
                ns2_ge00_data.phy_addr = MV643XX_ETH_PHY_ADDR(0);
index 5dcb369b58aa0c79e55159f9affbd8b0c2a105d2..99df4df680fda3b54e3dce605ea3793d3f6bd26b 100644 (file)
@@ -1,6 +1,8 @@
 ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
        -I$(srctree)/arch/arm/plat-orion/include
 
+AFLAGS_coherency_ll.o          := -Wa,-march=armv7-a
+
 obj-y += system-controller.o
 obj-$(CONFIG_MACH_ARMADA_370_XP) += armada-370-xp.o irq-armada-370-xp.o addr-map.o coherency.o coherency_ll.o pmsu.o
 obj-$(CONFIG_SMP)                += platsmp.o headsmp.o
index 5c8e9cee2c2e99a4e1562b07021edb77eb36a554..769c1feee1c469049290f9101745cc28ef01742f 100644 (file)
@@ -397,6 +397,12 @@ static struct omap_board_mux board_mux[] __initdata = {
                  OMAP_PULL_ENA),
        OMAP4_MUX(ABE_MCBSP1_FSX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
 
+       /* UART2 - BT/FM/GPS shared transport */
+       OMAP4_MUX(UART2_CTS,    OMAP_PIN_INPUT  | OMAP_MUX_MODE0),
+       OMAP4_MUX(UART2_RTS,    OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
+       OMAP4_MUX(UART2_RX,     OMAP_PIN_INPUT  | OMAP_MUX_MODE0),
+       OMAP4_MUX(UART2_TX,     OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
+
        { .reg_offset = OMAP_MUX_TERMINATOR },
 };
 
index 7e5febe456d97b0088d7dfe455124b8153f4ff41..ab7e952d207013e42888c2a7cbfe0cd4e7b1b162 100644 (file)
@@ -1935,6 +1935,8 @@ int __init omap2420_clk_init(void)
                        omap2_init_clk_hw_omap_clocks(c->lk.clk);
        }
 
+       omap2xxx_clkt_vps_late_init();
+
        omap2_clk_disable_autoidle_all();
 
        omap2_clk_enable_init_clocks(enable_init_clks,
index eda079b96c6aac0389c6539f7d93a0b0cf539e4f..eb3dab68d5362b4b6340de4fccca2b5fd8fff825 100644 (file)
@@ -2050,6 +2050,8 @@ int __init omap2430_clk_init(void)
                        omap2_init_clk_hw_omap_clocks(c->lk.clk);
        }
 
+       omap2xxx_clkt_vps_late_init();
+
        omap2_clk_disable_autoidle_all();
 
        omap2_clk_enable_init_clocks(enable_init_clks,
index 5789a5e255638d60f797a18840b3f0b01394e50a..a2cc046b47f460a83a0954ae5249e617ea091f01 100644 (file)
@@ -2026,14 +2026,13 @@ int __init omap4xxx_clk_init(void)
         * On OMAP4460 the ABE DPLL fails to turn on if in idle low-power
         * state when turning the ABE clock domain. Workaround this by
         * locking the ABE DPLL on boot.
+        * Lock the ABE DPLL in any case to avoid issues with audio.
         */
-       if (cpu_is_omap446x()) {
-               rc = clk_set_parent(&abe_dpll_refclk_mux_ck, &sys_32k_ck);
-               if (!rc)
-                       rc = clk_set_rate(&dpll_abe_ck, OMAP4_DPLL_ABE_DEFFREQ);
-               if (rc)
-                       pr_err("%s: failed to configure ABE DPLL!\n", __func__);
-       }
+       rc = clk_set_parent(&abe_dpll_refclk_mux_ck, &sys_32k_ck);
+       if (!rc)
+               rc = clk_set_rate(&dpll_abe_ck, OMAP4_DPLL_ABE_DEFFREQ);
+       if (rc)
+               pr_err("%s: failed to configure ABE DPLL!\n", __func__);
 
        return 0;
 }
index 5e304d0719a2ae763885d8e5468de14a1dad0257..626f3ea3142f55dfd0bcd5337a9758eab5d0cf71 100644 (file)
@@ -639,7 +639,7 @@ static int count_ocp2scp_devices(struct omap_ocp2scp_dev *ocp2scp_dev)
        return cnt;
 }
 
-static void omap_init_ocp2scp(void)
+static void __init omap_init_ocp2scp(void)
 {
        struct omap_hwmod       *oh;
        struct platform_device  *pdev;
index 4c7566c7e24a3f5ca90c5d320cc43a76fcbbd82a..2a2cfa88ddbfdd456b6522c678ac7321057ad517 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/platform_data/omap_drm.h>
 
+#include "soc.h"
 #include "omap_device.h"
 #include "omap_hwmod.h"
 
@@ -56,7 +57,7 @@ static int __init omap_init_drm(void)
                        oh->name);
        }
 
-       platform_data.omaprev = GET_OMAP_REVISION();
+       platform_data.omaprev = GET_OMAP_TYPE;
 
        return platform_device_register(&omap_drm_device);
 
index 129d5081ed1572146a027eae3984b84457cf01be..793f54ac7d14b7cae5117404a564d527414d3e50 100644 (file)
@@ -2132,8 +2132,12 @@ static struct omap_hwmod omap44xx_mcpdm_hwmod = {
         * currently reset very early during boot, before I2C is
         * available, so it doesn't seem that we have any choice in
         * the kernel other than to avoid resetting it.
+        *
+        * Also, McPDM needs to be configured to NO_IDLE mode when it
+        * is in use, otherwise vital clocks will be gated, which
+        * results in 'slow motion' audio playback.
         */
-       .flags          = HWMOD_EXT_OPT_MAIN_CLK,
+       .flags          = HWMOD_EXT_OPT_MAIN_CLK | HWMOD_SWSUP_SIDLE,
        .mpu_irqs       = omap44xx_mcpdm_irqs,
        .sdma_reqs      = omap44xx_mcpdm_sdma_reqs,
        .main_clk       = "mcpdm_fck",
index 691aa674665a46cf4c8ce75c279443b82c5aff05..b8ad6e632bb84d9ed3ede854cefeeebe6b4cc00c 100644 (file)
@@ -165,15 +165,11 @@ static struct device_node * __init omap_get_timer_dt(struct of_device_id *match,
        struct device_node *np;
 
        for_each_matching_node(np, match) {
-               if (!of_device_is_available(np)) {
-                       of_node_put(np);
+               if (!of_device_is_available(np))
                        continue;
-               }
 
-               if (property && !of_get_property(np, property, NULL)) {
-                       of_node_put(np);
+               if (property && !of_get_property(np, property, NULL))
                        continue;
-               }
 
                of_add_property(np, &device_disabled);
                return np;
index a611ad3153c7b2e7e9a7e78803df0f9876bc677e..b6132aa95dc08b1ef2cc6ae0c4a5565b5213a0ab 100644 (file)
        GPIO76_LCD_PCLK,        \
        GPIO77_LCD_BIAS
 
+/* these enable a work-around for a hw bug in pxa27x during ac97 warm reset */
+#define GPIO113_AC97_nRESET_GPIO_HIGH MFP_CFG_OUT(GPIO113, AF0, DEFAULT)
+#define GPIO95_AC97_nRESET_GPIO_HIGH MFP_CFG_OUT(GPIO95, AF0, DEFAULT)
 
 extern int keypad_set_wake(unsigned int on);
 #endif /* __ASM_ARCH_MFP_PXA27X_H */
index 8047ee0effc582b421a28085624a2b58250e88e4..616cb87b61792544f470110cd6322b2d3ea9d914 100644 (file)
@@ -47,9 +47,9 @@ void pxa27x_clear_otgph(void)
 EXPORT_SYMBOL(pxa27x_clear_otgph);
 
 static unsigned long ac97_reset_config[] = {
-       GPIO113_GPIO,
+       GPIO113_AC97_nRESET_GPIO_HIGH,
        GPIO113_AC97_nRESET,
-       GPIO95_GPIO,
+       GPIO95_AC97_nRESET_GPIO_HIGH,
        GPIO95_AC97_nRESET,
 };
 
index d6b5073692d2fdda8db2656a032f374894e57463..44754230fdcc3e23098f1cfcacee1530af508da4 100644 (file)
 /*
  * Only define NR_IRQS if less than NR_IRQS_EB
  */
-#define NR_IRQS_EB             (IRQ_EB_GIC_START + 96)
+#define NR_IRQS_EB             (IRQ_EB_GIC_START + 128)
 
 #if defined(CONFIG_MACH_REALVIEW_EB) \
        && (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB))
index 553059f51841874c430d74bbbe7b4e108308fca6..755c0bb119f4c1e195c61281d776dcb92a50e6d8 100644 (file)
@@ -47,7 +47,7 @@ static struct spi_board_info wm1253_devs[] = {
                .bus_num        = 0,
                .chip_select    = 0,
                .mode           = SPI_MODE_0,
-               .irq            = S3C_EINT(5),
+               .irq            = S3C_EINT(4),
                .controller_data = &wm0010_spi_csinfo,
                .platform_data = &wm0010_pdata,
        },
index 7feb426fc202d10be8f74a8992b5de212b6e4ba0..d2e1a16690bd5807f961d6804b61dddfeb029fc7 100644 (file)
@@ -338,8 +338,10 @@ int __init s3c64xx_pm_init(void)
        for (i = 0; i < ARRAY_SIZE(s3c64xx_pm_domains); i++)
                pm_genpd_init(&s3c64xx_pm_domains[i]->pd, NULL, false);
 
+#ifdef CONFIG_S3C_DEV_FB
        if (dev_get_platdata(&s3c_device_fb.dev))
                pm_genpd_add_device(&s3c64xx_pm_f.pd, &s3c_device_fb.dev);
+#endif
 
        return 0;
 }
index 3fdd0085e3067bbb1f03dbde1624bf76b595beec..8709a39bd34c461468404a4822f0a13ca0398205 100644 (file)
@@ -7,3 +7,4 @@ config ARCH_SUNXI
        select PINCTRL
        select SPARSE_IRQ
        select SUNXI_TIMER
+       select PINCTRL_SUNXI
\ No newline at end of file
index 5dea90636d94f91e10820a41a502f23b796fc522..3e5bbd0e5b23db9840cda61cb6c5174e5ebe3a12 100644 (file)
@@ -11,6 +11,7 @@ config UX500_SOC_COMMON
        select COMMON_CLK
        select PINCTRL
        select PINCTRL_NOMADIK
+       select PINCTRL_ABX500
        select PL310_ERRATA_753970 if CACHE_PL310
 
 config UX500_SOC_DB8500
@@ -18,6 +19,11 @@ config UX500_SOC_DB8500
        select CPU_FREQ_TABLE if CPU_FREQ
        select MFD_DB8500_PRCMU
        select PINCTRL_DB8500
+       select PINCTRL_DB8540
+       select PINCTRL_AB8500
+       select PINCTRL_AB8505
+       select PINCTRL_AB9540
+       select PINCTRL_AB8540
        select REGULATOR
        select REGULATOR_DB8500_PRCMU
 
index d453522edb0d66b0488e46247fa4446876e69f6f..b8781caa54b8b28d43c7504e61d85711729259e4 100644 (file)
@@ -90,26 +90,8 @@ static struct platform_device snowball_gpio_en_3v3_regulator_dev = {
        },
 };
 
-static struct ab8500_gpio_platform_data ab8500_gpio_pdata = {
+static struct abx500_gpio_platform_data ab8500_gpio_pdata = {
        .gpio_base              = MOP500_AB8500_PIN_GPIO(1),
-       .irq_base               = MOP500_AB8500_VIR_GPIO_IRQ_BASE,
-       /* config_reg is the initial configuration of ab8500 pins.
-        * The pins can be configured as GPIO or alt functions based
-        * on value present in GpioSel1 to GpioSel6 and AlternatFunction
-        * register. This is the array of 7 configuration settings.
-        * One has to compile time decide these settings. Below is the
-        * explanation of these setting
-        * GpioSel1 = 0x00 => Pins GPIO1 to GPIO8 are not used as GPIO
-        * GpioSel2 = 0x1E => Pins GPIO10 to GPIO13 are configured as GPIO
-        * GpioSel3 = 0x80 => Pin GPIO24 is configured as GPIO
-        * GpioSel4 = 0x01 => Pin GPIo25 is configured as GPIO
-        * GpioSel5 = 0x7A => Pins GPIO34, GPIO36 to GPIO39 are conf as GPIO
-        * GpioSel6 = 0x00 => Pins GPIO41 & GPIo42 are not configured as GPIO
-        * AlternaFunction = 0x00 => If Pins GPIO10 to 13 are not configured
-        * as GPIO then this register selectes the alternate fucntions
-        */
-       .config_reg             = {0x00, 0x1E, 0x80, 0x01,
-                                       0x7A, 0x00, 0x00},
 };
 
 /* ab8500-codec */
index 5b286e06474ce481ff019f302bf1a66aec635687..b80ad9610e9752e3c1d9740f03ddaf5f7e0ec5d1 100644 (file)
@@ -285,7 +285,7 @@ static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
        OF_DEV_AUXDATA("st,nomadik-i2c", 0x80110000, "nmk-i2c.3", NULL),
        OF_DEV_AUXDATA("st,nomadik-i2c", 0x8012a000, "nmk-i2c.4", NULL),
        /* Requires device name bindings. */
-       OF_DEV_AUXDATA("stericsson,nmk_pinctrl", U8500_PRCMU_BASE,
+       OF_DEV_AUXDATA("stericsson,nmk-pinctrl", U8500_PRCMU_BASE,
                "pinctrl-db8500", NULL),
        /* Requires clock name and DMA bindings. */
        OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80123000,
index 7d34c52798b5dd1e7162d13eb63866cff62e2079..d526dd8e87d3b9927bd579339c59dab50070f397 100644 (file)
 #define MOP500_STMPE1601_IRQ_END       \
        MOP500_STMPE1601_IRQ(STMPE_NR_INTERNAL_IRQS)
 
-/* AB8500 virtual gpio IRQ */
-#define AB8500_VIR_GPIO_NR_IRQS                        16
-
-#define MOP500_AB8500_VIR_GPIO_IRQ_BASE                \
-       MOP500_STMPE1601_IRQ_END
-#define MOP500_AB8500_VIR_GPIO_IRQ_END         \
-       (MOP500_AB8500_VIR_GPIO_IRQ_BASE + AB8500_VIR_GPIO_NR_IRQS)
-
-#define MOP500_NR_IRQS         MOP500_AB8500_VIR_GPIO_IRQ_END
+#define MOP500_NR_IRQS         MOP500_STMPE1601_IRQ_END
 
 #define MOP500_IRQ_END         MOP500_NR_IRQS
 
index 6b2fb87c8698a99254bdd56c0e4c2bf146994da5..dda3904dc64c1cb3f7681cf20998397c1e103f76 100644 (file)
@@ -640,7 +640,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
        if (is_coherent || nommu())
                addr = __alloc_simple_buffer(dev, size, gfp, &page);
-       else if (gfp & GFP_ATOMIC)
+       else if (!(gfp & __GFP_WAIT))
                addr = __alloc_from_pool(size, &page);
        else if (!IS_ENABLED(CONFIG_CMA))
                addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
@@ -774,25 +774,27 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
        size_t size, enum dma_data_direction dir,
        void (*op)(const void *, size_t, int))
 {
+       unsigned long pfn;
+       size_t left = size;
+
+       pfn = page_to_pfn(page) + offset / PAGE_SIZE;
+       offset %= PAGE_SIZE;
+
        /*
         * A single sg entry may refer to multiple physically contiguous
         * pages.  But we still need to process highmem pages individually.
         * If highmem is not configured then the bulk of this loop gets
         * optimized out.
         */
-       size_t left = size;
        do {
                size_t len = left;
                void *vaddr;
 
+               page = pfn_to_page(pfn);
+
                if (PageHighMem(page)) {
-                       if (len + offset > PAGE_SIZE) {
-                               if (offset >= PAGE_SIZE) {
-                                       page += offset / PAGE_SIZE;
-                                       offset %= PAGE_SIZE;
-                               }
+                       if (len + offset > PAGE_SIZE)
                                len = PAGE_SIZE - offset;
-                       }
                        vaddr = kmap_high_get(page);
                        if (vaddr) {
                                vaddr += offset;
@@ -809,7 +811,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
                        op(vaddr, len, dir);
                }
                offset = 0;
-               page++;
+               pfn++;
                left -= len;
        } while (left);
 }
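
The dma_cache_maint_page() rework above advances by page frame number and looks up each struct page with pfn_to_page(), rather than incrementing the struct page pointer, which keeps the walk valid even when struct page arrays are not contiguous (for instance with sparse memory models). A simplified per-page walk in the same spirit; the helper is illustrative, not the kernel function:

	#include <linux/mm.h>

	/* Visit each page of a physically contiguous region, one page at a time. */
	static void for_each_region_page(struct page *page, unsigned long offset,
					 size_t size,
					 void (*visit)(struct page *, unsigned long, size_t))
	{
		unsigned long pfn = page_to_pfn(page) + offset / PAGE_SIZE;
		size_t left = size;

		offset %= PAGE_SIZE;
		do {
			size_t len = left;

			if (len > PAGE_SIZE - offset)
				len = PAGE_SIZE - offset;
			visit(pfn_to_page(pfn), offset, len);
			pfn++;
			offset = 0;
			left -= len;
		} while (left);
	}
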
index 9f0610243bd6cd3357c847a5e5d392353579f471..ce328c7f5c94556cdda6021e8aef1b950aba5aa2 100644 (file)
@@ -283,7 +283,7 @@ static struct mem_type mem_types[] = {
        },
        [MT_MEMORY_SO] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-                               L_PTE_MT_UNCACHED,
+                               L_PTE_MT_UNCACHED | L_PTE_XN,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
                                PMD_SECT_UNCACHED | PMD_SECT_XN,
index dd703ef09b8d9d623d83e4a131c12e87e329e838..b178d44e9eaa897c8f8a3a4bb017198cb7014cae 100644 (file)
@@ -20,7 +20,7 @@
  */
 ENTRY(versatile_secondary_startup)
        mrc     p15, 0, r0, c0, c0, 5
-       and     r0, r0, #15
+       bic     r0, #0xff000000
        adr     r4, 1f
        ldmia   r4, {r5, r6}
        sub     r4, r4, r5
index cc926c98598141a2fbcdb762148ec17782982d06..323ce1a62bbfa3465230632d5d7d35c458f380fa 100644 (file)
@@ -22,7 +22,7 @@
 @  IRQs disabled.
 @
 ENTRY(do_vfp)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
        ldr     r4, [r10, #TI_PREEMPT]  @ get preempt count
        add     r11, r4, #1             @ increment it
        str     r11, [r10, #TI_PREEMPT]
@@ -35,7 +35,7 @@ ENTRY(do_vfp)
 ENDPROC(do_vfp)
 
 ENTRY(vfp_null_entry)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
        get_thread_info r10
        ldr     r4, [r10, #TI_PREEMPT]  @ get preempt count
        sub     r11, r4, #1             @ decrement it
@@ -53,7 +53,7 @@ ENDPROC(vfp_null_entry)
 
        __INIT
 ENTRY(vfp_testing_entry)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
        get_thread_info r10
        ldr     r4, [r10, #TI_PREEMPT]  @ get preempt count
        sub     r11, r4, #1             @ decrement it
index ea0349f6358658065b52aa1473e877ee4fa8f5ba..dd5e56f95f3fbe3d03f7be90f1201a78a93ed568 100644 (file)
@@ -168,7 +168,7 @@ vfp_hw_state_valid:
                                        @ else it's one 32-bit instruction, so
                                        @ always subtract 4 from the following
                                        @ instruction address.
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
        get_thread_info r10
        ldr     r4, [r10, #TI_PREEMPT]  @ get preempt count
        sub     r11, r4, #1             @ decrement it
@@ -192,7 +192,7 @@ look_for_VFP_exceptions:
        @ not recognised by VFP
 
        DBGSTR  "not VFP"
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
        get_thread_info r10
        ldr     r4, [r10, #TI_PREEMPT]  @ get preempt count
        sub     r11, r4, #1             @ decrement it
index f8f362aafee948601178c275bc81b4e9c8b51ae5..75e915b72471d3a77047a75186198fddaba2fec9 100644 (file)
@@ -21,7 +21,6 @@ config ARM64
        select HAVE_GENERIC_DMA_COHERENT
        select HAVE_GENERIC_HARDIRQS
        select HAVE_HW_BREAKPOINT if PERF_EVENTS
-       select HAVE_IRQ_WORK
        select HAVE_MEMBLOCK
        select HAVE_PERF_EVENTS
        select IRQ_DOMAIN
index 801e2d7fcbc6e9bd0e4795faea842604c0c57afc..32ac0aef006879cb57b9c6bfbc25c072924b8076 100644 (file)
@@ -1,4 +1,5 @@
 targets += dtbs
+targets += $(dtb-y)
 
 dtbs: $(addprefix $(obj)/, $(dtb-y))
 
index 07fea290d7c15b211e881856bb48d943a80c337a..fe32c0e4ac010d4460184d7c8ddca2317b9d151e 100644 (file)
 
 typedef unsigned long elf_greg_t;
 
-#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
+#define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t))
+#define ELF_CORE_COPY_REGS(dest, regs) \
+       *(struct user_pt_regs *)&(dest) = (regs)->user_regs;
+
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 typedef struct user_fpsimd_state elf_fpregset_t;
 
index 64b13394950266308093f67bce6ddb7e55391428..e333a243bfccf4f6e1547ea2543e5a3fa3abd32b 100644 (file)
@@ -24,7 +24,8 @@
 /*
  * Software defined PTE bits definition.
  */
-#define PTE_VALID              (_AT(pteval_t, 1) << 0) /* pte_present() check */
+#define PTE_VALID              (_AT(pteval_t, 1) << 0)
+#define PTE_PROT_NONE          (_AT(pteval_t, 1) << 1) /* only when !PTE_VALID */
 #define PTE_FILE               (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
 #define PTE_DIRTY              (_AT(pteval_t, 1) << 55)
 #define PTE_SPECIAL            (_AT(pteval_t, 1) << 56)
@@ -60,9 +61,12 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 
 extern pgprot_t pgprot_default;
 
-#define _MOD_PROT(p, b)        __pgprot(pgprot_val(p) | (b))
+#define __pgprot_modify(prot,mask,bits) \
+       __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+
+#define _MOD_PROT(p, b)                __pgprot_modify(p, 0, b)
 
-#define PAGE_NONE              _MOD_PROT(pgprot_default, PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define PAGE_NONE              __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE)
 #define PAGE_SHARED            _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_SHARED_EXEC       _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
 #define PAGE_COPY              _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
@@ -72,7 +76,7 @@ extern pgprot_t pgprot_default;
 #define PAGE_KERNEL            _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
 #define PAGE_KERNEL_EXEC       _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
 
-#define __PAGE_NONE            __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
+#define __PAGE_NONE            __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE)
 #define __PAGE_SHARED          __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
 #define __PAGE_SHARED_EXEC     __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
 #define __PAGE_COPY            __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
@@ -125,16 +129,15 @@ extern struct page *empty_zero_page;
 /*
  * The following only work if pte_present(). Undefined behaviour otherwise.
  */
-#define pte_present(pte)       (pte_val(pte) & PTE_VALID)
+#define pte_present(pte)       (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
 #define pte_dirty(pte)         (pte_val(pte) & PTE_DIRTY)
 #define pte_young(pte)         (pte_val(pte) & PTE_AF)
 #define pte_special(pte)       (pte_val(pte) & PTE_SPECIAL)
 #define pte_write(pte)         (!(pte_val(pte) & PTE_RDONLY))
 #define pte_exec(pte)          (!(pte_val(pte) & PTE_UXN))
 
-#define pte_present_exec_user(pte) \
-       ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == \
-        (PTE_VALID | PTE_USER))
+#define pte_valid_user(pte) \
+       ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
 
 #define PTE_BIT_FUNC(fn,op) \
 static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
@@ -157,10 +160,13 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
 {
-       if (pte_present_exec_user(pte))
-               __sync_icache_dcache(pte, addr);
-       if (!pte_dirty(pte))
-               pte = pte_wrprotect(pte);
+       if (pte_valid_user(pte)) {
+               if (pte_exec(pte))
+                       __sync_icache_dcache(pte, addr);
+               if (!pte_dirty(pte))
+                       pte = pte_wrprotect(pte);
+       }
+
        set_pte(ptep, pte);
 }
 
@@ -170,9 +176,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 #define pte_huge(pte)          ((pte_val(pte) & PTE_TYPE_MASK) == PTE_TYPE_HUGEPAGE)
 #define pte_mkhuge(pte)                (__pte((pte_val(pte) & ~PTE_TYPE_MASK) | PTE_TYPE_HUGEPAGE))
 
-#define __pgprot_modify(prot,mask,bits)                \
-       __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
-
 #define __HAVE_ARCH_PTE_SPECIAL
 
 /*
@@ -264,7 +267,8 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-       const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY;
+       const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
+                             PTE_PROT_NONE | PTE_VALID;
        pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
        return pte;
 }
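The net effect of the PTE_PROT_NONE bit above is that a PROT_NONE mapping stays pte_present() for the kernel while remaining invalid to the hardware walker. A minimal user-space sketch of that contract (illustrative only; nothing below is from the patch):

#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>

static void handler(int sig)
{
        write(1, "SIGSEGV on PROT_NONE access\n", 28);
        _exit(0);
}

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;
        p[0] = 1;                       /* fine: mapping is readable/writable */
        signal(SIGSEGV, handler);
        mprotect(p, page, PROT_NONE);   /* kernel keeps the page "present",
                                           hardware PTE loses its valid bit */
        return p[0];                    /* faults into handler() */
}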
index 58432625fdb36922fa48831142e5bdf3a08d7875..5ef47ba3ed459f9f786cb79abddc2ecf762e5aaf 100644 (file)
@@ -395,8 +395,13 @@ __SYSCALL(370, sys_name_to_handle_at)
 __SYSCALL(371, compat_sys_open_by_handle_at)
 __SYSCALL(372, compat_sys_clock_adjtime)
 __SYSCALL(373, sys_syncfs)
+__SYSCALL(374, compat_sys_sendmmsg)
+__SYSCALL(375, sys_setns)
+__SYSCALL(376, compat_sys_process_vm_readv)
+__SYSCALL(377, compat_sys_process_vm_writev)
+__SYSCALL(378, sys_ni_syscall)                 /* 378 for kcmp */
 
-#define __NR_compat_syscalls           374
+#define __NR_compat_syscalls           379
 
 /*
  * Compat syscall numbers used by the AArch64 kernel.
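Entry 374 above wires sendmmsg(2) up for 32-bit tasks; a hedged user-space sketch of the call it enables (the AF_UNIX socketpair is used purely for demonstration):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>

int main(void)
{
        int sv[2];
        char a[] = "one", b[] = "two";
        struct iovec iov[2] = {
                { .iov_base = a, .iov_len = 3 },
                { .iov_base = b, .iov_len = 3 },
        };
        struct mmsghdr msgs[2] = {
                { .msg_hdr = { .msg_iov = &iov[0], .msg_iovlen = 1 } },
                { .msg_hdr = { .msg_iov = &iov[1], .msg_iovlen = 1 } },
        };

        if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sv))
                return 1;
        /* One syscall, two datagrams; the return value is the count sent. */
        printf("sent %d messages\n", sendmmsg(sv[0], msgs, 2, 0));
        return 0;
}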
index c958cb84d75fd4c488d585a6bec5233aa839b21c..6a389dc1bd499c5de57c1a572bb22fb1a81c1494 100644 (file)
@@ -252,10 +252,6 @@ void update_vsyscall(struct timekeeper *tk)
 
 void update_vsyscall_tz(void)
 {
-       ++vdso_data->tb_seq_count;
-       smp_wmb();
        vdso_data->tz_minuteswest       = sys_tz.tz_minuteswest;
        vdso_data->tz_dsttime           = sys_tz.tz_dsttime;
-       smp_wmb();
-       ++vdso_data->tb_seq_count;
 }
index 8bf658d974f947da714eb5e7a6812552173fe140..f0a6d10b52114953dcfd818c66ad85f6cccccbd8 100644 (file)
@@ -73,8 +73,6 @@ ENTRY(__kernel_gettimeofday)
        /* If tz is NULL, return 0. */
        cbz     x1, 3f
        ldp     w4, w5, [vdso_data, #VDSO_TZ_MINWEST]
-       seqcnt_read w9
-       seqcnt_check w9, 1b
        stp     w4, w5, [x1, #TZ_MINWEST]
 3:
        mov     x0, xzr
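Both hunks above concern the timezone leg of the vDSO gettimeofday() path; a trivial sketch of the user-visible call (illustrative only):

#include <stdio.h>
#include <sys/time.h>

int main(void)
{
        struct timeval tv;
        struct timezone tz;

        gettimeofday(&tv, &tz);
        printf("minuteswest=%d dsttime=%d\n", tz.tz_minuteswest, tz.tz_dsttime);
        return 0;
}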
index aaf5199d8fcbfb69ddca12e7d48fd816f791e341..b3d18f9f3e8d466a66c44fc665e2d04f58492d4a 100644 (file)
@@ -336,4 +336,14 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+                          void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+                                 void *cpu_addr, dma_addr_t dma_addr,
+                                 size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
 #endif /* __ASM_AVR32_DMA_MAPPING_H */
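For context, a hedged sketch of the consumer side of the helpers being wired up here: a driver handing a coherent buffer to user space through dma_mmap_coherent(). The my_* names are placeholders rather than anything from the patch, and this is a fragment, not a complete driver:

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/module.h>

/* Illustrative placeholders; a real driver fills these in at probe time. */
static struct device *my_dev;
static void *my_buf;            /* from dma_alloc_coherent(my_dev, ...)  */
static dma_addr_t my_handle;    /* DMA handle returned alongside my_buf  */
static size_t my_size;

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* Maps the coherent buffer into the calling process; returns
         * -EINVAL on the architectures that stub the helper out. */
        return dma_mmap_coherent(my_dev, vma, my_buf, my_handle, my_size);
}

static const struct file_operations my_fops = {
        .owner = THIS_MODULE,
        .mmap  = my_mmap,
};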
index b6f3ad5441c5f33f27c44850ca8d19c9898001b7..67e4aaad78f5cda89302901f80c1227f28862336 100644 (file)
@@ -24,7 +24,6 @@ config BLACKFIN
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_IDE
-       select HAVE_IRQ_WORK
        select HAVE_KERNEL_GZIP if RAMKERNEL
        select HAVE_KERNEL_BZIP2 if RAMKERNEL
        select HAVE_KERNEL_LZMA if RAMKERNEL
@@ -38,7 +37,6 @@ config BLACKFIN
        select HAVE_GENERIC_HARDIRQS
        select GENERIC_ATOMIC64
        select GENERIC_IRQ_PROBE
-       select IRQ_PER_CPU if SMP
        select USE_GENERIC_SMP_HELPERS if SMP
        select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
        select GENERIC_SMP_IDLE_THREAD
index bbf461076a0a2107ddc6ea932bc615fe74fdc7a1..054d9ec57d9dc1cea3eba24655a8e24349c06cef 100644 (file)
@@ -154,4 +154,14 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        _dma_sync((dma_addr_t)vaddr, size, dir);
 }
 
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+                          void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+                                 void *cpu_addr, dma_addr_t dma_addr,
+                                 size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
 #endif                         /* _BLACKFIN_DMA_MAPPING_H */
index 3c694065030f506708a9717ef7bbb5faf94f6bc1..88bd0d899bdbedfac95bfc99450f9f6fbcc00ca0 100644 (file)
@@ -89,4 +89,19 @@ extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
 #define dma_free_noncoherent(d, s, v, h)  dma_free_coherent((d), (s), (v), (h))
 
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+                                   struct vm_area_struct *vma, void *cpu_addr,
+                                   dma_addr_t dma_addr, size_t size)
+{
+       return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+                                 void *cpu_addr, dma_addr_t dma_addr,
+                                 size_t size)
+{
+       return -EINVAL;
+}
+
 #endif /* _ASM_C6X_DMA_MAPPING_H */
index 8588b2ccf85444206480895bb067cd8c7366a558..2f0f654f1b4407484c0a22d1b418a44e5388f04f 100644 (file)
@@ -158,5 +158,15 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 {
 }
 
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+                          void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+                                 void *cpu_addr, dma_addr_t dma_addr,
+                                 size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
 
 #endif
index 9d262645f6675275c9e2c7adca9a66b114d03b88..17df48fc8f44a419e1abd9051ccac1b4645e7ea9 100644 (file)
@@ -3,7 +3,6 @@ config FRV
        default y
        select HAVE_IDE
        select HAVE_ARCH_TRACEHOOK
-       select HAVE_IRQ_WORK
        select HAVE_PERF_EVENTS
        select HAVE_UID16
        select HAVE_GENERIC_HARDIRQS
index dfb811002c640fcc790aaecf47727a1c5f83c5fa..1746a2b8e6e7287cd9ad612caa00af6bd9cd8a28 100644 (file)
@@ -132,4 +132,19 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        flush_write_buffers();
 }
 
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+                                   struct vm_area_struct *vma, void *cpu_addr,
+                                   dma_addr_t dma_addr, size_t size)
+{
+       return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+                                 void *cpu_addr, dma_addr_t dma_addr,
+                                 size_t size)
+{
+       return -EINVAL;
+}
+
 #endif  /* _ASM_DMA_MAPPING_H */
index 0744f7d7b1fd096b5ccbc71cab7305ea997fb3c3..e4decc6b8947e4fe68180010a524e0f7b6600084 100644 (file)
@@ -12,9 +12,7 @@ config HEXAGON
        # select ARCH_WANT_OPTIONAL_GPIOLIB
        # select ARCH_REQUIRE_GPIOLIB
        # select HAVE_CLK
-       # select IRQ_PER_CPU
        # select GENERIC_PENDING_IRQ if SMP
-       select HAVE_IRQ_WORK
        select GENERIC_ATOMIC64
        select HAVE_PERF_EVENTS
        select HAVE_GENERIC_HARDIRQS
index 3279646120e3be5bdbc970ca5297b5fce30011ee..00c2e88f77559eb9bca234936819071c83a0566b 100644 (file)
@@ -29,7 +29,6 @@ config IA64
        select ARCH_DISCARD_MEMBLOCK
        select GENERIC_IRQ_PROBE
        select GENERIC_PENDING_IRQ if SMP
-       select IRQ_PER_CPU
        select GENERIC_IRQ_SHOW
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
index 7fcf7f08ab0623ef87d62fb7ef001023b2871ed7..e2d3f5baf265408b49a201e8fa07e3b3b24fe939 100644 (file)
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  *
- * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in nsec.
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in nsec.
  * Otherwise we measure cpu time in jiffies using the generic definitions.
  */
 
 #ifndef __IA64_CPUTIME_H
 #define __IA64_CPUTIME_H
 
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-#include <asm-generic/cputime.h>
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+# include <asm-generic/cputime.h>
 #else
-
-#include <linux/time.h>
-#include <linux/jiffies.h>
-#include <asm/processor.h>
-
-typedef u64 __nocast cputime_t;
-typedef u64 __nocast cputime64_t;
-
-#define cputime_one_jiffy              jiffies_to_cputime(1)
-
-/*
- * Convert cputime <-> jiffies (HZ)
- */
-#define cputime_to_jiffies(__ct)       \
-       ((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
-#define jiffies_to_cputime(__jif)      \
-       (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
-#define cputime64_to_jiffies64(__ct)   \
-       ((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
-#define jiffies64_to_cputime64(__jif)  \
-       (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ))
-
-/*
- * Convert cputime <-> microseconds
- */
-#define cputime_to_usecs(__ct)         \
-       ((__force u64)(__ct) / NSEC_PER_USEC)
-#define usecs_to_cputime(__usecs)      \
-       (__force cputime_t)((__usecs) * NSEC_PER_USEC)
-#define usecs_to_cputime64(__usecs)    \
-       (__force cputime64_t)((__usecs) * NSEC_PER_USEC)
-
-/*
- * Convert cputime <-> seconds
- */
-#define cputime_to_secs(__ct)          \
-       ((__force u64)(__ct) / NSEC_PER_SEC)
-#define secs_to_cputime(__secs)                \
-       (__force cputime_t)((__secs) * NSEC_PER_SEC)
-
-/*
- * Convert cputime <-> timespec (nsec)
- */
-static inline cputime_t timespec_to_cputime(const struct timespec *val)
-{
-       u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
-       return (__force cputime_t) ret;
-}
-static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
-{
-       val->tv_sec  = (__force u64) ct / NSEC_PER_SEC;
-       val->tv_nsec = (__force u64) ct % NSEC_PER_SEC;
-}
-
-/*
- * Convert cputime <-> timeval (msec)
- */
-static inline cputime_t timeval_to_cputime(struct timeval *val)
-{
-       u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
-       return (__force cputime_t) ret;
-}
-static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
-{
-       val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
-       val->tv_usec = ((__force u64) ct % NSEC_PER_SEC) / NSEC_PER_USEC;
-}
-
-/*
- * Convert cputime <-> clock (USER_HZ)
- */
-#define cputime_to_clock_t(__ct)       \
-       ((__force u64)(__ct) / (NSEC_PER_SEC / USER_HZ))
-#define clock_t_to_cputime(__x)                \
-       (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ))
-
-/*
- * Convert cputime64 to clock.
- */
-#define cputime64_to_clock_t(__ct)     \
-       cputime_to_clock_t((__force cputime_t)__ct)
-
+# include <asm/processor.h>
+# include <asm-generic/cputime_nsecs.h>
 extern void arch_vtime_task_switch(struct task_struct *tsk);
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
 #endif /* __IA64_CPUTIME_H */
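The conversions deleted above all reduce to plain nanosecond arithmetic, which is what <asm-generic/cputime_nsecs.h> now provides in one place. A user-space sanity check of the same arithmetic (HZ=250 is assumed purely as an example):

#include <stdio.h>

#define NSEC_PER_SEC   1000000000ULL
#define NSEC_PER_USEC  1000ULL
#define HZ             250ULL                   /* example value only */

int main(void)
{
        unsigned long long one_jiffy = NSEC_PER_SEC / HZ;

        printf("cputime_one_jiffy = %llu ns\n", one_jiffy);     /* 4000000 */
        printf("cputime_to_usecs(one_jiffy) = %llu\n",
               one_jiffy / NSEC_PER_USEC);                      /* 4000 */
        return 0;
}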
index ff2ae41365840a616b4a7b6211aed1b2678c6e0a..020d655ed082bf9aabbf31a9091468e9045acb0d 100644 (file)
@@ -31,7 +31,7 @@ struct thread_info {
        mm_segment_t addr_limit;        /* user-level address space limit */
        int preempt_count;              /* 0=premptable, <0=BUG; will also serve as bh-counter */
        struct restart_block restart_block;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        __u64 ac_stamp;
        __u64 ac_leave;
        __u64 ac_stime;
@@ -69,7 +69,7 @@ struct thread_info {
 #define task_stack_page(tsk)   ((void *)(tsk))
 
 #define __HAVE_THREAD_FUNCTIONS
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 #define setup_thread_stack(p, org)                     \
        *task_thread_info(p) = *task_thread_info(org);  \
        task_thread_info(p)->ac_stime = 0;              \
index c57fa910f2c937e7473a9ea63e5ac9a7be74c86c..00cf03e0cb8295c8f489107cc025bf6fd377b108 100644 (file)
@@ -1,5 +1,5 @@
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 /* read ar.itc in advance, and use it before leaving bank 0 */
 #define XEN_ACCOUNT_GET_STAMP          \
        MOV_FROM_ITC(pUStk, p6, r20, r2);
index a48bd9a9927bb3b42c7b2076bb0c4b359d83a2e5..46c9e3007315dedc0ca9c7aa38f8a1e522b3ca58 100644 (file)
@@ -41,7 +41,7 @@ void foo(void)
        DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
        DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
        DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        DEFINE(TI_AC_STAMP, offsetof(struct thread_info, ac_stamp));
        DEFINE(TI_AC_LEAVE, offsetof(struct thread_info, ac_leave));
        DEFINE(TI_AC_STIME, offsetof(struct thread_info, ac_stime));
index 6bfd8429ee0f4a6aedd72aa051030928f72085fa..7a53530f22c219eb877d81d2392a85722e9503dc 100644 (file)
@@ -724,7 +724,7 @@ GLOBAL_ENTRY(__paravirt_leave_syscall)
 #endif
 .global __paravirt_work_processed_syscall;
 __paravirt_work_processed_syscall:
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        adds r2=PT(LOADRS)+16,r12
        MOV_FROM_ITC(pUStk, p9, r22, r19)       // fetch time at leave
        adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
@@ -762,7 +762,7 @@ __paravirt_work_processed_syscall:
 
        ld8 r29=[r2],16         // M0|1 load cr.ipsr
        ld8 r28=[r3],16         // M0|1 load cr.iip
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 (pUStk) add r14=TI_AC_LEAVE+IA64_TASK_SIZE,r13
        ;;
        ld8 r30=[r2],16         // M0|1 load cr.ifs
@@ -793,7 +793,7 @@ __paravirt_work_processed_syscall:
        ld8.fill r1=[r3],16                     // M0|1 load r1
 (pUStk) mov r17=1                              // A
        ;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 (pUStk) st1 [r15]=r17                          // M2|3
 #else
 (pUStk) st1 [r14]=r17                          // M2|3
@@ -813,7 +813,7 @@ __paravirt_work_processed_syscall:
        shr.u r18=r19,16                // I0|1 get byte size of existing "dirty" partition
        COVER                           // B    add current frame into dirty partition & set cr.ifs
        ;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        mov r19=ar.bsp                  // M2   get new backing store pointer
        st8 [r14]=r22                   // M    save time at leave
        mov f10=f0                      // F    clear f10
@@ -948,7 +948,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
        adds r16=PT(CR_IPSR)+16,r12
        adds r17=PT(CR_IIP)+16,r12
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        .pred.rel.mutex pUStk,pKStk
        MOV_FROM_PSR(pKStk, r22, r29)   // M2 read PSR now that interrupts are disabled
        MOV_FROM_ITC(pUStk, p9, r22, r29)       // M  fetch time at leave
@@ -981,7 +981,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
        ;;
        ld8.fill r12=[r16],16
        ld8.fill r13=[r17],16
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 (pUStk)        adds r3=TI_AC_LEAVE+IA64_TASK_SIZE,r18
 #else
 (pUStk)        adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
@@ -989,7 +989,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
        ;;
        ld8 r20=[r16],16        // ar.fpsr
        ld8.fill r15=[r17],16
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 (pUStk)        adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18  // deferred
 #endif
        ;;
@@ -997,7 +997,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
        ld8.fill r2=[r17]
 (pUStk)        mov r17=1
        ;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        //  mmi_ :  ld8 st1 shr;;         mmi_ : st8 st1 shr;;
        //  mib  :  mov add br        ->  mib  : ld8 add br
        //  bbb_ :  br  nop cover;;       mbb_ : mov br  cover;;
index e662f178b990ab660526154dfa505c5452d57bf6..c4cd45d97749bf610f55b8be24a04ba5b7cec107 100644 (file)
@@ -529,7 +529,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
        nop.i 0
        ;;
        mov ar.rsc=0                            // M2   set enforced lazy mode, pl 0, LE, loadrs=0
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        MOV_FROM_ITC(p0, p6, r30, r23)          // M    get cycle for accounting
 #else
        nop.m 0
@@ -555,7 +555,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
        cmp.ne pKStk,pUStk=r0,r0                // A    set pKStk <- 0, pUStk <- 1
        br.call.sptk.many b7=ia64_syscall_setup // B
        ;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        // mov.m r30=ar.itc is called in advance
        add r16=TI_AC_STAMP+IA64_TASK_SIZE,r2
        add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r2
index 4738ff7bd66a28e35b260ea3706058fbf56adbda..9be4e497f3d3c253aa36c87459cdb4830a9e4fa7 100644 (file)
@@ -1073,7 +1073,7 @@ END(ia64_native_sched_clock)
 sched_clock = ia64_native_sched_clock
 #endif
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 GLOBAL_ENTRY(cycle_to_cputime)
        alloc r16=ar.pfs,1,0,0,0
        addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
@@ -1091,7 +1091,7 @@ GLOBAL_ENTRY(cycle_to_cputime)
        shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
        br.ret.sptk.many rp
 END(cycle_to_cputime)
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 #ifdef CONFIG_IA64_BRL_EMU
 
index fa25689fc453b36e1cb455782af442bdbf8017ee..689ffcaa284e4ddbdbe1503b12d0713c7cb5c017 100644 (file)
@@ -784,7 +784,7 @@ ENTRY(break_fault)
 
 (p8)   adds r28=16,r28                         // A    switch cr.iip to next bundle
 (p9)   adds r8=1,r8                            // A    increment ei to next slot
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        ;;
        mov b6=r30                              // I0   setup syscall handler branch reg early
 #else
@@ -801,7 +801,7 @@ ENTRY(break_fault)
        //
 ///////////////////////////////////////////////////////////////////////
        st1 [r16]=r0                            // M2|3 clear current->thread.on_ustack flag
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        MOV_FROM_ITC(p0, p14, r30, r18)         // M    get cycle for accounting
 #else
        mov b6=r30                              // I0   setup syscall handler branch reg early
@@ -817,7 +817,7 @@ ENTRY(break_fault)
        cmp.eq p14,p0=r9,r0                     // A    are syscalls being traced/audited?
        br.call.sptk.many b7=ia64_syscall_setup // B
 1:
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        // mov.m r30=ar.itc is called in advance, and r13 is current
        add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13  // A
        add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13  // A
@@ -1043,7 +1043,7 @@ END(ia64_syscall_setup)
        DBG_FAULT(16)
        FAULT(16)
 
-#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE)
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE)
        /*
         * There is no particular reason for this code to be here, other than
         * that there happens to be space here that would go unused otherwise.
index d56753a11636b723b402a32df60fc762af3c5e38..cc82a7d744c985ce18b0a70512cd54234d71aefa 100644 (file)
@@ -4,7 +4,7 @@
 #include "entry.h"
 #include "paravirt_inst.h"
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 /* read ar.itc in advance, and use it before leaving bank 0 */
 #define ACCOUNT_GET_STAMP                              \
 (pUStk) mov.m r20=ar.itc;
index 4265ff64219b20eade95390e8cc2f47ec7500fba..b7a5fffe0924e03c860f50afcebc5bc0fc4ea7a3 100644 (file)
@@ -672,33 +672,6 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
        read_unlock(&tasklist_lock);
 }
 
-static inline int
-thread_matches (struct task_struct *thread, unsigned long addr)
-{
-       unsigned long thread_rbs_end;
-       struct pt_regs *thread_regs;
-
-       if (ptrace_check_attach(thread, 0) < 0)
-               /*
-                * If the thread is not in an attachable state, we'll
-                * ignore it.  The net effect is that if ADDR happens
-                * to overlap with the portion of the thread's
-                * register backing store that is currently residing
-                * on the thread's kernel stack, then ptrace() may end
-                * up accessing a stale value.  But if the thread
-                * isn't stopped, that's a problem anyhow, so we're
-                * doing as well as we can...
-                */
-               return 0;
-
-       thread_regs = task_pt_regs(thread);
-       thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
-       if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
-               return 0;
-
-       return 1;       /* looks like we've got a winner */
-}
-
 /*
  * Write f32-f127 back to task->thread.fph if it has been modified.
  */
index 88a794536bc01b9e55ec0b41aa00b37c55653eea..fbaac1afb8441926d3f2fdc9eec4ed85c6122da1 100644 (file)
@@ -77,7 +77,7 @@ static struct clocksource clocksource_itc = {
 };
 static struct clocksource *itc_clocksource;
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 
 #include <linux/kernel_stat.h>
 
@@ -136,13 +136,14 @@ void vtime_account_system(struct task_struct *tsk)
 
        account_system_time(tsk, 0, delta, delta);
 }
+EXPORT_SYMBOL_GPL(vtime_account_system);
 
 void vtime_account_idle(struct task_struct *tsk)
 {
        account_idle_time(vtime_delta(tsk));
 }
 
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 static irqreturn_t
 timer_interrupt (int irq, void *dev_id)
index 17f7a45948eac7473b3afb16e86b5512d58a2c4d..292805f0762ebd100351944c0d5abe9713b07cf2 100644 (file)
@@ -21,6 +21,22 @@ extern void *dma_alloc_coherent(struct device *, size_t,
 extern void dma_free_coherent(struct device *, size_t,
                              void *, dma_addr_t);
 
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+                                   dma_addr_t *dma_handle, gfp_t flag,
+                                   struct dma_attrs *attrs)
+{
+       /* attrs is not supported and ignored */
+       return dma_alloc_coherent(dev, size, dma_handle, flag);
+}
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+                                 void *cpu_addr, dma_addr_t dma_handle,
+                                 struct dma_attrs *attrs)
+{
+       /* attrs is not supported and ignored */
+       dma_free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
 static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
                                          dma_addr_t *handle, gfp_t flag)
 {
@@ -99,4 +115,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
 #include <asm-generic/dma-mapping-broken.h>
 #endif
 
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+                          void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+                                 void *cpu_addr, dma_addr_t dma_addr,
+                                 size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
 #endif  /* _M68K_DMA_MAPPING_H */
index bf86b29fe64a2b025fe1890da80d90270e730912..037028f4ab7033c55557ade43c6a959c89f9a0b5 100644 (file)
@@ -64,6 +64,8 @@ extern unsigned int kobjsize(const void *objp);
  */
 #define        VMALLOC_START   0
 #define        VMALLOC_END     0xffffffff
+#define        KMAP_START      0
+#define        KMAP_END        0xffffffff
 
 #include <asm-generic/pgtable.h>
 
index ae700f49e51da86b8b86b8ce9ee41b0a1908d6b1..b0768a657920f63c51522f2ddb0ef22da0c8dbc3 100644 (file)
@@ -130,7 +130,6 @@ extern int handle_kernel_fault(struct pt_regs *regs);
 #define start_thread(_regs, _pc, _usp)                  \
 do {                                                    \
        (_regs)->pc = (_pc);                            \
-       ((struct switch_stack *)(_regs))[-1].a6 = 0;    \
        setframeformat(_regs);                          \
        if (current->mm)                                \
                (_regs)->d5 = current->mm->start_data;  \
index 847994ce680456d0144cf0361085728316917b54..f9337f61466050bd48aa8f993c3d59a647e262d6 100644 (file)
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            348
+#define NR_syscalls            349
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
index b94bfbf907053953336eb0bc4e04e6cf269685fb..625f321001dc077fe13da02eb0844a50099653dc 100644 (file)
 #define __NR_process_vm_readv  345
 #define __NR_process_vm_writev 346
 #define __NR_kcmp              347
+#define __NR_finit_module      348
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
index c30da5b3f2dbc2418085aeb6fed2940a2c175486..3f04ea0ab802750866835fcd9c3aa75ac7b28282 100644 (file)
@@ -368,4 +368,5 @@ ENTRY(sys_call_table)
        .long sys_process_vm_readv      /* 345 */
        .long sys_process_vm_writev
        .long sys_kcmp
+       .long sys_finit_module
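A hedged sketch of invoking the new entry from user space by number, since no libc wrapper existed at the time; the module path is a placeholder, and 348 is the m68k number from the table above (other architectures use different numbers):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static const long NR_finit_module = 348;        /* m68k, per the hunk above */

int main(void)
{
        int fd = open("/tmp/example.ko", O_RDONLY);     /* placeholder path */
        long ret = syscall(NR_finit_module, fd, "", 0);

        printf("finit_module -> %ld\n", ret);           /* needs CAP_SYS_MODULE */
        return 0;
}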
 
index f0e05bce92f2533ee8dc78ade768595d825646df..afd8106fd83b10c2b1d51b956b178effd9a6b096 100644 (file)
 void *empty_zero_page;
 EXPORT_SYMBOL(empty_zero_page);
 
+#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
+extern void init_pointer_table(unsigned long ptable);
+extern pmd_t *zero_pgtable;
+#endif
+
 #ifdef CONFIG_MMU
 
 pg_data_t pg_data_map[MAX_NUMNODES];
@@ -69,9 +74,6 @@ void __init m68k_setup_node(int node)
        node_set_online(node);
 }
 
-extern void init_pointer_table(unsigned long ptable);
-extern pmd_t *zero_pgtable;
-
 #else /* CONFIG_MMU */
 
 /*
index 2ac626ab9d4380014a6f25a24825d79f9ee92205..9becc44d9d7a13b5f4df1f6871de8999d6b46932 100644 (file)
@@ -4,7 +4,6 @@ config MIPS
        select HAVE_GENERIC_DMA_COHERENT
        select HAVE_IDE
        select HAVE_OPROFILE
-       select HAVE_IRQ_WORK
        select HAVE_PERF_EVENTS
        select PERF_USE_VMALLOC
        select HAVE_ARCH_KGDB
@@ -2161,7 +2160,6 @@ source "mm/Kconfig"
 config SMP
        bool "Multi-Processing support"
        depends on SYS_SUPPORTS_SMP
-       select IRQ_PER_CPU
        select USE_GENERIC_SMP_HELPERS
        help
          This enables support for systems with more than one CPU. If you have
index d7af29f1fcf0079c7ccdcae5af9f0add4c8c2c80..ba611927749b9f81def164126a2b2b1d00bf42c5 100644 (file)
@@ -8,8 +8,10 @@ config BCM47XX_SSB
        select SSB_DRIVER_EXTIF
        select SSB_EMBEDDED
        select SSB_B43_PCI_BRIDGE if PCI
+       select SSB_DRIVER_PCICORE if PCI
        select SSB_PCICORE_HOSTMODE if PCI
        select SSB_DRIVER_GPIO
+       select GPIOLIB
        default y
        help
         Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support.
@@ -25,6 +27,7 @@ config BCM47XX_BCMA
        select BCMA_HOST_PCI if PCI
        select BCMA_DRIVER_PCI_HOSTMODE if PCI
        select BCMA_DRIVER_GPIO
+       select GPIOLIB
        default y
        help
         Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus.
index 9f883bf769530d18c880416850060cd738bb0fa8..33b72144db3100625bd17991c1651ddd803ddf2b 100644 (file)
@@ -30,6 +30,7 @@
  * measurement, and debugging facilities.
  */
 
+#include <linux/compiler.h>
 #include <linux/irqflags.h>
 #include <asm/octeon/cvmx.h>
 #include <asm/octeon/cvmx-l2c.h>
@@ -285,22 +286,22 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter)
  */
 static void fault_in(uint64_t addr, int len)
 {
-       volatile char *ptr;
-       volatile char dummy;
+       char *ptr;
+
        /*
         * Adjust addr and length so we get all cache lines even for
         * small ranges spanning two cache lines.
         */
        len += addr & CVMX_CACHE_LINE_MASK;
        addr &= ~CVMX_CACHE_LINE_MASK;
-       ptr = (volatile char *)cvmx_phys_to_ptr(addr);
+       ptr = cvmx_phys_to_ptr(addr);
        /*
         * Invalidate L1 cache to make sure all loads result in data
         * being in L2.
         */
        CVMX_DCACHE_INVALIDATE;
        while (len > 0) {
-               dummy += *ptr;
+               ACCESS_ONCE(*ptr);
                len -= CVMX_CACHE_LINE_SIZE;
                ptr += CVMX_CACHE_LINE_SIZE;
        }
index e9bfc0813c72e99a46b29dadb1c3556843fc31a1..7bfad0520e25731c548c949ccecc045e91bba002 100644 (file)
@@ -16,7 +16,7 @@
 #include <asm/mipsregs.h>
 
 #define DSP_DEFAULT    0x00000000
-#define DSP_MASK       0x3ff
+#define DSP_MASK       0x3f
 
 #define __enable_dsp_hazard()                                          \
 do {                                                                   \
index ab84064283db2c50d847d888b32e42a4d316dd7d..33c34adbecfa19ae457699d449d8242ed5a0f43c 100644 (file)
@@ -353,6 +353,7 @@ union mips_instruction {
        struct u_format u_format;
        struct c_format c_format;
        struct r_format r_format;
+       struct p_format p_format;
        struct f_format f_format;
        struct ma_format ma_format;
        struct b_format b_format;
index edaa06d9d492171090e10ee08ad370fae79bebe9..e410df4e1b3a650d99d8981c7b63c04d4218daf4 100644 (file)
@@ -21,4 +21,4 @@
 #define R10000_LLSC_WAR                        0
 #define MIPS34K_MISSED_ITLB_WAR                0
 
-#endif /* __ASM_MIPS_MACH_PNX8550_WAR_H */
+#endif /* __ASM_MIPS_MACH_PNX833X_WAR_H */
index c63191055e695c5a49812f3f32ed0c913d941a0a..013d5f781263e20cf3d9e93cf14b6552c4386cc8 100644 (file)
@@ -230,6 +230,7 @@ static inline void pud_clear(pud_t *pudp)
 #else
 #define pte_pfn(x)             ((unsigned long)((x).pte >> _PFN_SHIFT))
 #define pfn_pte(pfn, prot)     __pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot)     __pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
 #endif
 
 #define __pgd_offset(address)  pgd_index(address)
index a1a0452ac1853333b07dac4dc7495c9bdd2514a5..77d4fb33f75ad1dd765ba69f697ed771312b3e9a 100644 (file)
@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 header-y += auxvec.h
 header-y += bitsperlong.h
+header-y += break.h
 header-y += byteorder.h
 header-y += cachectl.h
 header-y += errno.h
index 6a2d758dd8e9ed0b4a0afb043d2972869620834c..83fa1460e294e02cf602cd5d611af6d02abbba39 100644 (file)
 #define MCOUNT_OFFSET_INSNS 4
 #endif
 
+/* Arch override because MIPS doesn't need to run this from stop_machine() */
+void arch_ftrace_update_code(int command)
+{
+       ftrace_modify_all_code(command);
+}
+
 /*
  * Check if the address is in kernel space
  *
@@ -89,6 +95,24 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
        return 0;
 }
 
+#ifndef CONFIG_64BIT
+static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
+                               unsigned int new_code2)
+{
+       int faulted;
+
+       safe_store_code(new_code1, ip, faulted);
+       if (unlikely(faulted))
+               return -EFAULT;
+       ip += 4;
+       safe_store_code(new_code2, ip, faulted);
+       if (unlikely(faulted))
+               return -EFAULT;
+       flush_icache_range(ip, ip + 8); /* original ip + 12 */
+       return 0;
+}
+#endif
+
 /*
  * The details about the calling site of mcount on MIPS
  *
@@ -131,8 +155,18 @@ int ftrace_make_nop(struct module *mod,
         * needed.
         */
        new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
-
+#ifdef CONFIG_64BIT
        return ftrace_modify_code(ip, new);
+#else
+       /*
+        * On 32 bit MIPS platforms, gcc adds a stack adjust
+        * instruction in the delay slot after the branch to
+        * mcount and expects mcount to restore the sp on return.
+        * This is based on a legacy API and does nothing but
+        * waste instructions so it's being removed at runtime.
+        */
+       return ftrace_modify_code_2(ip, new, INSN_NOP);
+#endif
 }
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
index 4c968e7efb747d997ec20ba8ceeb500f4c12807d..1658676733576e74867347f0f7d122df0c5a023e 100644 (file)
@@ -46,9 +46,8 @@
        PTR_L   a5, PT_R9(sp)
        PTR_L   a6, PT_R10(sp)
        PTR_L   a7, PT_R11(sp)
-       PTR_ADDIU       sp, PT_SIZE
 #else
-       PTR_ADDIU       sp, (PT_SIZE + 8)
+       PTR_ADDIU       sp, PT_SIZE
 #endif
 .endm
 
@@ -69,7 +68,9 @@ NESTED(ftrace_caller, PT_SIZE, ra)
        .globl _mcount
 _mcount:
        b       ftrace_stub
-        nop
+       addiu sp,sp,8
+
+       /* When tracing is activated, it calls ftrace_caller+8 (aka here) */
        lw      t1, function_trace_stop
        bnez    t1, ftrace_stub
         nop
index eec690af6581616957089993e2decfd91f13b4fd..147cec19621d7a433429e66433fdd993c2abd62e 100644 (file)
@@ -705,7 +705,7 @@ static int vpe_run(struct vpe * v)
 
                        printk(KERN_WARNING
                               "VPE loader: TC %d is already in use.\n",
-                               t->index);
+                              v->tc->index);
                        return -ENOEXEC;
                }
        } else {
index f36acd1b38086d341458d927c4c4db99d3a40e0f..a7935bf0fecbf789299a2547abb7b0b2e8677e2b 100644 (file)
@@ -408,7 +408,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
 #endif
 
        /* tell oprofile which irq to use */
-       cp0_perfcount_irq = LTQ_PERF_IRQ;
+       cp0_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
 
        /*
         * if the timer irq is not one of the mips irqs we need to
index dc81ca8dc0dd4b0ccd81ae2edc78b059aa8fd016..288f7954988d06ef61cc94bbf29dfeab322796b8 100644 (file)
@@ -21,7 +21,7 @@ void __delay(unsigned long loops)
        "       .set    noreorder                               \n"
        "       .align  3                                       \n"
        "1:     bnez    %0, 1b                                  \n"
-#if __SIZEOF_LONG__ == 4
+#if BITS_PER_LONG == 32
        "       subu    %0, 1                                   \n"
 #else
        "       dsubu   %0, 1                                   \n"
index 7657fd21cd3fb35779ad6a37ff65c73547cedc88..cacfd31e8ec9d1121c86e81feb35fac4344a9a3b 100644 (file)
@@ -190,9 +190,3 @@ void __iounmap(const volatile void __iomem *addr)
 
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(__iounmap);
-
-int __virt_addr_valid(const volatile void *kaddr)
-{
-       return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
-}
-EXPORT_SYMBOL_GPL(__virt_addr_valid);
index d9be7540a6be7979a265a3ce32acbbe241b2b91c..7e5fe2790d8a212a8d0163989280918d26a46a3c 100644 (file)
@@ -192,3 +192,9 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 
        return ret;
 }
+
+int __virt_addr_valid(const volatile void *kaddr)
+{
+       return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
+}
+EXPORT_SYMBOL_GPL(__virt_addr_valid);
index 4e7f49d3d5a8105c45dd09a6b86999ed0506a7d7..c5ce6992ac4c7086aea4167a7460723a5d0cb7be 100644 (file)
@@ -193,8 +193,11 @@ static void nlm_init_node(void)
 
 void __init prom_init(void)
 {
-       int i, *argv, *envp;            /* passed as 32 bit ptrs */
+       int *argv, *envp;               /* passed as 32 bit ptrs */
        struct psb_info *prom_infop;
+#ifdef CONFIG_SMP
+       int i;
+#endif
 
        /* truncate to 32 bit and sign extend all args */
        argv = (int *)(long)(int)fw_arg1;
index 1552522b8718bffb86762229ef5279b93094df16..6eaa4f2d0e38cb8c85e31734c7e090ec9a491cb3 100644 (file)
@@ -24,7 +24,7 @@
 #include <asm/mach-ath79/pci.h>
 
 #define AR71XX_PCI_MEM_BASE    0x10000000
-#define AR71XX_PCI_MEM_SIZE    0x08000000
+#define AR71XX_PCI_MEM_SIZE    0x07000000
 
 #define AR71XX_PCI_WIN0_OFFS           0x10000000
 #define AR71XX_PCI_WIN1_OFFS           0x11000000
index 86d77a666458bae80c60f94c9fc405dca5f8459c..c11c75be2d7e06ab0b1232c35926a8ec3c5ba574 100644 (file)
@@ -21,7 +21,7 @@
 #define AR724X_PCI_CTRL_SIZE   0x100
 
 #define AR724X_PCI_MEM_BASE    0x10000000
-#define AR724X_PCI_MEM_SIZE    0x08000000
+#define AR724X_PCI_MEM_SIZE    0x04000000
 
 #define AR724X_PCI_REG_RESET           0x18
 #define AR724X_PCI_REG_INT_STATUS      0x4c
index aa03f2e13385fe5083a50b17ae6f8c02e3ccb269..e70001cfa05b1ee650b132f9163e72766f2fbb5c 100644 (file)
@@ -6,6 +6,7 @@ config MN10300
        select ARCH_WANT_IPC_PARSE_VERSION
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_KGDB
+       select GENERIC_ATOMIC64
        select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER
        select GENERIC_CLOCKEVENTS
        select MODULES_USE_ELF_RELA
index c1be4397b1edb4b059e917331c5ef8173bcf1096..a18abfc558eb1d41b99cd22798976f517196b7bd 100644 (file)
@@ -168,4 +168,19 @@ void dma_cache_sync(void *vaddr, size_t size,
        mn10300_dcache_flush_inv();
 }
 
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+                                   struct vm_area_struct *vma, void *cpu_addr,
+                                   dma_addr_t dma_addr, size_t size)
+{
+       return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+                                 void *cpu_addr, dma_addr_t dma_addr,
+                                 size_t size)
+{
+       return -EINVAL;
+}
+
 #endif
index b77feffbadea9b91f46d85be47d35a0f49d4b2f4..a32e34ecda9e49f9d3ee88e8c94621d441026bd0 100644 (file)
@@ -9,14 +9,12 @@ config PARISC
        select RTC_DRV_GENERIC
        select INIT_ALL_POSSIBLE
        select BUG
-       select HAVE_IRQ_WORK
        select HAVE_PERF_EVENTS
        select GENERIC_ATOMIC64 if !64BIT
        select HAVE_GENERIC_HARDIRQS
        select BROKEN_RODATA
        select GENERIC_IRQ_PROBE
        select GENERIC_PCI_IOMAP
-       select IRQ_PER_CPU
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select GENERIC_SMP_IDLE_THREAD
        select GENERIC_STRNCPY_FROM_USER
index 467bbd510eac04a5bc86a3f97f70332cf636ec19..106b395688e1d167bf764a76de70d5d58e4109d7 100644 (file)
@@ -238,4 +238,19 @@ void * sba_get_iommu(struct parisc_device *dev);
 /* At the moment, we panic on error for IOMMU resource exaustion */
 #define dma_mapping_error(dev, x)      0
 
+/* This API cannot be supported on PA-RISC */
+static inline int dma_mmap_coherent(struct device *dev,
+                                   struct vm_area_struct *vma, void *cpu_addr,
+                                   dma_addr_t dma_addr, size_t size)
+{
+       return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+                                 void *cpu_addr, dma_addr_t dma_addr,
+                                 size_t size)
+{
+       return -EINVAL;
+}
+
 #endif
index bfb44247d7a7b023d1953d5983a110fc69966995..eb7850b46c255e98cc9c97ef873766f793398061 100644 (file)
@@ -1865,7 +1865,7 @@ syscall_restore:
 
        /* Are we being ptraced? */
        ldw     TASK_FLAGS(%r1),%r19
-       ldi     (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2
+       ldi     _TIF_SYSCALL_TRACE_MASK,%r2
        and,COND(=)     %r19,%r2,%r0
        b,n     syscall_restore_rfi
 
@@ -1978,15 +1978,23 @@ syscall_restore_rfi:
        /* sr2 should be set to zero for userspace syscalls */
        STREG   %r0,TASK_PT_SR2(%r1)
 
-pt_regs_ok:
        LDREG   TASK_PT_GR31(%r1),%r2
-       depi    3,31,2,%r2                         /* ensure return to user mode. */
-       STREG   %r2,TASK_PT_IAOQ0(%r1)
+       depi    3,31,2,%r2                 /* ensure return to user mode. */
+       STREG   %r2,TASK_PT_IAOQ0(%r1)
        ldo     4(%r2),%r2
        STREG   %r2,TASK_PT_IAOQ1(%r1)
+       b       intr_restore
        copy    %r25,%r16
+
+pt_regs_ok:
+       LDREG   TASK_PT_IAOQ0(%r1),%r2
+       depi    3,31,2,%r2                 /* ensure return to user mode. */
+       STREG   %r2,TASK_PT_IAOQ0(%r1)
+       LDREG   TASK_PT_IAOQ1(%r1),%r2
+       depi    3,31,2,%r2
+       STREG   %r2,TASK_PT_IAOQ1(%r1)
        b       intr_restore
-       nop
+       copy    %r25,%r16
 
        .import schedule,code
 syscall_do_resched:
index c0b1affc06a8fdce9163cf06a384cc958705fa59..0299d63cd1128d9dc1e6d1877033565cf0cd672d 100644 (file)
@@ -410,11 +410,13 @@ void __init init_IRQ(void)
 {
        local_irq_disable();    /* PARANOID - should already be disabled */
        mtctl(~0UL, 23);        /* EIRR : clear all pending external intr */
-       claim_cpu_irqs();
 #ifdef CONFIG_SMP
-       if (!cpu_eiem)
+       if (!cpu_eiem) {
+               claim_cpu_irqs();
                cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
+       }
 #else
+       claim_cpu_irqs();
        cpu_eiem = EIEM_MASK(TIMER_IRQ);
 #endif
         set_eiem(cpu_eiem);    /* EIEM : enable all external intr */
index 857c2f545470522bb51345dfaacb4f31e971113e..534abd4936e1ecf96a8f872d9ba90f930c1aff9e 100644 (file)
@@ -26,7 +26,7 @@
 #include <asm/asm-offsets.h>
 
 /* PSW bits we allow the debugger to modify */
-#define USER_PSW_BITS  (PSW_N | PSW_V | PSW_CB)
+#define USER_PSW_BITS  (PSW_N | PSW_B | PSW_V | PSW_CB)
 
 /*
  * Called by kernel/ptrace.c when detaching..
index 53799695599872565f1ad8c387cbf0e1b91ceb3d..fd051705a407dd7396ee2205c1fecf5eebc34bb4 100644 (file)
@@ -190,8 +190,10 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
        DBG(1,"get_sigframe: ka = %#lx, sp = %#lx, frame_size = %#lx\n",
                        (unsigned long)ka, sp, frame_size);
        
+       /* Align alternate stack and reserve 64 bytes for the signal
+          handler's frame marker.  */
        if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
-               sp = current->sas_ss_sp; /* Stacks grow up! */
+               sp = (current->sas_ss_sp + 0x7f) & ~0x3f; /* Stacks grow up! */
 
        DBG(1,"get_sigframe: Returning sp = %#lx\n", (unsigned long)sp);
        return (void __user *) sp; /* Stacks grow up.  Fun. */
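The user-visible setup that feeds sas_ss_sp, which get_sigframe() now rounds up, is sigaltstack(2); a minimal sketch (illustrative only):

#include <signal.h>
#include <stdlib.h>
#include <unistd.h>

static void on_segv(int sig)
{
        write(1, "running on the alternate stack\n", 31);
        _exit(0);
}

int main(void)
{
        stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_size = SIGSTKSZ };
        struct sigaction sa = { .sa_handler = on_segv, .sa_flags = SA_ONSTACK };

        sigaltstack(&ss, NULL);
        sigaction(SIGSEGV, &sa, NULL);
        raise(SIGSEGV);                 /* handler runs on the alternate stack */
        return 0;
}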
index 9071e093164af3ee8a7907a9b26521b8360121e8..933423fa5144aca0b32a8110d88ff20abc98b90e 100644 (file)
     Sgl_isinexact_to_fix(sgl_value,exponent)
 
 #define Duint_from_sgl_mantissa(sgl_value,exponent,dresultA,dresultB)  \
-  {Sall(sgl_value) <<= SGL_EXP_LENGTH;  /*  left-justify  */           \
+  {unsigned int val = Sall(sgl_value) << SGL_EXP_LENGTH;               \
     if (exponent <= 31) {                                              \
-       Dintp1(dresultA) = 0;                                           \
-       Dintp2(dresultB) = (unsigned)Sall(sgl_value) >> (31 - exponent); \
+       Dintp1(dresultA) = 0;                                           \
+       Dintp2(dresultB) = val >> (31 - exponent);                      \
     }                                                                  \
     else {                                                             \
-       Dintp1(dresultA) = Sall(sgl_value) >> (63 - exponent);          \
-       Dintp2(dresultB) = Sall(sgl_value) << (exponent - 31);          \
+       Dintp1(dresultA) = val >> (63 - exponent);                      \
+       Dintp2(dresultB) = exponent <= 62 ? val << (exponent - 31) : 0; \
     }                                                                  \
-    Sall(sgl_value) >>= SGL_EXP_LENGTH;  /* return to original */      \
   }
 
 #define Duint_setzero(dresultA,dresultB)       \
index 17903f1f356be5790d6ddcde1ff3d5eabaf2ed78..561ccca7b1a7aa3c36d82beeb2f764568c45fecb 100644 (file)
@@ -118,14 +118,12 @@ config PPC
        select HAVE_SYSCALL_WRAPPERS if PPC64
        select GENERIC_ATOMIC64 if PPC32
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-       select HAVE_IRQ_WORK
        select HAVE_PERF_EVENTS
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
        select HAVE_GENERIC_HARDIRQS
        select ARCH_WANT_IPC_PARSE_VERSION
        select SPARSE_IRQ
-       select IRQ_PER_CPU
        select IRQ_DOMAIN
        select GENERIC_IRQ_SHOW
        select GENERIC_IRQ_SHOW_LEVEL
index 29bb11ec6c640677a73c3e16058897789f1fba2f..4f35fc4623856888f441489e3a43abc6750ccd50 100644 (file)
@@ -1,6 +1,6 @@
 CONFIG_PPC64=y
 CONFIG_PPC_BOOK3E_64=y
-# CONFIG_VIRT_CPU_ACCOUNTING is not set
+# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set
 CONFIG_SMP=y
 CONFIG_NR_CPUS=256
 CONFIG_EXPERIMENTAL=y
index 88fa5c46f66f5481e2d62dfc0e4990fff4f782f3..f7df8362911fc80644b03badd8c639538ff59afd 100644 (file)
@@ -1,6 +1,6 @@
 CONFIG_PPC64=y
 CONFIG_PPC_BOOK3E_64=y
-# CONFIG_VIRT_CPU_ACCOUNTING is not set
+# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set
 CONFIG_SMP=y
 CONFIG_NR_CPUS=2
 CONFIG_EXPERIMENTAL=y
index 840a2c2d043085434b715dfffa3d4399e70f383b..bcedeea0df8934424b94fc30d7c567f0a0b611ca 100644 (file)
@@ -1,6 +1,6 @@
 CONFIG_PPC64=y
 CONFIG_ALTIVEC=y
-# CONFIG_VIRT_CPU_ACCOUNTING is not set
+# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set
 CONFIG_SMP=y
 CONFIG_NR_CPUS=2
 CONFIG_EXPERIMENTAL=y
index 483733bd06d4e9bda0689c4fba66849e2471b7c2..607559ab271ff98b45de1f11964416bebaf32841 100644 (file)
@@ -8,7 +8,7 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  *
- * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in
  * the same units as the timebase.  Otherwise we measure cpu time
  * in jiffies using the generic definitions.
  */
@@ -16,7 +16,7 @@
 #ifndef __POWERPC_CPUTIME_H
 #define __POWERPC_CPUTIME_H
 
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 #include <asm-generic/cputime.h>
 #ifdef __KERNEL__
 static inline void setup_cputime_one_jiffy(void) { }
@@ -231,5 +231,5 @@ static inline cputime_t clock_t_to_cputime(const unsigned long clk)
 static inline void arch_vtime_task_switch(struct task_struct *tsk) { }
 
 #endif /* __KERNEL__ */
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 #endif /* __POWERPC_CPUTIME_H */
index 531fe0c3108f8e157b8a255cb6a2ae8fce142059..b1e7f2af1016c82e6584731a0d0112b80a5eb277 100644 (file)
@@ -145,7 +145,7 @@ struct dtl_entry {
 extern struct kmem_cache *dtl_cache;
 
 /*
- * When CONFIG_VIRT_CPU_ACCOUNTING = y, the cpu accounting code controls
+ * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
  * reading from the dispatch trace log.  If other code wants to consume
  * DTL entries, it can set this pointer to a function that will get
  * called once for each DTL entry that gets processed.
index 9710be3a2d1753e7fef95287d182811e5ce5a0ba..136bba62efa48775bd8b4738f675e8796b0ee0b2 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <linux/types.h>
 #include <asm/hw_irq.h>
+#include <linux/device.h>
 
 #define MAX_HWEVENTS           8
 #define MAX_EVENT_ALTERNATIVES 8
@@ -35,6 +36,7 @@ struct power_pmu {
        void            (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
        int             (*limited_pmc_event)(u64 event_id);
        u32             flags;
+       const struct attribute_group    **attr_groups;
        int             n_generic;
        int             *generic_events;
        int             (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
@@ -109,3 +111,27 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
  * If an event_id is not subject to the constraint expressed by a particular
  * field, then it will have 0 in both the mask and value for that field.
  */
+
+extern ssize_t power_events_sysfs_show(struct device *dev,
+                               struct device_attribute *attr, char *page);
+
+/*
+ * EVENT_VAR() is same as PMU_EVENT_VAR with a suffix.
+ *
+ * Having a suffix allows us to have aliases in sysfs - eg: the generic
+ * event 'cpu-cycles' can have two entries in sysfs: 'cpu-cycles' and
+ * 'PM_CYC' where the latter is the name by which the event is known in
+ * POWER CPU specification.
+ */
+#define        EVENT_VAR(_id, _suffix)         event_attr_##_id##_suffix
+#define        EVENT_PTR(_id, _suffix)         &EVENT_VAR(_id, _suffix).attr.attr
+
+#define        EVENT_ATTR(_name, _id, _suffix)                                 \
+       PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_PM_##_id,    \
+                       power_events_sysfs_show)
+
+#define        GENERIC_EVENT_ATTR(_name, _id)  EVENT_ATTR(_name, _id, _g)
+#define        GENERIC_EVENT_PTR(_id)          EVENT_PTR(_id, _g)
+
+#define        POWER_EVENT_ATTR(_name, _id)    EVENT_ATTR(PM_##_name, _id, _p)
+#define        POWER_EVENT_PTR(_id)            EVENT_PTR(_id, _p)
index ea2a86e8ff95bf731a399850cf2c8934b3fa8a19..2d0e1f5d83394a60544cc3a3fa003973c868596b 100644 (file)
@@ -24,7 +24,7 @@
  * user_time and system_time fields in the paca.
  */
 
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 #define ACCOUNT_CPU_USER_ENTRY(ra, rb)
 #define ACCOUNT_CPU_USER_EXIT(ra, rb)
 #define ACCOUNT_STOLEN_TIME
@@ -70,7 +70,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
 
 #endif /* CONFIG_PPC_SPLPAR */
 
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 /*
  * Macros for storing registers into and loading registers from
index ed0e0254b47f2b3ae67940e15d151b58dddc7462..e3af3286a06801643a760fcbfa2afd116ad01053 100644 (file)
@@ -78,7 +78,7 @@ struct kvm_vcpu_arch_shared {
 
 #define KVM_HCALL_TOKEN(num)     _EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, num)
 
-#include <uapi/asm/epapr_hcalls.h>
+#include <asm/epapr_hcalls.h>
 
 #define KVM_FEATURE_MAGIC_PAGE 1
 
index d22e73e4618b7924879517f1f59cb99dcd63b43e..e514de57a125333a4ce174cb399070b6cc62e3b4 100644 (file)
@@ -439,6 +439,8 @@ ret_from_fork:
 ret_from_kernel_thread:
        REST_NVGPRS(r1)
        bl      schedule_tail
+       li      r3,0
+       stw     r3,0(r1)
        mtlr    r14
        mr      r3,r15
        PPC440EP_ERR42
index b310a0573625dec8a672c267ab6158f8e90b37a6..ac057013f9fdaa5c0c64323df0a68c271de49a2f 100644 (file)
@@ -94,7 +94,7 @@ system_call_common:
        addi    r9,r1,STACK_FRAME_OVERHEAD
        ld      r11,exception_marker@toc(r2)
        std     r11,-16(r9)             /* "regshere" marker */
-#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
 BEGIN_FW_FTR_SECTION
        beq     33f
        /* if from user, see if there are any DTL entries to process */
@@ -110,7 +110,7 @@ BEGIN_FW_FTR_SECTION
        addi    r9,r1,STACK_FRAME_OVERHEAD
 33:
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */
 
        /*
         * A syscall should always be called with interrupts enabled
@@ -664,6 +664,19 @@ resume_kernel:
        ld      r4,TI_FLAGS(r9)
        andi.   r0,r4,_TIF_NEED_RESCHED
        bne     1b
+
+       /*
+        * arch_local_irq_restore() from preempt_schedule_irq above may
+        * enable hard interrupts, but we really should disable interrupts
+        * when we return from the interrupt so that we don't get
+        * interrupted after loading SRR0/1.
+        */
+#ifdef CONFIG_PPC_BOOK3E
+       wrteei  0
+#else
+       ld      r10,PACAKMSR(r13) /* Get kernel MSR without EE */
+       mtmsrd  r10,1             /* Update machine state */
+#endif /* CONFIG_PPC_BOOK3E */
 #endif /* CONFIG_PREEMPT */
 
        .globl  fast_exc_return_irq
index c470a40b29f5d4937883cfcd8a40dd1c6bbdcfd1..a7bc7521c0645c6eb697308f039fb74eaad6b4af 100644 (file)
@@ -154,12 +154,12 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
 static int kgdb_singlestep(struct pt_regs *regs)
 {
        struct thread_info *thread_info, *exception_thread_info;
-       struct thread_info *backup_current_thread_info = \
-               (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL);
+       struct thread_info *backup_current_thread_info;
 
        if (user_mode(regs))
                return 0;
 
+       backup_current_thread_info = (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL);
        /*
         * On Book E and perhaps other processors, singlestep is handled on
         * the critical exception stack.  This causes current_thread_info()
@@ -185,6 +185,7 @@ static int kgdb_singlestep(struct pt_regs *regs)
                /* Restore current_thread_info lastly. */
                memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info);
 
+       kfree(backup_current_thread_info);
        return 1;
 }
 
index 6f6b1cccc91662037115e69dc072ac1f7a376be5..f77fa22754bcde0d9024f5365e1fa8ac7885a3a6 100644 (file)
@@ -143,7 +143,7 @@ EXPORT_SYMBOL_GPL(ppc_proc_freq);
 unsigned long ppc_tb_freq;
 EXPORT_SYMBOL_GPL(ppc_tb_freq);
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 /*
  * Factors for converting from cputime_t (timebase ticks) to
  * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
@@ -347,6 +347,7 @@ void vtime_account_system(struct task_struct *tsk)
        if (stolen)
                account_steal_time(stolen);
 }
+EXPORT_SYMBOL_GPL(vtime_account_system);
 
 void vtime_account_idle(struct task_struct *tsk)
 {
@@ -377,7 +378,7 @@ void vtime_account_user(struct task_struct *tsk)
        account_user_time(tsk, utime, utimescaled);
 }
 
-#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
+#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 #define calc_cputime_factors()
 #endif
 
@@ -494,10 +495,15 @@ void timer_interrupt(struct pt_regs * regs)
        set_dec(DECREMENTER_MAX);
 
        /* Some implementations of hotplug will get timer interrupts while
-        * offline, just ignore these
+        * offline, just ignore these.  We also need to set
+        * decrementers_next_tb to the maximum so that __check_irq_replay
+        * doesn't replay the timer interrupt on return, otherwise we'll
+        * trap here infinitely :(
         */
-       if (!cpu_online(smp_processor_id()))
+       if (!cpu_online(smp_processor_id())) {
+               *next_tb = ~(u64)0;
                return;
+       }
 
        /* Conditionally hard-enable interrupts now that the DEC has been
         * bumped to its maximum value
@@ -663,7 +669,7 @@ int update_persistent_clock(struct timespec now)
        struct rtc_time tm;
 
        if (!ppc_md.set_rtc_time)
-               return 0;
+               return -ENODEV;
 
        to_tm(now.tv_sec + 1 + timezone_offset, &tm);
        tm.tm_year -= 1900;
index 35f3cf0269b3100fff55aaee62f0c2ea5fc507fa..a353c485808c6ea203eac428106b5fdf41943b36 100644 (file)
@@ -79,7 +79,9 @@ static void flush_tlb_power7(struct kvm_vcpu *vcpu)
 static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
 {
        unsigned long srr1 = vcpu->arch.shregs.msr;
+#ifdef CONFIG_PPC_POWERNV
        struct opal_machine_check_event *opal_evt;
+#endif
        long handled = 1;
 
        if (srr1 & SRR1_MC_LDSTERR) {
@@ -117,6 +119,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
                handled = 0;
        }
 
+#ifdef CONFIG_PPC_POWERNV
        /*
         * See if OPAL has already handled the condition.
         * We assume that if the condition is recovered then OPAL
@@ -131,6 +134,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
 
        if (handled)
                opal_evt->in_use = 0;
+#endif
 
        return handled;
 }
index b0855e5d8905ee50e6422ca9edfc37aa952f6b21..9d9cddc5b346ff9518009539c08804b1cf0f9682 100644 (file)
@@ -39,6 +39,7 @@
 #define OP_31_XOP_TRAP      4
 #define OP_31_XOP_LWZX      23
 #define OP_31_XOP_TRAP_64   68
+#define OP_31_XOP_DCBF      86
 #define OP_31_XOP_LBZX      87
 #define OP_31_XOP_STWX      151
 #define OP_31_XOP_STBX      215
@@ -374,6 +375,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                        emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
                        break;
 
+               case OP_31_XOP_DCBF:
                case OP_31_XOP_DCBI:
                        /* Do nothing. The guest is performing dcbi because
                         * hardware DMA is not snooped by the dcache, but
index 56585086413a4f9876c70eb6b6ea9f3cf1c1ef69..7443481a315c4922063a5f8801c62ee9bfb7691f 100644 (file)
@@ -115,11 +115,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
        sldi    r29,r5,SID_SHIFT - VPN_SHIFT
        rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
        or      r29,r28,r29
-
-       /* Calculate hash value for primary slot and store it in r28 */
-       rldicl  r5,r5,0,25              /* vsid & 0x0000007fffffffff */
-       rldicl  r0,r3,64-12,48          /* (ea >> 12) & 0xffff */
-       xor     r28,r5,r0
+       /*
+        * Calculate hash value for primary slot and store it in r28
+        * r3 = va, r5 = vsid
+        * r0 = (va >> 12) & ((1ul << (28 - 12)) -1)
+        */
+       rldicl  r0,r3,64-12,48
+       xor     r28,r5,r0               /* hash */
        b       4f
 
 3:     /* Calc vpn and put it in r29 */
@@ -130,11 +132,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
        /*
         * calculate hash value for primary slot and
         * store it in r28 for 1T segment
+        * r3 = va, r5 = vsid
         */
-       rldic   r28,r5,25,25            /* (vsid << 25) & 0x7fffffffff */
-       clrldi  r5,r5,40                /* vsid & 0xffffff */
-       rldicl  r0,r3,64-12,36          /* (ea >> 12) & 0xfffffff */
-       xor     r28,r28,r5
+       sldi    r28,r5,25               /* vsid << 25 */
+       /* r0 =  (va >> 12) & ((1ul << (40 - 12)) -1) */
+       rldicl  r0,r3,64-12,36
+       xor     r28,r28,r5              /* vsid ^ ( vsid << 25) */
        xor     r28,r28,r0              /* hash */
 
        /* Convert linux PTE bits into HW equivalents */
@@ -407,11 +410,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
         */
        rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
        or      r29,r28,r29
-
-       /* Calculate hash value for primary slot and store it in r28 */
-       rldicl  r5,r5,0,25              /* vsid & 0x0000007fffffffff */
-       rldicl  r0,r3,64-12,48          /* (ea >> 12) & 0xffff */
-       xor     r28,r5,r0
+       /*
+        * Calculate hash value for primary slot and store it in r28
+        * r3 = va, r5 = vsid
+        * r0 = (va >> 12) & ((1ul << (28 - 12)) -1)
+        */
+       rldicl  r0,r3,64-12,48
+       xor     r28,r5,r0               /* hash */
        b       4f
 
 3:     /* Calc vpn and put it in r29 */
@@ -426,11 +431,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
        /*
         * Calculate hash value for primary slot and
         * store it in r28  for 1T segment
+        * r3 = va, r5 = vsid
         */
-       rldic   r28,r5,25,25            /* (vsid << 25) & 0x7fffffffff */
-       clrldi  r5,r5,40                /* vsid & 0xffffff */
-       rldicl  r0,r3,64-12,36          /* (ea >> 12) & 0xfffffff */
-       xor     r28,r28,r5
+       sldi    r28,r5,25               /* vsid << 25 */
+       /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */
+       rldicl  r0,r3,64-12,36
+       xor     r28,r28,r5              /* vsid ^ ( vsid << 25) */
        xor     r28,r28,r0              /* hash */
 
        /* Convert linux PTE bits into HW equivalents */
@@ -752,25 +758,27 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
        rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
        or      r29,r28,r29
 
-       /* Calculate hash value for primary slot and store it in r28 */
-       rldicl  r5,r5,0,25              /* vsid & 0x0000007fffffffff */
-       rldicl  r0,r3,64-16,52          /* (ea >> 16) & 0xfff */
-       xor     r28,r5,r0
+       /* Calculate hash value for primary slot and store it in r28
+        * r3 = va, r5 = vsid
+        * r0 = (va >> 16) & ((1ul << (28 - 16)) -1)
+        */
+       rldicl  r0,r3,64-16,52
+       xor     r28,r5,r0               /* hash */
        b       4f
 
 3:     /* Calc vpn and put it in r29 */
        sldi    r29,r5,SID_SHIFT_1T - VPN_SHIFT
        rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
        or      r29,r28,r29
-
        /*
         * calculate hash value for primary slot and
         * store it in r28 for 1T segment
+        * r3 = va, r5 = vsid
         */
-       rldic   r28,r5,25,25            /* (vsid << 25) & 0x7fffffffff */
-       clrldi  r5,r5,40                /* vsid & 0xffffff */
-       rldicl  r0,r3,64-16,40          /* (ea >> 16) & 0xffffff */
-       xor     r28,r28,r5
+       sldi    r28,r5,25               /* vsid << 25 */
+       /* r0 = (va >> 16) & ((1ul << (40 - 16)) -1) */
+       rldicl  r0,r3,64-16,40
+       xor     r28,r28,r5              /* vsid ^ ( vsid << 25) */
        xor     r28,r28,r0              /* hash */
 
        /* Convert linux PTE bits into HW equivalents */
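
The new comments make the hash formulas explicit: for 256MB segments the primary hash is vsid ^ ((va >> 12) & ((1 << (28 - 12)) - 1)), for 1T segments it is vsid ^ (vsid << 25) ^ ((va >> 12) & ((1 << (40 - 12)) - 1)), and the 64K-page variant in the last hunk uses a shift of 16 with (28 - 16) and (40 - 16) bit masks. A hedged C transcription of those comments; the helper names are illustrative and this mirrors the comments rather than the canonical kernel helper:

        #include <stdint.h>

        /* 4K pages: page shift 12.  The 64K-page path would use shift 16. */
        static uint64_t hpte_hash_256m(uint64_t vsid, uint64_t va)
        {
                return vsid ^ ((va >> 12) & ((1ULL << (28 - 12)) - 1));
        }

        static uint64_t hpte_hash_1t(uint64_t vsid, uint64_t va)
        {
                return vsid ^ (vsid << 25) ^
                       ((va >> 12) & ((1ULL << (40 - 12)) - 1));
        }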
index 315f9495e9b2b5aa42a6c9aaba5a6307ac03ddde..f444b94935f560f6a7abf1a1de46d88cd32e71d5 100644 (file)
@@ -52,7 +52,7 @@ static int power7_marked_instr_event(u64 mmcr1)
        for (pmc = 0; pmc < 4; pmc++) {
                psel = mmcr1 & (OPROFILE_PM_PMCSEL_MSK
                                << (OPROFILE_MAX_PMC_NUM - pmc)
-                               * OPROFILE_MAX_PMC_NUM);
+                               * OPROFILE_PMSEL_FIELD_WIDTH);
                psel = (psel >> ((OPROFILE_MAX_PMC_NUM - pmc)
                                 * OPROFILE_PMSEL_FIELD_WIDTH)) & ~1ULL;
                unit = mmcr1 & (OPROFILE_PM_UNIT_MSK
index aa2465e21f1a87b989e49eae2027fee72dc4948f..fa476d50791f28f690d77a236defafbe17896526 100644 (file)
@@ -1305,6 +1305,16 @@ static int power_pmu_event_idx(struct perf_event *event)
        return event->hw.idx;
 }
 
+ssize_t power_events_sysfs_show(struct device *dev,
+                               struct device_attribute *attr, char *page)
+{
+       struct perf_pmu_events_attr *pmu_attr;
+
+       pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+
+       return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
 struct pmu power_pmu = {
        .pmu_enable     = power_pmu_enable,
        .pmu_disable    = power_pmu_disable,
@@ -1537,6 +1547,8 @@ int __cpuinit register_power_pmu(struct power_pmu *pmu)
        pr_info("%s performance monitor hardware support registered\n",
                pmu->name);
 
+       power_pmu.attr_groups = ppmu->attr_groups;
+
 #ifdef MSR_HV
        /*
         * Use FCHV to ignore kernel events if MSR.HV is set.
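
power_events_sysfs_show() is the usual sysfs show-callback shape: the attribute pointer passed in is embedded inside a larger perf_pmu_events_attr, so container_of() walks back to the wrapper and fetches the event id. A stripped-down userspace illustration of that pattern (the struct names and the simplified container_of are stand-ins for the kernel's):

        #include <stddef.h>
        #include <stdio.h>

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct attr { const char *name; };

        struct event_attr {                      /* stand-in for perf_pmu_events_attr */
                struct attr attr;
                unsigned long long id;
        };

        static int show(struct attr *a, char *page)
        {
                struct event_attr *ea = container_of(a, struct event_attr, attr);

                return sprintf(page, "event=0x%02llx\n", ea->id);
        }

        int main(void)
        {
                struct event_attr cycles = { { "cpu-cycles" }, 0x1e };
                char page[32];

                show(&cycles.attr, page);
                fputs(page, stdout);             /* prints: event=0x1e */
                return 0;
        }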
index 2ee01e38d5e256b9a92f0ee678e08a19e768f572..b554879bd31e4c48cc6769092695c7f32a899af4 100644 (file)
 #define MMCR1_PMCSEL_SH(n)     (MMCR1_PMC1SEL_SH - (n) * 8)
 #define MMCR1_PMCSEL_MSK       0xff
 
+/*
+ * Power7 event codes.
+ */
+#define        PME_PM_CYC                      0x1e
+#define        PME_PM_GCT_NOSLOT_CYC           0x100f8
+#define        PME_PM_CMPLU_STALL              0x4000a
+#define        PME_PM_INST_CMPL                0x2
+#define        PME_PM_LD_REF_L1                0xc880
+#define        PME_PM_LD_MISS_L1               0x400f0
+#define        PME_PM_BRU_FIN                  0x10068
+#define        PME_PM_BRU_MPRED                0x400f6
+
 /*
  * Layout of constraint bits:
  * 6666555555555544444444443333333333222222222211111111110000000000
@@ -307,14 +319,14 @@ static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[])
 }
 
 static int power7_generic_events[] = {
-       [PERF_COUNT_HW_CPU_CYCLES] = 0x1e,
-       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x100f8, /* GCT_NOSLOT_CYC */
-       [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x4000a,  /* CMPLU_STALL */
-       [PERF_COUNT_HW_INSTRUCTIONS] = 2,
-       [PERF_COUNT_HW_CACHE_REFERENCES] = 0xc880,      /* LD_REF_L1_LSU*/
-       [PERF_COUNT_HW_CACHE_MISSES] = 0x400f0,         /* LD_MISS_L1   */
-       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x10068,  /* BRU_FIN      */
-       [PERF_COUNT_HW_BRANCH_MISSES] = 0x400f6,        /* BR_MPRED     */
+       [PERF_COUNT_HW_CPU_CYCLES] =                    PME_PM_CYC,
+       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =       PME_PM_GCT_NOSLOT_CYC,
+       [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =        PME_PM_CMPLU_STALL,
+       [PERF_COUNT_HW_INSTRUCTIONS] =                  PME_PM_INST_CMPL,
+       [PERF_COUNT_HW_CACHE_REFERENCES] =              PME_PM_LD_REF_L1,
+       [PERF_COUNT_HW_CACHE_MISSES] =                  PME_PM_LD_MISS_L1,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =           PME_PM_BRU_FIN,
+       [PERF_COUNT_HW_BRANCH_MISSES] =                 PME_PM_BRU_MPRED,
 };
 
 #define C(x)   PERF_COUNT_HW_CACHE_##x
@@ -362,6 +374,57 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
        },
 };
 
+
+GENERIC_EVENT_ATTR(cpu-cycles,                 CYC);
+GENERIC_EVENT_ATTR(stalled-cycles-frontend,    GCT_NOSLOT_CYC);
+GENERIC_EVENT_ATTR(stalled-cycles-backend,     CMPLU_STALL);
+GENERIC_EVENT_ATTR(instructions,               INST_CMPL);
+GENERIC_EVENT_ATTR(cache-references,           LD_REF_L1);
+GENERIC_EVENT_ATTR(cache-misses,               LD_MISS_L1);
+GENERIC_EVENT_ATTR(branch-instructions,                BRU_FIN);
+GENERIC_EVENT_ATTR(branch-misses,              BRU_MPRED);
+
+POWER_EVENT_ATTR(CYC,                          CYC);
+POWER_EVENT_ATTR(GCT_NOSLOT_CYC,               GCT_NOSLOT_CYC);
+POWER_EVENT_ATTR(CMPLU_STALL,                  CMPLU_STALL);
+POWER_EVENT_ATTR(INST_CMPL,                    INST_CMPL);
+POWER_EVENT_ATTR(LD_REF_L1,                    LD_REF_L1);
+POWER_EVENT_ATTR(LD_MISS_L1,                   LD_MISS_L1);
+POWER_EVENT_ATTR(BRU_FIN,                      BRU_FIN);
+POWER_EVENT_ATTR(BRU_MPRED,                    BRU_MPRED);
+
+static struct attribute *power7_events_attr[] = {
+       GENERIC_EVENT_PTR(CYC),
+       GENERIC_EVENT_PTR(GCT_NOSLOT_CYC),
+       GENERIC_EVENT_PTR(CMPLU_STALL),
+       GENERIC_EVENT_PTR(INST_CMPL),
+       GENERIC_EVENT_PTR(LD_REF_L1),
+       GENERIC_EVENT_PTR(LD_MISS_L1),
+       GENERIC_EVENT_PTR(BRU_FIN),
+       GENERIC_EVENT_PTR(BRU_MPRED),
+
+       POWER_EVENT_PTR(CYC),
+       POWER_EVENT_PTR(GCT_NOSLOT_CYC),
+       POWER_EVENT_PTR(CMPLU_STALL),
+       POWER_EVENT_PTR(INST_CMPL),
+       POWER_EVENT_PTR(LD_REF_L1),
+       POWER_EVENT_PTR(LD_MISS_L1),
+       POWER_EVENT_PTR(BRU_FIN),
+       POWER_EVENT_PTR(BRU_MPRED),
+       NULL
+};
+
+
+static struct attribute_group power7_pmu_events_group = {
+       .name = "events",
+       .attrs = power7_events_attr,
+};
+
+static const struct attribute_group *power7_pmu_attr_groups[] = {
+       &power7_pmu_events_group,
+       NULL,
+};
+
 static struct power_pmu power7_pmu = {
        .name                   = "POWER7",
        .n_counter              = 6,
@@ -373,6 +436,7 @@ static struct power_pmu power7_pmu = {
        .get_alternatives       = power7_get_alternatives,
        .disable_pmc            = power7_disable_pmc,
        .flags                  = PPMU_ALT_SIPR,
+       .attr_groups            = power7_pmu_attr_groups,
        .n_generic              = ARRAY_SIZE(power7_generic_events),
        .generic_events         = power7_generic_events,
        .cache_events           = &power7_cache_events,
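
With power7_pmu_attr_groups hooked up through register_power_pmu() above, each named event becomes a read-only file in the PMU's "events" sysfs directory whose contents come from power_events_sysfs_show(). A small userspace sketch of reading one back; the path assumes the generic event_source sysfs layout and a PMU registered as "cpu", which this diff does not spell out:

        #include <stdio.h>

        int main(void)
        {
                /* Assumed path; adjust for the PMU name on a given system. */
                FILE *f = fopen("/sys/bus/event_source/devices/cpu/events/cpu-cycles", "r");
                char line[64];

                if (f && fgets(line, sizeof(line), f))
                        fputs(line, stdout);     /* expected: event=0x1e (PME_PM_CYC) */
                if (f)
                        fclose(f);
                return 0;
        }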
index 25db92a8e1cf919db8a919232805fb71ad480b7f..49318385d4fac71e6122588da95c895a18d9198d 100644 (file)
@@ -24,6 +24,7 @@
 
 #include <linux/errno.h>
 #include <linux/sched.h>
+#include <linux/sched/rt.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
index 95d00173029f52c498902a586a24917f09e99d4b..890f30e70f98fc52a39f063172cfbadc5e8713b4 100644 (file)
@@ -236,6 +236,13 @@ out:
 
 static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
+       /*
+        * We don't support CPU hotplug. Don't unmap after the system
+        * has already made it to a running state.
+        */
+       if (system_state != SYSTEM_BOOTING)
+               return 0;
+
        if (sdcasr_mapbase)
                iounmap(sdcasr_mapbase);
        if (sdcpwr_mapbase)
index a7648543c59e05435eac37c5099eeda84db9584d..0cc0ac07a55dc661e0e0ad3acceaa55d33818800 100644 (file)
@@ -57,7 +57,7 @@ static u8 dtl_event_mask = 0x7;
  */
 static int dtl_buf_entries = N_DISPATCH_LOG;
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 struct dtl_ring {
        u64     write_index;
        struct dtl_entry *write_ptr;
@@ -142,7 +142,7 @@ static u64 dtl_current_index(struct dtl *dtl)
        return per_cpu(dtl_rings, dtl->cpu).write_index;
 }
 
-#else /* CONFIG_VIRT_CPU_ACCOUNTING */
+#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 static int dtl_start(struct dtl *dtl)
 {
@@ -188,7 +188,7 @@ static u64 dtl_current_index(struct dtl *dtl)
 {
        return lppaca_of(dtl->cpu).dtl_idx;
 }
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 static int dtl_enable(struct dtl *dtl)
 {
index ca55882465d6b0d027ef785f941c64a342985f99..527e12c9573be178757b8eb7920b94654224e589 100644 (file)
@@ -281,7 +281,7 @@ static struct notifier_block pci_dn_reconfig_nb = {
 
 struct kmem_cache *dtl_cache;
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 /*
  * Allocate space for the dispatch trace log for all possible cpus
  * and register the buffers with the hypervisor.  This is used for
@@ -332,12 +332,12 @@ static int alloc_dispatch_logs(void)
 
        return 0;
 }
-#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 static inline int alloc_dispatch_logs(void)
 {
        return 0;
 }
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 static int alloc_dispatch_log_kmem_cache(void)
 {
index b5ea38c2564753df883026f83fece8ab484df68c..c15ba7d1be643d9251079bba0d984a11f7d3d425 100644 (file)
@@ -78,7 +78,6 @@ config S390
        select HAVE_KVM if 64BIT
        select HAVE_ARCH_TRACEHOOK
        select INIT_ALL_POSSIBLE
-       select HAVE_IRQ_WORK
        select HAVE_PERF_EVENTS
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select HAVE_DEBUG_KMEMLEAK
index 4b8e08b56f49679b40bd4ee098bae0bb01945292..7e3ce78d42902e636de522d8f568e00e0f75701f 100644 (file)
@@ -24,8 +24,8 @@ CHECKFLAGS    += -D__s390__ -msize-long
 else
 LD_BFD         := elf64-s390
 LDFLAGS                := -m elf64_s390
-KBUILD_AFLAGS_MODULE += -fpic -D__PIC__
-KBUILD_CFLAGS_MODULE += -fpic -D__PIC__
+KBUILD_AFLAGS_MODULE += -fPIC
+KBUILD_CFLAGS_MODULE += -fPIC
 KBUILD_CFLAGS  += -m64
 KBUILD_AFLAGS  += -m64
 UTS_MACHINE    := s390x
index de015d85e3e5f9ce7f3f308e3bb5175d7bfcd2c0..bb9bdcd20864ea41546e670df5369c7bebfdda2d 100644 (file)
  */
 #define MAX_DMA_ADDRESS         0x80000000
 
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy   (0)
+#endif
+
 #endif /* _ASM_S390_DMA_H */
index 16c3eb164f4fe0cdb44b91eb3be271743b8beeb8..27cb32185ce1d950e5a6097198a1864a69e6594b 100644 (file)
@@ -85,6 +85,11 @@ static inline void iounmap(volatile void __iomem *addr)
 #define __raw_writel   zpci_write_u32
 #define __raw_writeq   zpci_write_u64
 
+#define readb_relaxed  readb
+#define readw_relaxed  readw
+#define readl_relaxed  readl
+#define readq_relaxed  readq
+
 #endif /* CONFIG_PCI */
 
 #include <asm-generic/io.h>
index e6972f85d2b0926e58832e955cefb538e58618e7..7def77302d630995a1018cd2ffe02a05ee289e10 100644 (file)
@@ -2,43 +2,61 @@
 #define _ASM_IRQ_H
 
 #include <linux/hardirq.h>
+#include <linux/percpu.h>
+#include <linux/cache.h>
 #include <linux/types.h>
 
-enum interruption_class {
+enum interruption_main_class {
        EXTERNAL_INTERRUPT,
        IO_INTERRUPT,
-       EXTINT_CLK,
-       EXTINT_EXC,
-       EXTINT_EMS,
-       EXTINT_TMR,
-       EXTINT_TLA,
-       EXTINT_PFL,
-       EXTINT_DSD,
-       EXTINT_VRT,
-       EXTINT_SCP,
-       EXTINT_IUC,
-       EXTINT_CMS,
-       EXTINT_CMC,
-       EXTINT_CMR,
-       IOINT_CIO,
-       IOINT_QAI,
-       IOINT_DAS,
-       IOINT_C15,
-       IOINT_C70,
-       IOINT_TAP,
-       IOINT_VMR,
-       IOINT_LCS,
-       IOINT_CLW,
-       IOINT_CTC,
-       IOINT_APB,
-       IOINT_ADM,
-       IOINT_CSC,
-       IOINT_PCI,
-       IOINT_MSI,
+       NR_IRQS
+};
+
+enum interruption_class {
+       IRQEXT_CLK,
+       IRQEXT_EXC,
+       IRQEXT_EMS,
+       IRQEXT_TMR,
+       IRQEXT_TLA,
+       IRQEXT_PFL,
+       IRQEXT_DSD,
+       IRQEXT_VRT,
+       IRQEXT_SCP,
+       IRQEXT_IUC,
+       IRQEXT_CMS,
+       IRQEXT_CMC,
+       IRQEXT_CMR,
+       IRQIO_CIO,
+       IRQIO_QAI,
+       IRQIO_DAS,
+       IRQIO_C15,
+       IRQIO_C70,
+       IRQIO_TAP,
+       IRQIO_VMR,
+       IRQIO_LCS,
+       IRQIO_CLW,
+       IRQIO_CTC,
+       IRQIO_APB,
+       IRQIO_ADM,
+       IRQIO_CSC,
+       IRQIO_PCI,
+       IRQIO_MSI,
        NMI_NMI,
-       NR_IRQS,
+       CPU_RST,
+       NR_ARCH_IRQS
 };
 
+struct irq_stat {
+       unsigned int irqs[NR_ARCH_IRQS];
+};
+
+DECLARE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
+
+static __always_inline void inc_irq_stat(enum interruption_class irq)
+{
+       __get_cpu_var(irq_stat).irqs[irq]++;
+}
+
 struct ext_code {
        unsigned short subcode;
        unsigned short code;
index c928dc1938f23c8a90134c972147c9bbfecb005e..098adbb62660be234d4d2ed5171d5c0615027f31 100644 (file)
@@ -1365,6 +1365,18 @@ static inline void pmdp_invalidate(struct vm_area_struct *vma,
        __pmd_idte(address, pmdp);
 }
 
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+                                     unsigned long address, pmd_t *pmdp)
+{
+       pmd_t pmd = *pmdp;
+
+       if (pmd_write(pmd)) {
+               __pmd_idte(address, pmdp);
+               set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
+       }
+}
+
 static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
 {
        pmd_t __pmd;
@@ -1387,10 +1399,7 @@ static inline int has_transparent_hugepage(void)
 
 static inline unsigned long pmd_pfn(pmd_t pmd)
 {
-       if (pmd_trans_huge(pmd))
-               return pmd_val(pmd) >> HPAGE_SHIFT;
-       else
-               return pmd_val(pmd) >> PAGE_SHIFT;
+       return pmd_val(pmd) >> PAGE_SHIFT;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
index fba4d66788a247c94adfba5c13ae750e19929496..4c060bb5b8eafbfc5b816411dc91f9dea1201daf 100644 (file)
@@ -128,4 +128,32 @@ static inline unsigned long long get_clock_monotonic(void)
        return get_clock_xt() - sched_clock_base_cc;
 }
 
+/**
+ * tod_to_ns - convert a TOD format value to nanoseconds
+ * @todval: TOD format value to be converted
+ * Returns: number of nanoseconds that correspond to the TOD format value
+ *
+ * Converting a 64-bit TOD format value to nanoseconds means that the value
+ * must be divided by 4.096. In order to achieve that we multiply with 125
+ * and divide by 512:
+ *
+ *    ns = (todval * 125) >> 9;
+ *
+ * In order to avoid an overflow with the multiplication we can rewrite this.
+ * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits)
+ * we end up with
+ *
+ *    ns = ((2^32 * th + tl) * 125 ) >> 9;
+ * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9);
+ *
+ */
+static inline unsigned long long tod_to_ns(unsigned long long todval)
+{
+       unsigned long long ns;
+
+       ns = ((todval >> 32) << 23) * 125;
+       ns += ((todval & 0xffffffff) * 125) >> 9;
+       return ns;
+}
+
 #endif
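
The comment derives the split form of ns = (todval * 125) >> 9; since the 2^32 * th part is always divisible by 512, the two halves can be scaled independently without losing precision or overflowing 64-bit intermediates. A quick userspace cross-check of that claim (it relies on unsigned __int128, a GCC/Clang extension, for the naive reference calculation):

        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>

        static uint64_t tod_to_ns(uint64_t todval)   /* same arithmetic as above */
        {
                uint64_t ns;

                ns = ((todval >> 32) << 23) * 125;
                ns += ((todval & 0xffffffff) * 125) >> 9;
                return ns;
        }

        int main(void)
        {
                uint64_t samples[] = { 0, 1ULL << 12, 0x123456789abcdef0ULL, ~0ULL };

                for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                        uint64_t naive =
                                (uint64_t)(((unsigned __int128)samples[i] * 125) >> 9);

                        printf("%#llx -> %llu ns\n",
                               (unsigned long long)samples[i],
                               (unsigned long long)tod_to_ns(samples[i]));
                        assert(tod_to_ns(samples[i]) == naive);
                }
                return 0;
        }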
index 63e6078699f11caf124fdd94ea220cf730f860f3..864f693c237fe440cd02125e588c4b5867b6edb8 100644 (file)
 #define __NR_process_vm_writev 341
 #define __NR_s390_runtime_instr 342
 #define __NR_kcmp              343
-#define NR_syscalls 344
+#define __NR_finit_module      344
+#define NR_syscalls 345
 
 /* 
  * There are some system calls that are not present on 64 bit, some
index 827e094a2f494d09bdf1936cde25df431ec7c012..9b9a805656b505e803a2bb9bb4da21e9c8f6f59c 100644 (file)
@@ -1659,3 +1659,9 @@ ENTRY(sys_kcmp_wrapper)
        llgfr   %r5,%r5                 # unsigned long
        llgfr   %r6,%r6                 # unsigned long
        jg      sys_kcmp
+
+ENTRY(sys_finit_module_wrapper)
+       lgfr    %r2,%r2                 # int
+       llgtr   %r3,%r3                 # const char __user *
+       lgfr    %r4,%r4                 # int
+       jg      sys_finit_module
index ba500d8dc392056ddd66e4978389d072452f6859..4e8215e0d4b6d3047ec7df266e350e0e0ad83992 100644 (file)
@@ -1127,13 +1127,14 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
        if (i == DEBUG_MAX_VIEWS) {
                pr_err("Registering view %s/%s would exceed the maximum "
                       "number of views %i\n", id->name, view->name, i);
-               debugfs_remove(pde);
                rc = -1;
        } else {
                id->views[i] = view;
                id->debugfs_entries[i] = pde;
        }
        spin_unlock_irqrestore(&id->lock, flags);
+       if (rc)
+               debugfs_remove(pde);
 out:
        return rc;
 }
@@ -1146,9 +1147,9 @@ EXPORT_SYMBOL(debug_register_view);
 int
 debug_unregister_view(debug_info_t * id, struct debug_view *view)
 {
-       int rc = 0;
-       int i;
+       struct dentry *dentry = NULL;
        unsigned long flags;
+       int i, rc = 0;
 
        if (!id)
                goto out;
@@ -1160,10 +1161,12 @@ debug_unregister_view(debug_info_t * id, struct debug_view *view)
        if (i == DEBUG_MAX_VIEWS)
                rc = -1;
        else {
-               debugfs_remove(id->debugfs_entries[i]);
+               dentry = id->debugfs_entries[i];
                id->views[i] = NULL;
+               id->debugfs_entries[i] = NULL;
        }
        spin_unlock_irqrestore(&id->lock, flags);
+       debugfs_remove(dentry);
 out:
        return rc;
 }
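
Both hunks above move debugfs_remove() out from under id->lock: the dentry is only detached (or the just-created one remembered) while the spinlock is held, and the removal itself runs after the unlock, since debugfs_remove() may sleep and must not be called in atomic context. A minimal sketch of the pattern, with a pthread mutex and free() standing in for the spinlock and the sleeping cleanup:

        #include <pthread.h>
        #include <stdlib.h>

        struct table {
                pthread_mutex_t lock;
                void *slots[8];
        };

        /* Stand-in for debugfs_remove(): may block, so never call it
         * while the lock is held. */
        static void may_sleep_cleanup(void *obj)
        {
                free(obj);
        }

        static void unregister_slot(struct table *t, int i)
        {
                void *victim;

                pthread_mutex_lock(&t->lock);
                victim = t->slots[i];               /* only detach it under the lock */
                t->slots[i] = NULL;
                pthread_mutex_unlock(&t->lock);

                if (victim)
                        may_sleep_cleanup(victim);  /* slow work after unlocking */
        }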
index bf24293970ce544b358f9bb3f4d4c7daa9bde88f..9df824ea16672aea9e8f6a93ec6c925f7b982a3a 100644 (file)
 #include <asm/irq.h>
 #include "entry.h"
 
+DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
+EXPORT_PER_CPU_SYMBOL_GPL(irq_stat);
+
 struct irq_class {
        char *name;
        char *desc;
 };
 
-static const struct irq_class intrclass_names[] = {
+/*
+ * The list of "main" irq classes on s390. This is the list of interrupts
+ * that appear both in /proc/stat ("intr" line) and /proc/interrupts.
+ * Historically only external and I/O interrupts have been part of /proc/stat.
+ * We can't add the split external and I/O sub classes since the first field
+ * in the "intr" line in /proc/stat is supposed to be the sum of all other
+ * fields.
+ * Since the external and I/O interrupt fields are already sums, we would
+ * end up with a sum that accounts each interrupt twice.
+ */
+static const struct irq_class irqclass_main_desc[NR_IRQS] = {
        [EXTERNAL_INTERRUPT] = {.name = "EXT"},
-       [IO_INTERRUPT]       = {.name = "I/O"},
-       [EXTINT_CLK] = {.name = "CLK", .desc = "[EXT] Clock Comparator"},
-       [EXTINT_EXC] = {.name = "EXC", .desc = "[EXT] External Call"},
-       [EXTINT_EMS] = {.name = "EMS", .desc = "[EXT] Emergency Signal"},
-       [EXTINT_TMR] = {.name = "TMR", .desc = "[EXT] CPU Timer"},
-       [EXTINT_TLA] = {.name = "TAL", .desc = "[EXT] Timing Alert"},
-       [EXTINT_PFL] = {.name = "PFL", .desc = "[EXT] Pseudo Page Fault"},
-       [EXTINT_DSD] = {.name = "DSD", .desc = "[EXT] DASD Diag"},
-       [EXTINT_VRT] = {.name = "VRT", .desc = "[EXT] Virtio"},
-       [EXTINT_SCP] = {.name = "SCP", .desc = "[EXT] Service Call"},
-       [EXTINT_IUC] = {.name = "IUC", .desc = "[EXT] IUCV"},
-       [EXTINT_CMS] = {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
-       [EXTINT_CMC] = {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
-       [EXTINT_CMR] = {.name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
-       [IOINT_CIO]  = {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
-       [IOINT_QAI]  = {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
-       [IOINT_DAS]  = {.name = "DAS", .desc = "[I/O] DASD"},
-       [IOINT_C15]  = {.name = "C15", .desc = "[I/O] 3215"},
-       [IOINT_C70]  = {.name = "C70", .desc = "[I/O] 3270"},
-       [IOINT_TAP]  = {.name = "TAP", .desc = "[I/O] Tape"},
-       [IOINT_VMR]  = {.name = "VMR", .desc = "[I/O] Unit Record Devices"},
-       [IOINT_LCS]  = {.name = "LCS", .desc = "[I/O] LCS"},
-       [IOINT_CLW]  = {.name = "CLW", .desc = "[I/O] CLAW"},
-       [IOINT_CTC]  = {.name = "CTC", .desc = "[I/O] CTC"},
-       [IOINT_APB]  = {.name = "APB", .desc = "[I/O] AP Bus"},
-       [IOINT_ADM]  = {.name = "ADM", .desc = "[I/O] EADM Subchannel"},
-       [IOINT_CSC]  = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"},
-       [IOINT_PCI]  = {.name = "PCI", .desc = "[I/O] PCI Interrupt" },
-       [IOINT_MSI] =  {.name = "MSI", .desc = "[I/O] MSI Interrupt" },
+       [IO_INTERRUPT]       = {.name = "I/O"}
+};
+
+/*
+ * The list of split external and I/O interrupts that appear only in
+ * /proc/interrupts.
+ * In addition, this list contains non-external / non-I/O events like NMIs.
+ */
+static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
+       [IRQEXT_CLK] = {.name = "CLK", .desc = "[EXT] Clock Comparator"},
+       [IRQEXT_EXC] = {.name = "EXC", .desc = "[EXT] External Call"},
+       [IRQEXT_EMS] = {.name = "EMS", .desc = "[EXT] Emergency Signal"},
+       [IRQEXT_TMR] = {.name = "TMR", .desc = "[EXT] CPU Timer"},
+       [IRQEXT_TLA] = {.name = "TAL", .desc = "[EXT] Timing Alert"},
+       [IRQEXT_PFL] = {.name = "PFL", .desc = "[EXT] Pseudo Page Fault"},
+       [IRQEXT_DSD] = {.name = "DSD", .desc = "[EXT] DASD Diag"},
+       [IRQEXT_VRT] = {.name = "VRT", .desc = "[EXT] Virtio"},
+       [IRQEXT_SCP] = {.name = "SCP", .desc = "[EXT] Service Call"},
+       [IRQEXT_IUC] = {.name = "IUC", .desc = "[EXT] IUCV"},
+       [IRQEXT_CMS] = {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
+       [IRQEXT_CMC] = {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
+       [IRQEXT_CMR] = {.name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
+       [IRQIO_CIO]  = {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
+       [IRQIO_QAI]  = {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
+       [IRQIO_DAS]  = {.name = "DAS", .desc = "[I/O] DASD"},
+       [IRQIO_C15]  = {.name = "C15", .desc = "[I/O] 3215"},
+       [IRQIO_C70]  = {.name = "C70", .desc = "[I/O] 3270"},
+       [IRQIO_TAP]  = {.name = "TAP", .desc = "[I/O] Tape"},
+       [IRQIO_VMR]  = {.name = "VMR", .desc = "[I/O] Unit Record Devices"},
+       [IRQIO_LCS]  = {.name = "LCS", .desc = "[I/O] LCS"},
+       [IRQIO_CLW]  = {.name = "CLW", .desc = "[I/O] CLAW"},
+       [IRQIO_CTC]  = {.name = "CTC", .desc = "[I/O] CTC"},
+       [IRQIO_APB]  = {.name = "APB", .desc = "[I/O] AP Bus"},
+       [IRQIO_ADM]  = {.name = "ADM", .desc = "[I/O] EADM Subchannel"},
+       [IRQIO_CSC]  = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"},
+       [IRQIO_PCI]  = {.name = "PCI", .desc = "[I/O] PCI Interrupt" },
+       [IRQIO_MSI]  = {.name = "MSI", .desc = "[I/O] MSI Interrupt" },
        [NMI_NMI]    = {.name = "NMI", .desc = "[NMI] Machine Check"},
+       [CPU_RST]    = {.name = "RST", .desc = "[CPU] CPU Restart"},
 };
 
 /*
@@ -68,30 +90,34 @@ static const struct irq_class intrclass_names[] = {
  */
 int show_interrupts(struct seq_file *p, void *v)
 {
-       int i = *(loff_t *) v, j;
+       int irq = *(loff_t *) v;
+       int cpu;
 
        get_online_cpus();
-       if (i == 0) {
+       if (irq == 0) {
                seq_puts(p, "           ");
-               for_each_online_cpu(j)
-                       seq_printf(p, "CPU%d       ",j);
+               for_each_online_cpu(cpu)
+                       seq_printf(p, "CPU%d       ", cpu);
                seq_putc(p, '\n');
        }
-
-       if (i < NR_IRQS) {
-               seq_printf(p, "%s: ", intrclass_names[i].name);
-#ifndef CONFIG_SMP
-               seq_printf(p, "%10u ", kstat_irqs(i));
-#else
-               for_each_online_cpu(j)
-                       seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-#endif
-               if (intrclass_names[i].desc)
-                       seq_printf(p, "  %s", intrclass_names[i].desc);
-                seq_putc(p, '\n');
-        }
+       if (irq < NR_IRQS) {
+               seq_printf(p, "%s: ", irqclass_main_desc[irq].name);
+               for_each_online_cpu(cpu)
+                       seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[irq]);
+               seq_putc(p, '\n');
+               goto skip_arch_irqs;
+       }
+       for (irq = 0; irq < NR_ARCH_IRQS; irq++) {
+               seq_printf(p, "%s: ", irqclass_sub_desc[irq].name);
+               for_each_online_cpu(cpu)
+                       seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).irqs[irq]);
+               if (irqclass_sub_desc[irq].desc)
+                       seq_printf(p, "  %s", irqclass_sub_desc[irq].desc);
+               seq_putc(p, '\n');
+       }
+skip_arch_irqs:
        put_online_cpus();
-        return 0;
+       return 0;
 }
 
 /*
@@ -222,7 +248,7 @@ void __irq_entry do_extint(struct pt_regs *regs, struct ext_code ext_code,
                /* Serve timer interrupts first. */
                clock_comparator_work();
        }
-       kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
+       kstat_incr_irqs_this_cpu(EXTERNAL_INTERRUPT, NULL);
        if (ext_code.code != 0x1004)
                __get_cpu_var(s390_idle).nohz_delay = 1;
 
index a6daa5c5cdb047708be93a173e47095af9d0f9f4..7918fbea36bb31446d6bea5d438854c8c2464938 100644 (file)
@@ -254,7 +254,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
        int umode;
 
        nmi_enter();
-       kstat_cpu(smp_processor_id()).irqs[NMI_NMI]++;
+       inc_irq_stat(NMI_NMI);
        mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
        mcck = &__get_cpu_var(cpu_mcck);
        umode = user_mode(regs);
index c4e7269d4a0980d8736c5a9c32e916cb581b0a58..86ec7447e1f5d57f8c4a53eb2867d355db1f45f3 100644 (file)
@@ -229,7 +229,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
        if (!(alert & CPU_MF_INT_CF_MASK))
                return;
 
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_CMC]++;
+       inc_irq_stat(IRQEXT_CMC);
        cpuhw = &__get_cpu_var(cpu_hw_events);
 
        /* Measurement alerts are shared and might happen when the PMU
index 61066f6f71a590d3d35fd688e8e6715719919dc1..077a99389b07919a2e0575b6b46249589f3cf968 100644 (file)
@@ -71,7 +71,7 @@ static void runtime_instr_int_handler(struct ext_code ext_code,
        if (!(param32 & CPU_MF_INT_RI_MASK))
                return;
 
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_CMR]++;
+       inc_irq_stat(IRQEXT_CMR);
 
        if (!current->thread.ri_cb)
                return;
index 2568590973ad2e429f394ce4f6147211e7169b11..a5360de85ec7e7eb190f64de6028d08ea2e2c68e 100644 (file)
@@ -16,7 +16,7 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/errno.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/memblock.h>
@@ -289,6 +289,7 @@ void machine_power_off(void)
  * Dummy power off function.
  */
 void (*pm_power_off)(void) = machine_power_off;
+EXPORT_SYMBOL_GPL(pm_power_off);
 
 static int __init early_parse_mem(char *p)
 {
index 0b45baa55438831538288d3411f44701e0958bc5..7433a2f9e5ccc3352236f52e8f365147bbb6a433 100644 (file)
@@ -433,9 +433,9 @@ static void do_ext_call_interrupt(struct ext_code ext_code,
 
        cpu = smp_processor_id();
        if (ext_code.code == 0x1202)
-               kstat_cpu(cpu).irqs[EXTINT_EXC]++;
+               inc_irq_stat(IRQEXT_EXC);
        else
-               kstat_cpu(cpu).irqs[EXTINT_EMS]++;
+               inc_irq_stat(IRQEXT_EMS);
        /*
         * handle bit signal external calls
         */
@@ -623,9 +623,10 @@ static struct sclp_cpu_info *smp_get_cpu_info(void)
        return info;
 }
 
-static int smp_add_present_cpu(int cpu);
+static int __cpuinit smp_add_present_cpu(int cpu);
 
-static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
+static int __cpuinit __smp_rescan_cpus(struct sclp_cpu_info *info,
+                                      int sysfs_add)
 {
        struct pcpu *pcpu;
        cpumask_t avail;
@@ -708,6 +709,7 @@ static void __cpuinit smp_start_secondary(void *cpuvoid)
        pfault_init();
        notify_cpu_starting(smp_processor_id());
        set_cpu_online(smp_processor_id(), true);
+       inc_irq_stat(CPU_RST);
        local_irq_enable();
        /* cpu_idle will call schedule for us */
        cpu_idle();
@@ -985,7 +987,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
        return notifier_from_errno(err);
 }
 
-static int smp_add_present_cpu(int cpu)
+static int __cpuinit smp_add_present_cpu(int cpu)
 {
        struct cpu *c = &pcpu_devices[cpu].cpu;
        struct device *s = &c->dev;
index 48174850f3b0859f05b2b5023f09cdbb827c9d56..6a6c61f94dd32244d62a9d25985ed8844dfb988b 100644 (file)
@@ -352,3 +352,4 @@ SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv_wr
 SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev_wrapper)
 SYSCALL(sys_ni_syscall,sys_s390_runtime_instr,sys_s390_runtime_instr_wrapper)
 SYSCALL(sys_kcmp,sys_kcmp,sys_kcmp_wrapper)
+SYSCALL(sys_finit_module,sys_finit_module,sys_finit_module_wrapper)
index 7fcd690d42c753bfd027d00b05da12e35cf711cd..0aa98db8a80dff08f4ba392139e80eec173bfd62 100644 (file)
@@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
  */
 unsigned long long notrace __kprobes sched_clock(void)
 {
-       return (get_clock_monotonic() * 125) >> 9;
+       return tod_to_ns(get_clock_monotonic());
 }
 
 /*
@@ -120,6 +120,9 @@ static int s390_next_ktime(ktime_t expires,
        nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
        do_div(nsecs, 125);
        S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
+       /* Program the maximum value if we have an overflow (== year 2042) */
+       if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
+               S390_lowcore.clock_comparator = -1ULL;
        set_clock_comparator(S390_lowcore.clock_comparator);
        return 0;
 }
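
The added check catches the clock comparator wrapping past 2^64 (the year-2042 case mentioned in the comment) with the standard unsigned-overflow idiom: if base + delta ended up below base, the addition wrapped, so the comparator is pinned at the maximum instead. A tiny standalone illustration with made-up values:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t base  = 0xfff0000000000000ULL;  /* made-up sched_clock_base_cc */
                uint64_t delta = 0x0020000000000000ULL;  /* made-up expiry, TOD units */
                uint64_t cmp   = base + delta;           /* wraps around 2^64 */

                if (cmp < base)                          /* unsigned wrap detected */
                        cmp = ~0ULL;                     /* program the maximum value */

                printf("comparator = %#llx\n", (unsigned long long)cmp);
                return 0;
        }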
@@ -168,7 +171,7 @@ static void clock_comparator_interrupt(struct ext_code ext_code,
                                       unsigned int param32,
                                       unsigned long param64)
 {
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_CLK]++;
+       inc_irq_stat(IRQEXT_CLK);
        if (S390_lowcore.clock_comparator == -1ULL)
                set_clock_comparator(S390_lowcore.clock_comparator);
 }
@@ -179,7 +182,7 @@ static void stp_timing_alert(struct stp_irq_parm *);
 static void timing_alert_interrupt(struct ext_code ext_code,
                                   unsigned int param32, unsigned long param64)
 {
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_TLA]++;
+       inc_irq_stat(IRQEXT_TLA);
        if (param32 & 0x00c40000)
                etr_timing_alert((struct etr_irq_parm *) &param32);
        if (param32 & 0x00038000)
index f1aba87cceb8acfd7d47df98ad3cb0854e3db288..4b2e3e317004a3cf5d725887c1136c9cbcdd4045 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/bootmem.h>
 #include <linux/cpuset.h>
 #include <linux/device.h>
+#include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/init.h>
@@ -42,6 +43,7 @@ static struct mask_info socket_info;
 static struct mask_info book_info;
 
 struct cpu_topology_s390 cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
 
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
index e84b8b68444a7289f6544361378bd00617aa9346..ce9cc5aa2033f735eb2db02fc3ca6319943861b3 100644 (file)
@@ -127,7 +127,7 @@ void vtime_account_user(struct task_struct *tsk)
  * Update process times based on virtual cpu times stored by entry.S
  * to the lowcore fields user_timer, system_timer & steal_clock.
  */
-void vtime_account(struct task_struct *tsk)
+void vtime_account_irq_enter(struct task_struct *tsk)
 {
        struct thread_info *ti = task_thread_info(tsk);
        u64 timer, system;
@@ -145,10 +145,10 @@ void vtime_account(struct task_struct *tsk)
 
        virt_timer_forward(system);
 }
-EXPORT_SYMBOL_GPL(vtime_account);
+EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
 
 void vtime_account_system(struct task_struct *tsk)
-__attribute__((alias("vtime_account")));
+__attribute__((alias("vtime_account_irq_enter")));
 EXPORT_SYMBOL_GPL(vtime_account_system);
 
 void __kprobes vtime_stop_cpu(void)
index c30615e605ac6716a1f93ee8dc377f9691ee99e0..82c481ddef76286c9be88ce0e45e4df31780eb5d 100644 (file)
@@ -408,7 +408,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
                return 0;
        }
 
-       sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9;
+       sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 
        hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
index c9011bfaabbe8594bdd6d3c63ce242a55897cff3..f090e819bf71817ccf564d8798632fc6bf2d7fb7 100644 (file)
@@ -613,7 +613,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                kvm_s390_deliver_pending_interrupts(vcpu);
 
        vcpu->arch.sie_block->icptcode = 0;
+       preempt_disable();
        kvm_guest_enter();
+       preempt_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        trace_kvm_s390_sie_enter(vcpu,
index 42601d6e166fe73db1e6e97382df8158212dd5d7..2fb9e63b8fc44e58ae415dddefa3a067f48071da 100644 (file)
@@ -569,7 +569,7 @@ static void pfault_interrupt(struct ext_code ext_code,
        subcode = ext_code.subcode;
        if ((subcode & 0xff00) != __SUBCODE_MASK)
                return;
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
+       inc_irq_stat(IRQEXT_PFL);
        /* Get the token (= pid of the affected task). */
        pid = sizeof(void *) == 4 ? param32 : param64;
        rcu_read_lock();
index 0cb385da202c60836fd2cd006e5950ed448de656..b5b2916895e08dd8b562fb7784628c4a0294f55b 100644 (file)
@@ -233,7 +233,7 @@ static void hws_ext_handler(struct ext_code ext_code,
        if (!(param32 & CPU_MF_INT_SF_MASK))
                return;
 
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_CMS]++;
+       inc_irq_stat(IRQEXT_CMS);
        atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32);
 
        if (hws_wq)
index ff49427e9941957104a9fcaa5ce202be347edc29..60e0372545d2800778ef067a7da9d139899fc5b8 100644 (file)
@@ -160,35 +160,6 @@ int pci_proc_domain(struct pci_bus *bus)
 }
 EXPORT_SYMBOL_GPL(pci_proc_domain);
 
-/* Store PCI function information block */
-static int zpci_store_fib(struct zpci_dev *zdev, u8 *fc)
-{
-       struct zpci_fib *fib;
-       u8 status, cc;
-
-       fib = (void *) get_zeroed_page(GFP_KERNEL);
-       if (!fib)
-               return -ENOMEM;
-
-       do {
-               cc = __stpcifc(zdev->fh, 0, fib, &status);
-               if (cc == 2) {
-                       msleep(ZPCI_INSN_BUSY_DELAY);
-                       memset(fib, 0, PAGE_SIZE);
-               }
-       } while (cc == 2);
-
-       if (cc)
-               pr_err_once("%s: cc: %u  status: %u\n",
-                           __func__, cc, status);
-
-       /* Return PCI function controls */
-       *fc = fib->fc;
-
-       free_page((unsigned long) fib);
-       return (cc) ? -EIO : 0;
-}
-
 /* Modify PCI: Register adapter interruptions */
 static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
                              u64 aibv)
@@ -469,7 +440,7 @@ static void zpci_irq_handler(void *dont, void *need)
        int rescan = 0, max = aisb_max;
        struct zdev_irq_map *imap;
 
-       kstat_cpu(smp_processor_id()).irqs[IOINT_PCI]++;
+       inc_irq_stat(IRQIO_PCI);
        sbit = start;
 
 scan:
@@ -481,7 +452,7 @@ scan:
                /* find vector bit */
                imap = bucket->imap[sbit];
                for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) {
-                       kstat_cpu(smp_processor_id()).irqs[IOINT_MSI]++;
+                       inc_irq_stat(IRQIO_MSI);
                        clear_bit(63 - mbit, &imap->aibv);
 
                        spin_lock(&imap->lock);
index 6138468b420f5653d0e3c96ae485a0e0a1649c95..a547419907c3a46be4e7a64c36f3f1a776abcfcf 100644 (file)
@@ -13,8 +13,6 @@
 #include <linux/pci.h>
 #include <asm/pci_dma.h>
 
-static enum zpci_ioat_dtype zpci_ioat_dt = ZPCI_IOTA_RTTO;
-
 static struct kmem_cache *dma_region_table_cache;
 static struct kmem_cache *dma_page_table_cache;
 
index babc2b826c5caf5f26f9a029452e5067451190c1..9c833c5858712b88897eec7c135842242d8e2d75 100644 (file)
@@ -11,7 +11,6 @@ config SUPERH
        select HAVE_ARCH_TRACEHOOK
        select HAVE_DMA_API_DEBUG
        select HAVE_DMA_ATTRS
-       select HAVE_IRQ_WORK
        select HAVE_PERF_EVENTS
        select HAVE_DEBUG_BUGVERBOSE
        select ARCH_HAVE_CUSTOM_GPIO_H
@@ -91,9 +90,6 @@ config GENERIC_CSUM
 config GENERIC_HWEIGHT
        def_bool y
 
-config IRQ_PER_CPU
-       def_bool y
-
 config GENERIC_GPIO
        def_bool n
 
index 3fede4556c91eee6ae7e5aa7a449675710254150..a0fa5791cd44abe9b29784e26142338c11768c1d 100644 (file)
  *                                  OFF-ON : MMC
  */
 
+/*
+ * FSI - DA7210
+ *
+ * It needs the following amixer settings for playback:
+ *
+ * amixer set 'HeadPhone' 80
+ * amixer set 'Out Mixer Left DAC Left' on
+ * amixer set 'Out Mixer Right DAC Right' on
+ */
+
 /* Heartbeat */
 static unsigned char led_pos[] = { 0, 1, 2, 3 };
 
index 37924afa8d8a26781a2e8ebf4952144b141f9295..bf9f44f17c2983a1f1994f451ab03d3e89c05355 100644 (file)
@@ -203,9 +203,9 @@ extern void __kernel_vsyscall;
        if (vdso_enabled)                                       \
                NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);        \
        else                                                    \
-               NEW_AUX_ENT(AT_IGNORE, 0);
+               NEW_AUX_ENT(AT_IGNORE, 0)
 #else
-#define VSYSCALL_AUX_ENT
+#define VSYSCALL_AUX_ENT       NEW_AUX_ENT(AT_IGNORE, 0)
 #endif /* CONFIG_VSYSCALL */
 
 #ifdef CONFIG_SH_FPU
index b1320d55ca305139f5087694a6c6b3c2db3a6990..e699a12cdcca0da391ee072797c74208fa93eddb 100644 (file)
@@ -39,7 +39,7 @@
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
-#define TASK_UNMAPPED_BASE     (TASK_SIZE / 3)
+#define TASK_UNMAPPED_BASE     PAGE_ALIGN(TASK_SIZE / 3)
 
 /*
  * Bit of SR register
index 1ee8946f09520a987a19ae477cac15087f19bf4a..1cc7d31971435ee57f9997539fcec28e061b6187 100644 (file)
@@ -47,7 +47,7 @@ pc; })
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
-#define TASK_UNMAPPED_BASE     (TASK_SIZE / 3)
+#define TASK_UNMAPPED_BASE     PAGE_ALIGN(TASK_SIZE / 3)
 
 /*
  * Bit of SR register
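
TASK_SIZE / 3 is generally not a multiple of the page size, so using it raw hands mmap() a misaligned search base; wrapping it in PAGE_ALIGN() rounds it up to the next page boundary. A quick arithmetic check (the TASK_SIZE and PAGE_SIZE values below are illustrative, not necessarily sh's):

        #include <stdio.h>

        #define PAGE_SIZE      4096UL
        #define PAGE_MASK      (~(PAGE_SIZE - 1))
        #define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)

        int main(void)
        {
                unsigned long task_size = 0x7c000000UL;  /* illustrative value */
                unsigned long raw = task_size / 3;
                unsigned long aligned = PAGE_ALIGN(raw);

                printf("raw     = %#lx\n", raw);         /* 0x29555555: misaligned */
                printf("aligned = %#lx\n", aligned);     /* 0x29556000: rounded up */
                return 0;
        }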
index 9e465f246dc1e49882473fdcdeb34f6520eb6b10..d13a1d6237363747117770a7620f448b08474e47 100644 (file)
 #define __NR_process_vm_readv  365
 #define __NR_process_vm_writev 366
 #define __NR_kcmp              367
+#define __NR_finit_module      368
 
-#define NR_syscalls 368
+#define NR_syscalls 369
 
 #endif /* __ASM_SH_UNISTD_32_H */
index 8e3a2edd284eadbefe740d9fea0cbcd14a27e48f..e6820c86e8c7bc1c0daf34cd9a891c45b3a5775f 100644 (file)
 #define __NR_process_vm_readv  376
 #define __NR_process_vm_writev 377
 #define __NR_kcmp              378
+#define __NR_finit_module      379
 
-#define NR_syscalls 379
+#define NR_syscalls 380
 
 #endif /* __ASM_SH_UNISTD_64_H */
index fe97ae5e56f168e642e4825fc76575a8a61811b2..734234be2f011cfb529bacecd8a0efdfaa8762ca 100644 (file)
@@ -385,3 +385,4 @@ ENTRY(sys_call_table)
        .long sys_process_vm_readv      /* 365 */
        .long sys_process_vm_writev
        .long sys_kcmp
+       .long sys_finit_module
index 5c7b1c67bdc1c04f9c858644c066063a7eaf6919..579fcb9a896be37d01421ce96e8b3fec18ff7e3b 100644 (file)
@@ -405,3 +405,4 @@ sys_call_table:
        .long sys_process_vm_readv
        .long sys_process_vm_writev
        .long sys_kcmp
+       .long sys_finit_module
index 60164e65d66551c4375121e984fd0ea28a9525fa..52aa2011d7537daa0b502d1891df4ee0184079fe 100644 (file)
@@ -294,6 +294,8 @@ stack_panic:
        .align 2
 .L_init_thread_union:
        .long   init_thread_union
+.L_ebss:
+       .long   __bss_stop
 .Lpanic:
        .long   panic
 .Lpanic_s:
index 9f2edb5c555179de8d00ee5d2031546df35f8a9d..9bff3db17c8c8ad66952ec3d0d557e2f5433e311 100644 (file)
@@ -23,7 +23,6 @@ config SPARC
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
        select RTC_CLASS
        select RTC_DRV_M48T59
-       select HAVE_IRQ_WORK
        select HAVE_DMA_ATTRS
        select HAVE_DMA_API_DEBUG
        select HAVE_ARCH_JUMP_LABEL
@@ -61,6 +60,7 @@ config SPARC64
        select HAVE_MEMBLOCK
        select HAVE_MEMBLOCK_NODE_MAP
        select HAVE_SYSCALL_WRAPPERS
+       select HAVE_ARCH_TRANSPARENT_HUGEPAGE
        select HAVE_DYNAMIC_FTRACE
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_SYSCALL_TRACEPOINTS
index 7870be0f5adc4d922c21874f06af9bedacc2dfd2..08fcce90316b36654dea14af5525a94a0cded6e8 100644 (file)
@@ -71,7 +71,6 @@
 #define PMD_PADDR      _AC(0xfffffffe,UL)
 #define PMD_PADDR_SHIFT        _AC(11,UL)
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define PMD_ISHUGE     _AC(0x00000001,UL)
 
 /* This is the PMD layout when PMD_ISHUGE is set.  With 4MB huge
@@ -86,7 +85,6 @@
 #define PMD_HUGE_ACCESSED      _AC(0x00000080,UL)
 #define PMD_HUGE_EXEC          _AC(0x00000040,UL)
 #define PMD_HUGE_SPLITTING     _AC(0x00000020,UL)
-#endif
 
 /* PGDs point to PMD tables which are 8K aligned.  */
 #define PGD_PADDR      _AC(0xfffffffc,UL)
@@ -628,6 +626,12 @@ static inline unsigned long pte_special(pte_t pte)
        return pte_val(pte) & _PAGE_SPECIAL;
 }
 
+static inline int pmd_large(pmd_t pmd)
+{
+       return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) ==
+               (PMD_ISHUGE | PMD_HUGE_PRESENT);
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline int pmd_young(pmd_t pmd)
 {
@@ -646,12 +650,6 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
        return val >> (PAGE_SHIFT - PMD_PADDR_SHIFT);
 }
 
-static inline int pmd_large(pmd_t pmd)
-{
-       return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) ==
-               (PMD_ISHUGE | PMD_HUGE_PRESENT);
-}
-
 static inline int pmd_trans_splitting(pmd_t pmd)
 {
        return (pmd_val(pmd) & (PMD_ISHUGE|PMD_HUGE_SPLITTING)) ==
index cac719d1bc5c6b8238ff0b6be5f34bc166980534..62ced589bcf78f1554bcadd4f420fad19c7ebcc1 100644 (file)
 #define __NR_process_vm_writev 339
 #define __NR_kern_features     340
 #define __NR_kcmp              341
+#define __NR_finit_module      342
 
-#define NR_syscalls            342
+#define NR_syscalls            343
 
 /* Bitmask values returned from kern_features system call.  */
 #define KERN_FEATURE_MIXED_MODE_STACK  0x00000001
index 04bacce76fe6be65cc925d8c1419d560a9eeb8d6..baf4366e2d6afe937db5fe792c3183d56da4c580 100644 (file)
@@ -378,7 +378,8 @@ static void apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p)
 /* Cook up fake bus resources for SUNW,simba PCI bridges which lack
  * a proper 'ranges' property.
  */
-static void apb_fake_ranges(struct pci_dev *dev, struct pci_bus *bus,
+static void apb_fake_ranges(struct pci_dev *dev,
+                           struct pci_bus *bus,
                            struct pci_pbm_info *pbm)
 {
        struct pci_bus_region region;
@@ -403,13 +404,15 @@ static void apb_fake_ranges(struct pci_dev *dev, struct pci_bus *bus,
        pcibios_bus_to_resource(dev, res, &region);
 }
 
-static void pci_of_scan_bus(struct pci_pbm_info *pbm, struct device_node *node,
+static void pci_of_scan_bus(struct pci_pbm_info *pbm,
+                           struct device_node *node,
                            struct pci_bus *bus);
 
 #define GET_64BIT(prop, i)     ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])
 
 static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
-                              struct device_node *node, struct pci_dev *dev)
+                              struct device_node *node,
+                              struct pci_dev *dev)
 {
        struct pci_bus *bus;
        const u32 *busrange, *ranges;
@@ -500,7 +503,8 @@ after_ranges:
        pci_of_scan_bus(pbm, node, bus);
 }
 
-static void pci_of_scan_bus(struct pci_pbm_info *pbm, struct device_node *node,
+static void pci_of_scan_bus(struct pci_pbm_info *pbm,
+                           struct device_node *node,
                            struct pci_bus *bus)
 {
        struct device_node *child;
index b85238289717f95c1da83cda5b653d15533a119b..c647634ead2bc9e0eab04f6d9558579b93990f75 100644 (file)
@@ -366,7 +366,8 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm)
        pci_config_write8(addr, 64);
 }
 
-static void psycho_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
+static void psycho_scan_bus(struct pci_pbm_info *pbm,
+                           struct device *parent)
 {
        pbm_config_busmastering(pbm);
        pbm->is_66mhz_capable = 0;
index 531186d7c9ab3f0f3aaffe0e1a804fb28074c554..6f00d27e8dacb348283df0f1b30f00b5755de6d9 100644 (file)
@@ -442,7 +442,8 @@ static void sabre_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
        sabre_register_error_handlers(pbm);
 }
 
-static void sabre_pbm_init(struct pci_pbm_info *pbm, struct platform_device *op)
+static void sabre_pbm_init(struct pci_pbm_info *pbm,
+                          struct platform_device *op)
 {
        psycho_pbm_init_common(pbm, op, "SABRE", PBM_CHIP_TYPE_SABRE);
        pbm->pci_afsr = pbm->controller_regs + SABRE_PIOAFSR;
index 29e888158ae6a07c7711f59c4cd36ad28138548f..8f76f23dac38ec66b0afea55a5311e612f2459f0 100644 (file)
@@ -1306,8 +1306,9 @@ static void schizo_pbm_hw_init(struct pci_pbm_info *pbm)
        }
 }
 
-static int schizo_pbm_init(struct pci_pbm_info *pbm, struct platform_device *op,
-                          u32 portid, int chip_type)
+static int schizo_pbm_init(struct pci_pbm_info *pbm,
+                          struct platform_device *op, u32 portid,
+                          int chip_type)
 {
        const struct linux_prom64_registers *regs;
        struct device_node *dp = op->dev.of_node;
index 1271b3a27d4ef4722834d89d16c6ad42373a27d1..be5bdf93c7676cfedffd91d645bffa3fe9cff419 100644 (file)
@@ -554,10 +554,8 @@ static void __init sbus_iommu_init(struct platform_device *op)
        regs = pr->phys_addr;
 
        iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC);
-       if (!iommu)
-               goto fatal_memory_error;
        strbuf = kzalloc(sizeof(*strbuf), GFP_ATOMIC);
-       if (!strbuf)
+       if (!iommu || !strbuf)
                goto fatal_memory_error;
 
        op->dev.archdata.iommu = iommu;
@@ -656,6 +654,8 @@ static void __init sbus_iommu_init(struct platform_device *op)
        return;
 
 fatal_memory_error:
+       kfree(iommu);
+       kfree(strbuf);
        prom_printf("sbus_iommu_init: Fatal memory allocation error.\n");
 }
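
Folding the two failure branches into one label works because kfree(NULL), like free(NULL), is defined to be a no-op: whichever allocation failed can be passed to the cleanup unconditionally. The same shape in plain C with generic names:

        #include <stdlib.h>

        struct iommu  { int dummy; };
        struct strbuf { int dummy; };

        static int init_pair(struct iommu **ip, struct strbuf **sp)
        {
                struct iommu *iommu = calloc(1, sizeof(*iommu));
                struct strbuf *strbuf = calloc(1, sizeof(*strbuf));

                if (!iommu || !strbuf)
                        goto fail;

                *ip = iommu;
                *sp = strbuf;
                return 0;

        fail:
                free(iommu);        /* free(NULL) is a no-op, so both calls are safe */
                free(strbuf);
                return -1;
        }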
 
index 5147f574f1256a7f3304a716da22bfb2ef68d42a..6ac43c36bbbfb98ad769ba6492f66a5fbb2ab554 100644 (file)
@@ -85,4 +85,4 @@ sys_call_table:
 /*325*/        .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
 /*330*/        .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
 /*335*/        .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
-/*340*/        .long sys_ni_syscall, sys_kcmp
+/*340*/        .long sys_ni_syscall, sys_kcmp, sys_finit_module
index cdbd9b817751472200246245e85830b504478a0f..1009ecb92678185a388d708e8b28be070669524d 100644 (file)
@@ -86,7 +86,7 @@ sys_call_table32:
        .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
 /*330*/        .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
        .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
-/*340*/        .word sys_kern_features, sys_kcmp
+/*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module
 
 #endif /* CONFIG_COMPAT */
 
@@ -164,4 +164,4 @@ sys_call_table:
        .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
 /*330*/        .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
        .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
-/*340*/        .word sys_kern_features, sys_kcmp
+/*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module
index 42c55df3aec300b5baee79e6af5123ab99c7b126..01ee23dd724d5812b7993245b8bcb15bb0a00a7a 100644 (file)
@@ -66,6 +66,56 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
        return 1;
 }
 
+static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+                       unsigned long end, int write, struct page **pages,
+                       int *nr)
+{
+       struct page *head, *page, *tail;
+       u32 mask;
+       int refs;
+
+       mask = PMD_HUGE_PRESENT;
+       if (write)
+               mask |= PMD_HUGE_WRITE;
+       if ((pmd_val(pmd) & mask) != mask)
+               return 0;
+
+       refs = 0;
+       head = pmd_page(pmd);
+       page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+       tail = page;
+       do {
+               VM_BUG_ON(compound_head(page) != head);
+               pages[*nr] = page;
+               (*nr)++;
+               page++;
+               refs++;
+       } while (addr += PAGE_SIZE, addr != end);
+
+       if (!page_cache_add_speculative(head, refs)) {
+               *nr -= refs;
+               return 0;
+       }
+
+       if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
+               *nr -= refs;
+               while (refs--)
+                       put_page(head);
+               return 0;
+       }
+
+       /* Any tail pages need their mapcount reference taken before we
+        * return.
+        */
+       while (refs--) {
+               if (PageTail(tail))
+                       get_huge_page_tail(tail);
+               tail++;
+       }
+
+       return 1;
+}
+
 static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
                int write, struct page **pages, int *nr)
 {
@@ -77,9 +127,14 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
                pmd_t pmd = *pmdp;
 
                next = pmd_addr_end(addr, end);
-               if (pmd_none(pmd))
+               if (pmd_none(pmd) || pmd_trans_splitting(pmd))
                        return 0;
-               if (!gup_pte_range(pmd, addr, next, write, pages, nr))
+               if (unlikely(pmd_large(pmd))) {
+                       if (!gup_huge_pmd(pmdp, pmd, addr, next,
+                                         write, pages, nr))
+                               return 0;
+               } else if (!gup_pte_range(pmd, addr, next, write,
+                                         pages, nr))
                        return 0;
        } while (pmdp++, addr = next, addr != end);
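
gup_huge_pmd() follows the usual lockless fast-GUP dance: work from a snapshot of the PMD, take the page references speculatively, then re-read the PMD and back the references out if it changed underneath. A rough sketch of that snapshot / pin / re-check pattern with C11 atomics standing in for the kernel primitives (names and types here are illustrative, not kernel API):

        #include <stdatomic.h>
        #include <stdbool.h>

        struct page {
                atomic_int refcount;
        };

        /* slot stands in for the pmd_t being walked, head for the huge page. */
        static bool try_pin_huge(_Atomic unsigned long *slot, unsigned long snap,
                                 struct page *head, int refs)
        {
                atomic_fetch_add(&head->refcount, refs);      /* speculative pin */

                if (atomic_load(slot) != snap) {              /* PMD changed under us? */
                        atomic_fetch_sub(&head->refcount, refs);
                        return false;                         /* caller retries slowly */
                }
                return true;
        }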
 
index 875d008828b8ec619c295cf1b2d5c8923ea0389b..1bb7ad4aeff4754937bdc09f667110d28348fc96 100644 (file)
@@ -140,6 +140,8 @@ config ARCH_DEFCONFIG
 
 source "init/Kconfig"
 
+source "kernel/Kconfig.freezer"
+
 menu "Tilera-specific configuration"
 
 config NR_CPUS
index 2a9b293fece6c00f2c71d8c5829340bb842970e5..31672918064cf781d9cf0197d812237125c4d8d0 100644 (file)
@@ -250,7 +250,9 @@ static inline void writeq(u64 val, unsigned long addr)
 #define iowrite32 writel
 #define iowrite64 writeq
 
-static inline void memset_io(void *dst, int val, size_t len)
+#if CHIP_HAS_MMIO() || defined(CONFIG_PCI)
+
+static inline void memset_io(volatile void *dst, int val, size_t len)
 {
        int x;
        BUG_ON((unsigned long)dst & 0x3);
@@ -277,6 +279,8 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
                writel(*(u32 *)(src + x), dst + x);
 }
 
+#endif
+
 /*
  * The Tile architecture does not support IOPORT, even with PCI.
  * Unfortunately we can't yet simply not declare these methods,
index b4e96fef2cf8edc0b4931af8edcce502c7bebd4f..241c0bb60b12e5cf4e9eedce50279d648d313e25 100644 (file)
 #include <arch/interrupts.h>
 #include <arch/chip.h>
 
-#if !defined(__tilegx__) && defined(__ASSEMBLY__)
-
 /*
  * The set of interrupts we want to allow when interrupts are nominally
  * disabled.  The remainder are effectively "NMI" interrupts from
  * the point of view of the generic Linux code.  Note that synchronous
  * interrupts (aka "non-queued") are not blocked by the mask in any case.
  */
-#if CHIP_HAS_AUX_PERF_COUNTERS()
-#define LINUX_MASKABLE_INTERRUPTS_HI \
-       (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
-#else
-#define LINUX_MASKABLE_INTERRUPTS_HI \
-       (~(INT_MASK_HI(INT_PERF_COUNT)))
-#endif
-
-#else
-
-#if CHIP_HAS_AUX_PERF_COUNTERS()
-#define LINUX_MASKABLE_INTERRUPTS \
-       (~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT)))
-#else
 #define LINUX_MASKABLE_INTERRUPTS \
-       (~(INT_MASK(INT_PERF_COUNT)))
-#endif
+       (~((_AC(1,ULL) << INT_PERF_COUNT) | (_AC(1,ULL) << INT_AUX_PERF_COUNT)))
 
+#if CHIP_HAS_SPLIT_INTR_MASK()
+/* The same macro, but for the two 32-bit SPRs separately. */
+#define LINUX_MASKABLE_INTERRUPTS_LO (-1)
+#define LINUX_MASKABLE_INTERRUPTS_HI \
+       (~((1 << (INT_PERF_COUNT - 32)) | (1 << (INT_AUX_PERF_COUNT - 32))))
 #endif
 
 #ifndef __ASSEMBLY__
  * to know our current state.
  */
 DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
-#define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR)
+#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)
 
 /* Disable interrupts. */
 #define arch_local_irq_disable() \
@@ -165,7 +153,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 
 /* Prevent the given interrupt from being enabled next time we enable irqs. */
 #define arch_local_irq_mask(interrupt) \
-       (__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt))
+       (__get_cpu_var(interrupts_enabled_mask) &= ~(1ULL << (interrupt)))
 
 /* Prevent the given interrupt from being enabled immediately. */
 #define arch_local_irq_mask_now(interrupt) do { \
@@ -175,7 +163,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 
 /* Allow the given interrupt to be enabled next time we enable irqs. */
 #define arch_local_irq_unmask(interrupt) \
-       (__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt))
+       (__get_cpu_var(interrupts_enabled_mask) |= (1ULL << (interrupt)))
 
 /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
 #define arch_local_irq_unmask_now(interrupt) do { \
@@ -250,7 +238,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
 /* Disable interrupts. */
 #define IRQ_DISABLE(tmp0, tmp1)                                        \
        {                                                       \
-        movei  tmp0, -1;                                       \
+        movei  tmp0, LINUX_MASKABLE_INTERRUPTS_LO;             \
         moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI)        \
        };                                                      \
        {                                                       \
index 96b5710505b6897b98e9eeb9c6f9317173fd2b23..2efe3f68b2d6a80059acd078d53acea552cf4631 100644 (file)
@@ -15,6 +15,7 @@
 #ifndef __ARCH_INTERRUPTS_H__
 #define __ARCH_INTERRUPTS_H__
 
+#ifndef __KERNEL__
 /** Mask for an interrupt. */
 /* Note: must handle breaking interrupts into high and low words manually. */
 #define INT_MASK_LO(intno) (1 << (intno))
@@ -23,6 +24,7 @@
 #ifndef __ASSEMBLER__
 #define INT_MASK(intno) (1ULL << (intno))
 #endif
+#endif
 
 
 /** Where a given interrupt executes */
 
 #ifndef __ASSEMBLER__
 #define QUEUED_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_DMATLB_MISS) | \
-    INT_MASK(INT_DMATLB_ACCESS) | \
-    INT_MASK(INT_SNITLB_MISS) | \
-    INT_MASK(INT_SN_NOTIFY) | \
-    INT_MASK(INT_SN_FIREWALL) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_DMA_NOTIFY) | \
-    INT_MASK(INT_IDN_CA) | \
-    INT_MASK(INT_UDN_CA) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DMA_ASID) | \
-    INT_MASK(INT_SNI_ASID) | \
-    INT_MASK(INT_DMA_CPL) | \
-    INT_MASK(INT_SN_CPL) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_DMATLB_MISS) | \
+    (1ULL << INT_DMATLB_ACCESS) | \
+    (1ULL << INT_SNITLB_MISS) | \
+    (1ULL << INT_SN_NOTIFY) | \
+    (1ULL << INT_SN_FIREWALL) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_DMA_NOTIFY) | \
+    (1ULL << INT_IDN_CA) | \
+    (1ULL << INT_UDN_CA) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DMA_ASID) | \
+    (1ULL << INT_SNI_ASID) | \
+    (1ULL << INT_DMA_CPL) | \
+    (1ULL << INT_SN_CPL) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
     0)
 #define NONQUEUED_INTERRUPTS ( \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_SN_ACCESS) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_IDN_REFILL) | \
-    INT_MASK(INT_UDN_REFILL) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
-    INT_MASK(INT_SN_STATIC_ACCESS) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_SN_ACCESS) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_IDN_REFILL) | \
+    (1ULL << INT_UDN_REFILL) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
+    (1ULL << INT_SN_STATIC_ACCESS) | \
     0)
 #define CRITICAL_MASKED_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_DMATLB_MISS) | \
-    INT_MASK(INT_DMATLB_ACCESS) | \
-    INT_MASK(INT_SNITLB_MISS) | \
-    INT_MASK(INT_SN_NOTIFY) | \
-    INT_MASK(INT_SN_FIREWALL) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_DMA_NOTIFY) | \
-    INT_MASK(INT_IDN_CA) | \
-    INT_MASK(INT_UDN_CA) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_DMATLB_MISS) | \
+    (1ULL << INT_DMATLB_ACCESS) | \
+    (1ULL << INT_SNITLB_MISS) | \
+    (1ULL << INT_SN_NOTIFY) | \
+    (1ULL << INT_SN_FIREWALL) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_DMA_NOTIFY) | \
+    (1ULL << INT_IDN_CA) | \
+    (1ULL << INT_UDN_CA) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
     0)
 #define CRITICAL_UNMASKED_INTERRUPTS ( \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_SN_ACCESS) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_IDN_REFILL) | \
-    INT_MASK(INT_UDN_REFILL) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DMA_ASID) | \
-    INT_MASK(INT_SNI_ASID) | \
-    INT_MASK(INT_DMA_CPL) | \
-    INT_MASK(INT_SN_CPL) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
-    INT_MASK(INT_SN_STATIC_ACCESS) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_SN_ACCESS) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_IDN_REFILL) | \
+    (1ULL << INT_UDN_REFILL) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DMA_ASID) | \
+    (1ULL << INT_SNI_ASID) | \
+    (1ULL << INT_DMA_CPL) | \
+    (1ULL << INT_SN_CPL) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
+    (1ULL << INT_SN_STATIC_ACCESS) | \
     0)
 #define MASKABLE_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_IDN_REFILL) | \
-    INT_MASK(INT_UDN_REFILL) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_DMATLB_MISS) | \
-    INT_MASK(INT_DMATLB_ACCESS) | \
-    INT_MASK(INT_SNITLB_MISS) | \
-    INT_MASK(INT_SN_NOTIFY) | \
-    INT_MASK(INT_SN_FIREWALL) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_DMA_NOTIFY) | \
-    INT_MASK(INT_IDN_CA) | \
-    INT_MASK(INT_UDN_CA) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_IDN_REFILL) | \
+    (1ULL << INT_UDN_REFILL) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_DMATLB_MISS) | \
+    (1ULL << INT_DMATLB_ACCESS) | \
+    (1ULL << INT_SNITLB_MISS) | \
+    (1ULL << INT_SN_NOTIFY) | \
+    (1ULL << INT_SN_FIREWALL) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_DMA_NOTIFY) | \
+    (1ULL << INT_IDN_CA) | \
+    (1ULL << INT_UDN_CA) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
     0)
 #define UNMASKABLE_INTERRUPTS ( \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_SN_ACCESS) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DMA_ASID) | \
-    INT_MASK(INT_SNI_ASID) | \
-    INT_MASK(INT_DMA_CPL) | \
-    INT_MASK(INT_SN_CPL) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
-    INT_MASK(INT_SN_STATIC_ACCESS) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_SN_ACCESS) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DMA_ASID) | \
+    (1ULL << INT_SNI_ASID) | \
+    (1ULL << INT_DMA_CPL) | \
+    (1ULL << INT_SN_CPL) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
+    (1ULL << INT_SN_STATIC_ACCESS) | \
     0)
 #define SYNC_INTERRUPTS ( \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_SN_ACCESS) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_IDN_REFILL) | \
-    INT_MASK(INT_UDN_REFILL) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
-    INT_MASK(INT_SN_STATIC_ACCESS) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_SN_ACCESS) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_IDN_REFILL) | \
+    (1ULL << INT_UDN_REFILL) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
+    (1ULL << INT_SN_STATIC_ACCESS) | \
     0)
 #define NON_SYNC_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_DMATLB_MISS) | \
-    INT_MASK(INT_DMATLB_ACCESS) | \
-    INT_MASK(INT_SNITLB_MISS) | \
-    INT_MASK(INT_SN_NOTIFY) | \
-    INT_MASK(INT_SN_FIREWALL) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_DMA_NOTIFY) | \
-    INT_MASK(INT_IDN_CA) | \
-    INT_MASK(INT_UDN_CA) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DMA_ASID) | \
-    INT_MASK(INT_SNI_ASID) | \
-    INT_MASK(INT_DMA_CPL) | \
-    INT_MASK(INT_SN_CPL) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_DMATLB_MISS) | \
+    (1ULL << INT_DMATLB_ACCESS) | \
+    (1ULL << INT_SNITLB_MISS) | \
+    (1ULL << INT_SN_NOTIFY) | \
+    (1ULL << INT_SN_FIREWALL) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_DMA_NOTIFY) | \
+    (1ULL << INT_IDN_CA) | \
+    (1ULL << INT_UDN_CA) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DMA_ASID) | \
+    (1ULL << INT_SNI_ASID) | \
+    (1ULL << INT_DMA_CPL) | \
+    (1ULL << INT_SN_CPL) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
     0)
 #endif /* !__ASSEMBLER__ */
 #endif /* !__ARCH_INTERRUPTS_H__ */
index 5bb58b2e4e6f84370d04b6d233c212ddc37ddfad..13c9f91823484bf94b28703c3a3cede1e891c344 100644 (file)
@@ -15,6 +15,7 @@
 #ifndef __ARCH_INTERRUPTS_H__
 #define __ARCH_INTERRUPTS_H__
 
+#ifndef __KERNEL__
 /** Mask for an interrupt. */
 #ifdef __ASSEMBLER__
 /* Note: must handle breaking interrupts into high and low words manually. */
@@ -22,6 +23,7 @@
 #else
 #define INT_MASK(intno) (1ULL << (intno))
 #endif
+#endif
 
 
 /** Where a given interrupt executes */
 
 #ifndef __ASSEMBLER__
 #define QUEUED_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_AUX_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_IPI_3) | \
-    INT_MASK(INT_IPI_2) | \
-    INT_MASK(INT_IPI_1) | \
-    INT_MASK(INT_IPI_0) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_AUX_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_IPI_3) | \
+    (1ULL << INT_IPI_2) | \
+    (1ULL << INT_IPI_1) | \
+    (1ULL << INT_IPI_0) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
     0)
 #define NONQUEUED_INTERRUPTS ( \
-    INT_MASK(INT_SINGLE_STEP_3) | \
-    INT_MASK(INT_SINGLE_STEP_2) | \
-    INT_MASK(INT_SINGLE_STEP_1) | \
-    INT_MASK(INT_SINGLE_STEP_0) | \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_ILL_TRANS) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
+    (1ULL << INT_SINGLE_STEP_3) | \
+    (1ULL << INT_SINGLE_STEP_2) | \
+    (1ULL << INT_SINGLE_STEP_1) | \
+    (1ULL << INT_SINGLE_STEP_0) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_ILL_TRANS) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
     0)
 #define CRITICAL_MASKED_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_SINGLE_STEP_3) | \
-    INT_MASK(INT_SINGLE_STEP_2) | \
-    INT_MASK(INT_SINGLE_STEP_1) | \
-    INT_MASK(INT_SINGLE_STEP_0) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_AUX_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_IPI_3) | \
-    INT_MASK(INT_IPI_2) | \
-    INT_MASK(INT_IPI_1) | \
-    INT_MASK(INT_IPI_0) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_SINGLE_STEP_3) | \
+    (1ULL << INT_SINGLE_STEP_2) | \
+    (1ULL << INT_SINGLE_STEP_1) | \
+    (1ULL << INT_SINGLE_STEP_0) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_AUX_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_IPI_3) | \
+    (1ULL << INT_IPI_2) | \
+    (1ULL << INT_IPI_1) | \
+    (1ULL << INT_IPI_0) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
     0)
 #define CRITICAL_UNMASKED_INTERRUPTS ( \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_ILL_TRANS) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_ILL_TRANS) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
     0)
 #define MASKABLE_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_SINGLE_STEP_3) | \
-    INT_MASK(INT_SINGLE_STEP_2) | \
-    INT_MASK(INT_SINGLE_STEP_1) | \
-    INT_MASK(INT_SINGLE_STEP_0) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_AUX_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_IPI_3) | \
-    INT_MASK(INT_IPI_2) | \
-    INT_MASK(INT_IPI_1) | \
-    INT_MASK(INT_IPI_0) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_SINGLE_STEP_3) | \
+    (1ULL << INT_SINGLE_STEP_2) | \
+    (1ULL << INT_SINGLE_STEP_1) | \
+    (1ULL << INT_SINGLE_STEP_0) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_AUX_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_IPI_3) | \
+    (1ULL << INT_IPI_2) | \
+    (1ULL << INT_IPI_1) | \
+    (1ULL << INT_IPI_0) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
     0)
 #define UNMASKABLE_INTERRUPTS ( \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_ILL_TRANS) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_ILL_TRANS) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
     0)
 #define SYNC_INTERRUPTS ( \
-    INT_MASK(INT_SINGLE_STEP_3) | \
-    INT_MASK(INT_SINGLE_STEP_2) | \
-    INT_MASK(INT_SINGLE_STEP_1) | \
-    INT_MASK(INT_SINGLE_STEP_0) | \
-    INT_MASK(INT_IDN_COMPLETE) | \
-    INT_MASK(INT_UDN_COMPLETE) | \
-    INT_MASK(INT_ITLB_MISS) | \
-    INT_MASK(INT_ILL) | \
-    INT_MASK(INT_GPV) | \
-    INT_MASK(INT_IDN_ACCESS) | \
-    INT_MASK(INT_UDN_ACCESS) | \
-    INT_MASK(INT_SWINT_3) | \
-    INT_MASK(INT_SWINT_2) | \
-    INT_MASK(INT_SWINT_1) | \
-    INT_MASK(INT_SWINT_0) | \
-    INT_MASK(INT_ILL_TRANS) | \
-    INT_MASK(INT_UNALIGN_DATA) | \
-    INT_MASK(INT_DTLB_MISS) | \
-    INT_MASK(INT_DTLB_ACCESS) | \
+    (1ULL << INT_SINGLE_STEP_3) | \
+    (1ULL << INT_SINGLE_STEP_2) | \
+    (1ULL << INT_SINGLE_STEP_1) | \
+    (1ULL << INT_SINGLE_STEP_0) | \
+    (1ULL << INT_IDN_COMPLETE) | \
+    (1ULL << INT_UDN_COMPLETE) | \
+    (1ULL << INT_ITLB_MISS) | \
+    (1ULL << INT_ILL) | \
+    (1ULL << INT_GPV) | \
+    (1ULL << INT_IDN_ACCESS) | \
+    (1ULL << INT_UDN_ACCESS) | \
+    (1ULL << INT_SWINT_3) | \
+    (1ULL << INT_SWINT_2) | \
+    (1ULL << INT_SWINT_1) | \
+    (1ULL << INT_SWINT_0) | \
+    (1ULL << INT_ILL_TRANS) | \
+    (1ULL << INT_UNALIGN_DATA) | \
+    (1ULL << INT_DTLB_MISS) | \
+    (1ULL << INT_DTLB_ACCESS) | \
     0)
 #define NON_SYNC_INTERRUPTS ( \
-    INT_MASK(INT_MEM_ERROR) | \
-    INT_MASK(INT_IDN_FIREWALL) | \
-    INT_MASK(INT_UDN_FIREWALL) | \
-    INT_MASK(INT_TILE_TIMER) | \
-    INT_MASK(INT_AUX_TILE_TIMER) | \
-    INT_MASK(INT_IDN_TIMER) | \
-    INT_MASK(INT_UDN_TIMER) | \
-    INT_MASK(INT_IDN_AVAIL) | \
-    INT_MASK(INT_UDN_AVAIL) | \
-    INT_MASK(INT_IPI_3) | \
-    INT_MASK(INT_IPI_2) | \
-    INT_MASK(INT_IPI_1) | \
-    INT_MASK(INT_IPI_0) | \
-    INT_MASK(INT_PERF_COUNT) | \
-    INT_MASK(INT_AUX_PERF_COUNT) | \
-    INT_MASK(INT_INTCTRL_3) | \
-    INT_MASK(INT_INTCTRL_2) | \
-    INT_MASK(INT_INTCTRL_1) | \
-    INT_MASK(INT_INTCTRL_0) | \
-    INT_MASK(INT_BOOT_ACCESS) | \
-    INT_MASK(INT_WORLD_ACCESS) | \
-    INT_MASK(INT_I_ASID) | \
-    INT_MASK(INT_D_ASID) | \
-    INT_MASK(INT_DOUBLE_FAULT) | \
+    (1ULL << INT_MEM_ERROR) | \
+    (1ULL << INT_IDN_FIREWALL) | \
+    (1ULL << INT_UDN_FIREWALL) | \
+    (1ULL << INT_TILE_TIMER) | \
+    (1ULL << INT_AUX_TILE_TIMER) | \
+    (1ULL << INT_IDN_TIMER) | \
+    (1ULL << INT_UDN_TIMER) | \
+    (1ULL << INT_IDN_AVAIL) | \
+    (1ULL << INT_UDN_AVAIL) | \
+    (1ULL << INT_IPI_3) | \
+    (1ULL << INT_IPI_2) | \
+    (1ULL << INT_IPI_1) | \
+    (1ULL << INT_IPI_0) | \
+    (1ULL << INT_PERF_COUNT) | \
+    (1ULL << INT_AUX_PERF_COUNT) | \
+    (1ULL << INT_INTCTRL_3) | \
+    (1ULL << INT_INTCTRL_2) | \
+    (1ULL << INT_INTCTRL_1) | \
+    (1ULL << INT_INTCTRL_0) | \
+    (1ULL << INT_BOOT_ACCESS) | \
+    (1ULL << INT_WORLD_ACCESS) | \
+    (1ULL << INT_I_ASID) | \
+    (1ULL << INT_D_ASID) | \
+    (1ULL << INT_DOUBLE_FAULT) | \
     0)
 #endif /* !__ASSEMBLER__ */
 #endif /* !__ARCH_INTERRUPTS_H__ */
index 54bc9a6678e8d6b03c319d56b7b5b234f502fb6e..4ea08090265426df8f5f61dbcfaf6a0c92904694 100644 (file)
@@ -1035,7 +1035,9 @@ handle_syscall:
        /* Ensure that the syscall number is within the legal range. */
        {
         moveli r20, hw2(sys_call_table)
+#ifdef CONFIG_COMPAT
         blbs   r30, .Lcompat_syscall
+#endif
        }
        {
         cmpltu r21, TREG_SYSCALL_NR_NAME, r21
@@ -1093,6 +1095,7 @@ handle_syscall:
         j      .Lresume_userspace   /* jump into middle of interrupt_return */
        }
 
+#ifdef CONFIG_COMPAT
 .Lcompat_syscall:
        /*
         * Load the base of the compat syscall table in r20, and
@@ -1117,6 +1120,7 @@ handle_syscall:
        { move r15, r4; addxi r4, r4, 0 }
        { move r16, r5; addxi r5, r5, 0 }
        j .Lload_syscall_pointer
+#endif
 
 .Linvalid_syscall:
        /* Report an invalid syscall back to the user program */
index 0e5661e7d00d36e770ccebfe752e891fa24e1e6f..caf93ae117930d407bf6a335039817cfdac67408 100644 (file)
@@ -159,7 +159,7 @@ static void save_arch_state(struct thread_struct *t);
 int copy_thread(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct *p)
 {
-       struct pt_regs *childregs = task_pt_regs(p), *regs = current_pt_regs();
+       struct pt_regs *childregs = task_pt_regs(p);
        unsigned long ksp;
        unsigned long *callee_regs;
 
index baa3d905fee21c9fc2ef403449f7e4d671ad7e1e..d1b5c913ae724d4a4f794d0590e263d305a4a58a 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/reboot.h>
 #include <linux/smp.h>
 #include <linux/pm.h>
+#include <linux/export.h>
 #include <asm/page.h>
 #include <asm/setup.h>
 #include <hv/hypervisor.h>
@@ -49,3 +50,4 @@ void machine_restart(char *cmd)
 
 /* No interesting distinction to be made here. */
 void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
index 6a649a4462d35ee7b70f0b2305e58bd90028ed87..d1e15f7b59c68ab54ee62a137443270287f59f34 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/timex.h>
 #include <linux/hugetlb.h>
 #include <linux/start_kernel.h>
+#include <linux/screen_info.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
@@ -49,6 +50,10 @@ static inline int ABS(int x) { return x >= 0 ? x : -x; }
 /* Chip information */
 char chip_model[64] __write_once;
 
+#ifdef CONFIG_VT
+struct screen_info screen_info;
+#endif
+
 struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 
index b2f44c28dda6f29c1867c59464462c63ac427e6a..ed258b8ae320229f401e7f22a9515ab4b6cda954 100644 (file)
@@ -112,7 +112,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
                       p->pc, p->sp, p->ex1);
                p = NULL;
        }
-       if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0)
+       if (!kbt->profile || ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) == 0)
                return p;
        return NULL;
 }
@@ -484,6 +484,7 @@ void save_stack_trace(struct stack_trace *trace)
 {
        save_stack_trace_tsk(NULL, trace);
 }
+EXPORT_SYMBOL_GPL(save_stack_trace);
 
 #endif
 
index db4fb89e12d89a4461b8cc19c99885469fcaf22c..8f8ad814b1398619314b5a26d62695279d4f63f1 100644 (file)
@@ -12,6 +12,7 @@
  *   more details.
  */
 
+#include <linux/export.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
 #include <arch/icache.h>
@@ -165,3 +166,4 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
        __insn_mtspr(SPR_DSTREAM_PF, old_dstream_pf);
 #endif
 }
+EXPORT_SYMBOL_GPL(finv_buffer_remote);
index fdc403614d12b1d06a271942b1f846ef3871ac67..75947edccb26625fa8e68fb43fdff54cbd4900f0 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/ctype.h>
 #include <linux/errno.h>
 #include <linux/smp.h>
+#include <linux/export.h>
 
 /*
  * Allow cropping out bits beyond the end of the array.
@@ -50,3 +51,4 @@ int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits)
        } while (*bp != '\0' && *bp != '\n');
        return 0;
 }
+EXPORT_SYMBOL(bitmap_parselist_crop);
index dd5f0a33fdaff95d6b53efb947fa86880d92d706..4385cb6fa00ade132dc1ba9fb3fda09fbc775151 100644 (file)
@@ -55,6 +55,8 @@ EXPORT_SYMBOL(hv_dev_poll_cancel);
 EXPORT_SYMBOL(hv_dev_close);
 EXPORT_SYMBOL(hv_sysconf);
 EXPORT_SYMBOL(hv_confstr);
+EXPORT_SYMBOL(hv_get_rtc);
+EXPORT_SYMBOL(hv_set_rtc);
 
 /* libgcc.a */
 uint32_t __udivsi3(uint32_t dividend, uint32_t divisor);
index 5f7868dcd6d482abc42e00b6349f5234d0c63c3b..1ae911939a18bdd6050e9a928b099471efeb7599 100644 (file)
@@ -408,6 +408,7 @@ void homecache_change_page_home(struct page *page, int order, int home)
                __set_pte(ptep, pte_set_home(pteval, home));
        }
 }
+EXPORT_SYMBOL(homecache_change_page_home);
 
 struct page *homecache_alloc_pages(gfp_t gfp_mask,
                                   unsigned int order, int home)
index 79795af598105e9998f0de234b279fb6c61ca42f..260857a53b87f7ffc149a285e1bff6f582493577 100644 (file)
@@ -1,7 +1,7 @@
 # Select 32 or 64 bit
 config 64BIT
        bool "64-bit kernel" if ARCH = "x86"
-       default ARCH = "x86_64"
+       default ARCH != "i386"
        ---help---
          Say yes to build a 64-bit kernel - formerly known as x86_64
          Say no to build a 32-bit kernel - formerly known as i386
@@ -28,7 +28,6 @@ config X86
        select HAVE_OPROFILE
        select HAVE_PCSPKR_PLATFORM
        select HAVE_PERF_EVENTS
-       select HAVE_IRQ_WORK
        select HAVE_IOREMAP_PROT
        select HAVE_KPROBES
        select HAVE_MEMBLOCK
@@ -40,10 +39,12 @@ config X86
        select HAVE_DMA_CONTIGUOUS if !SWIOTLB
        select HAVE_KRETPROBES
        select HAVE_OPTPROBES
+       select HAVE_KPROBES_ON_FTRACE
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_FENTRY if X86_64
        select HAVE_C_RECORDMCOUNT
        select HAVE_DYNAMIC_FTRACE
+       select HAVE_DYNAMIC_FTRACE_WITH_REGS
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_FUNCTION_GRAPH_FP_TEST
@@ -106,6 +107,7 @@ config X86
        select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
        select GENERIC_TIME_VSYSCALL if X86_64
        select KTIME_SCALAR if X86_32
+       select ALWAYS_USE_PERSISTENT_CLOCK
        select GENERIC_STRNCPY_FROM_USER
        select GENERIC_STRNLEN_USER
        select HAVE_CONTEXT_TRACKING if X86_64
@@ -114,6 +116,7 @@ config X86
        select MODULES_USE_ELF_RELA if X86_64
        select CLONE_BACKWARDS if X86_32
        select GENERIC_SIGALTSTACK
+       select ARCH_USE_BUILTIN_BSWAP
 
 config INSTRUCTION_DECODER
        def_bool y
@@ -320,6 +323,10 @@ config X86_BIGSMP
        ---help---
          This option is needed for the systems that have more than 8 CPUs
 
+config GOLDFISH
+       def_bool y
+       depends on X86_GOLDFISH
+
 if X86_32
 config X86_EXTENDED_PLATFORM
        bool "Support for extended (non-PC) x86 platforms"
@@ -402,6 +409,14 @@ config X86_UV
 # Following is an alphabetically sorted list of 32 bit extended platforms
 # Please maintain the alphabetic order if and when there are additions
 
+config X86_GOLDFISH
+       bool "Goldfish (Virtual Platform)"
+       depends on X86_32
+       ---help---
+        Enable support for the Goldfish virtual platform used primarily
+        for Android development. Unless you are building for the Android
+        Goldfish emulator, say N here.
+
 config X86_INTEL_CE
        bool "CE4100 TV platform"
        depends on PCI
@@ -2138,6 +2153,7 @@ config OLPC_XO1_RTC
 config OLPC_XO1_SCI
        bool "OLPC XO-1 SCI extras"
        depends on OLPC && OLPC_XO1_PM
+       depends on INPUT=y
        select POWER_SUPPLY
        select GPIO_CS5535
        select MFD_CORE
@@ -2187,6 +2203,15 @@ config GEOS
        ---help---
          This option enables system support for the Traverse Technologies GEOS.
 
+config TS5500
+       bool "Technologic Systems TS-5500 platform support"
+       depends on MELAN
+       select CHECK_SIGNATURE
+       select NEW_LEDS
+       select LEDS_CLASS
+       ---help---
+         This option enables system support for the Technologic Systems TS-5500.
+
 endif # X86_32
 
 config AMD_NB
index e71fc4279aab62825524bba4357bd3cb3b6e847e..5c477260294f6ef78b1c1de6998b4860cc64ffbb 100644 (file)
@@ -2,7 +2,11 @@
 
 # select defconfig based on actual architecture
 ifeq ($(ARCH),x86)
+  ifeq ($(shell uname -m),x86_64)
+        KBUILD_DEFCONFIG := x86_64_defconfig
+  else
         KBUILD_DEFCONFIG := i386_defconfig
+  endif
 else
         KBUILD_DEFCONFIG := $(ARCH)_defconfig
 endif
index ccce0ed67dde703a80c78309252abbef17828291..379814bc41e3a956dc037a5f1d4ca23709854fbc 100644 (file)
@@ -71,7 +71,7 @@ GCOV_PROFILE := n
 $(obj)/bzImage: asflags-y  := $(SVGA_MODE)
 
 quiet_cmd_image = BUILD   $@
-cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin > $@
+cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/zoffset.h > $@
 
 $(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE
        $(call if_changed,image)
@@ -92,7 +92,7 @@ targets += voffset.h
 $(obj)/voffset.h: vmlinux FORCE
        $(call if_changed,voffset)
 
-sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
+sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|startup_64\|efi_pe_entry\|efi_stub_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
 
 quiet_cmd_zoffset = ZOFFSET $@
       cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
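The extended sed-zoffset pattern now also captures startup_64, efi_pe_entry and efi_stub_entry, so the generated zoffset.h carries the real link-time offsets of those symbols for the build tool to read back. The generated header is simply a list of lines of this form (addresses illustrative, matching the historical fixed layout):

    /* Example of generated zoffset.h content; values are illustrative. */
    #define ZO_startup_64 0x0000000000000200
    #define ZO_efi_pe_entry 0x0000000000000210
    #define ZO_efi_stub_entry 0x0000000000000230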
index b1942e222768e7c9c2707c14dbdbd9643a51a8b9..f8fa41190c3526f61ec7bb6e1dc5b0ebf08b3c4d 100644 (file)
@@ -256,10 +256,10 @@ static efi_status_t setup_efi_pci(struct boot_params *params)
        int i;
        struct setup_data *data;
 
-       data = (struct setup_data *)params->hdr.setup_data;
+       data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
 
        while (data && data->next)
-               data = (struct setup_data *)data->next;
+               data = (struct setup_data *)(unsigned long)data->next;
 
        status = efi_call_phys5(sys_table->boottime->locate_handle,
                                EFI_LOCATE_BY_PROTOCOL, &pci_proto,
@@ -295,16 +295,18 @@ static efi_status_t setup_efi_pci(struct boot_params *params)
                if (!pci)
                        continue;
 
+#ifdef CONFIG_X86_64
                status = efi_call_phys4(pci->attributes, pci,
                                        EfiPciIoAttributeOperationGet, 0,
                                        &attributes);
-
+#else
+               status = efi_call_phys5(pci->attributes, pci,
+                                       EfiPciIoAttributeOperationGet, 0, 0,
+                                       &attributes);
+#endif
                if (status != EFI_SUCCESS)
                        continue;
 
-               if (!attributes & EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM)
-                       continue;
-
                if (!pci->romimage || !pci->romsize)
                        continue;
 
@@ -345,9 +347,9 @@ static efi_status_t setup_efi_pci(struct boot_params *params)
                memcpy(rom->romdata, pci->romimage, pci->romsize);
 
                if (data)
-                       data->next = (uint64_t)rom;
+                       data->next = (unsigned long)rom;
                else
-                       params->hdr.setup_data = (uint64_t)rom;
+                       params->hdr.setup_data = (unsigned long)rom;
 
                data = (struct setup_data *)rom;
 
@@ -432,10 +434,9 @@ static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
                         * Once we've found a GOP supporting ConOut,
                         * don't bother looking any further.
                         */
+                       first_gop = gop;
                        if (conout_found)
                                break;
-
-                       first_gop = gop;
                }
        }
 
index aa4aaf1b23803e8ef0e180e3cbeedc7a4db6c556..1e3184f6072f913250b3b18371978a4fd1b3f63d 100644 (file)
@@ -35,11 +35,11 @@ ENTRY(startup_32)
 #ifdef CONFIG_EFI_STUB
        jmp     preferred_addr
 
-       .balign 0x10
        /*
         * We don't need the return address, so set up the stack so
-        * efi_main() can find its arugments.
+        * efi_main() can find its arguments.
         */
+ENTRY(efi_pe_entry)
        add     $0x4, %esp
 
        call    make_boot_params
@@ -50,8 +50,10 @@ ENTRY(startup_32)
        pushl   %eax
        pushl   %esi
        pushl   %ecx
+       sub     $0x4, %esp
 
-       .org 0x30,0x90
+ENTRY(efi_stub_entry)
+       add     $0x4, %esp
        call    efi_main
        cmpl    $0, %eax
        movl    %eax, %esi
index 2c4b171eec337619e8f2dab2b3c3b2048e622e51..f5d1aaa0dec87ce844317f2a343a90739408ac22 100644 (file)
@@ -201,12 +201,12 @@ ENTRY(startup_64)
         */
 #ifdef CONFIG_EFI_STUB
        /*
-        * The entry point for the PE/COFF executable is 0x210, so only
-        * legacy boot loaders will execute this jmp.
+        * The entry point for the PE/COFF executable is efi_pe_entry, so
+        * only legacy boot loaders will execute this jmp.
         */
        jmp     preferred_addr
 
-       .org 0x210
+ENTRY(efi_pe_entry)
        mov     %rcx, %rdi
        mov     %rdx, %rsi
        pushq   %rdi
@@ -218,7 +218,7 @@ ENTRY(startup_64)
        popq    %rsi
        popq    %rdi
 
-       .org 0x230,0x90
+ENTRY(efi_stub_entry)
        call    efi_main
        movq    %rax,%rsi
        cmpq    $0,%rax
index 88f7ff6da40442086b6c6664b227f00f97bf2711..7cb56c6ca35154f19d3a1f4242845f8242a37202 100644 (file)
@@ -325,6 +325,8 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
 {
        real_mode = rmode;
 
+       sanitize_boot_params(real_mode);
+
        if (real_mode->screen_info.orig_video_mode == 7) {
                vidmem = (char *) 0xb0000;
                vidport = 0x3b4;
index 0e6dc0ee0eeabd04fc283bce937b734ea42923c7..674019d8e2355901b69de5c55cfb92669c5628be 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/page.h>
 #include <asm/boot.h>
 #include <asm/bootparam.h>
+#include <asm/bootparam_utils.h>
 
 #define BOOT_BOOT_H
 #include "../ctype.h"
index 8c132a625b94991c179def21973bb8ad3c3a56d5..944ce595f767621f07a47d6c89cdea9415627663 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/e820.h>
 #include <asm/page_types.h>
 #include <asm/setup.h>
+#include <asm/bootparam.h>
 #include "boot.h"
 #include "voffset.h"
 #include "zoffset.h"
@@ -255,6 +256,9 @@ section_table:
        # header, from the old boot sector.
 
        .section ".header", "a"
+       .globl  sentinel
+sentinel:      .byte 0xff, 0xff        /* Used to detect broken loaders */
+
        .globl  hdr
 hdr:
 setup_sects:   .byte 0                 /* Filled in by build.c */
@@ -279,7 +283,7 @@ _start:
        # Part 2 of the header, from the old setup.S
 
                .ascii  "HdrS"          # header signature
-               .word   0x020b          # header version number (>= 0x0105)
+               .word   0x020c          # header version number (>= 0x0105)
                                        # or else old loadlin-1.5 will fail)
                .globl realmode_swtch
 realmode_swtch:        .word   0, 0            # default_switch, SETUPSEG
@@ -297,13 +301,7 @@ type_of_loader:    .byte   0               # 0 means ancient bootloader, newer
 
 # flags, unused bits must be zero (RFU) bit within loadflags
 loadflags:
-LOADED_HIGH    = 1                     # If set, the kernel is loaded high
-CAN_USE_HEAP   = 0x80                  # If set, the loader also has set
-                                       # heap_end_ptr to tell how much
-                                       # space behind setup.S can be used for
-                                       # heap purposes.
-                                       # Only the loader knows what is free
-               .byte   LOADED_HIGH
+               .byte   LOADED_HIGH     # The kernel is to be loaded high
 
 setup_move_size: .word  0x8000         # size to move, when setup is not
                                        # loaded at 0x90000. We will move setup
@@ -369,7 +367,23 @@ relocatable_kernel:    .byte 1
 relocatable_kernel:    .byte 0
 #endif
 min_alignment:         .byte MIN_KERNEL_ALIGN_LG2      # minimum alignment
-pad3:                  .word 0
+
+xloadflags:
+#ifdef CONFIG_X86_64
+# define XLF0 XLF_KERNEL_64                    /* 64-bit kernel */
+#else
+# define XLF0 0
+#endif
+#ifdef CONFIG_EFI_STUB
+# ifdef CONFIG_X86_64
+#  define XLF23 XLF_EFI_HANDOVER_64            /* 64-bit EFI handover ok */
+# else
+#  define XLF23 XLF_EFI_HANDOVER_32            /* 32-bit EFI handover ok */
+# endif
+#else
+# define XLF23 0
+#endif
+                       .word XLF0 | XLF23
 
 cmdline_size:   .long   COMMAND_LINE_SIZE-1     #length of the command line,
                                                 #added with boot protocol
@@ -397,8 +411,13 @@ pref_address:              .quad LOAD_PHYSICAL_ADDR        # preferred load addr
 #define INIT_SIZE VO_INIT_SIZE
 #endif
 init_size:             .long INIT_SIZE         # kernel initialization size
-handover_offset:       .long 0x30              # offset to the handover
+handover_offset:
+#ifdef CONFIG_EFI_STUB
+                       .long 0x30              # offset to the handover
                                                # protocol entry point
+#else
+                       .long 0
+#endif
 
 # End of setup header #####################################################
 
index 03c0683636b6fbf859a8795262f249631e6157f9..96a6c7563538364d2dee7e307846815156c11c33 100644 (file)
@@ -13,7 +13,7 @@ SECTIONS
        .bstext         : { *(.bstext) }
        .bsdata         : { *(.bsdata) }
 
-       . = 497;
+       . = 495;
        .header         : { *(.header) }
        .entrytext      : { *(.entrytext) }
        .inittext       : { *(.inittext) }
index 4b8e165ee5723643dcd89619a341c7559eafe2bd..94c54465002003e34af8f6fb3d14a1be839fdeba 100644 (file)
@@ -52,6 +52,10 @@ int is_big_kernel;
 
 #define PECOFF_RELOC_RESERVE 0x20
 
+unsigned long efi_stub_entry;
+unsigned long efi_pe_entry;
+unsigned long startup_64;
+
 /*----------------------------------------------------------------------*/
 
 static const u32 crctab32[] = {
@@ -132,7 +136,7 @@ static void die(const char * str, ...)
 
 static void usage(void)
 {
-       die("Usage: build setup system [> image]");
+       die("Usage: build setup system [zoffset.h] [> image]");
 }
 
 #ifdef CONFIG_EFI_STUB
@@ -206,30 +210,54 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
         */
        put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]);
 
-#ifdef CONFIG_X86_32
        /*
-        * Address of entry point.
-        *
-        * The EFI stub entry point is +16 bytes from the start of
-        * the .text section.
+        * Address of entry point for PE/COFF executable
         */
-       put_unaligned_le32(text_start + 16, &buf[pe_header + 0x28]);
-#else
-       /*
-        * Address of entry point. startup_32 is at the beginning and
-        * the 64-bit entry point (startup_64) is always 512 bytes
-        * after. The EFI stub entry point is 16 bytes after that, as
-        * the first instruction allows legacy loaders to jump over
-        * the EFI stub initialisation
-        */
-       put_unaligned_le32(text_start + 528, &buf[pe_header + 0x28]);
-#endif /* CONFIG_X86_32 */
+       put_unaligned_le32(text_start + efi_pe_entry, &buf[pe_header + 0x28]);
 
        update_pecoff_section_header(".text", text_start, text_sz);
 }
 
 #endif /* CONFIG_EFI_STUB */
 
+
+/*
+ * Parse zoffset.h and find the entry points. We could just #include zoffset.h
+ * but that would mean tools/build would have to be rebuilt every time. It's
+ * not as if parsing it is hard...
+ */
+#define PARSE_ZOFS(p, sym) do { \
+       if (!strncmp(p, "#define ZO_" #sym " ", 11+sizeof(#sym)))       \
+               sym = strtoul(p + 11 + sizeof(#sym), NULL, 16);         \
+} while (0)
+
+static void parse_zoffset(char *fname)
+{
+       FILE *file;
+       char *p;
+       int c;
+
+       file = fopen(fname, "r");
+       if (!file)
+               die("Unable to open `%s': %m", fname);
+       c = fread(buf, 1, sizeof(buf) - 1, file);
+       if (ferror(file))
+               die("read-error on `zoffset.h'");
+       buf[c] = 0;
+
+       p = (char *)buf;
+
+       while (p && *p) {
+               PARSE_ZOFS(p, efi_stub_entry);
+               PARSE_ZOFS(p, efi_pe_entry);
+               PARSE_ZOFS(p, startup_64);
+
+               p = strchr(p, '\n');
+               while (p && (*p == '\r' || *p == '\n'))
+                       p++;
+       }
+}
+
 int main(int argc, char ** argv)
 {
        unsigned int i, sz, setup_sectors;
@@ -241,7 +269,19 @@ int main(int argc, char ** argv)
        void *kernel;
        u32 crc = 0xffffffffUL;
 
-       if (argc != 3)
+       /* Defaults for old kernel */
+#ifdef CONFIG_X86_32
+       efi_pe_entry = 0x10;
+       efi_stub_entry = 0x30;
+#else
+       efi_pe_entry = 0x210;
+       efi_stub_entry = 0x230;
+       startup_64 = 0x200;
+#endif
+
+       if (argc == 4)
+               parse_zoffset(argv[3]);
+       else if (argc != 3)
                usage();
 
        /* Copy the setup code */
@@ -299,6 +339,11 @@ int main(int argc, char ** argv)
 
 #ifdef CONFIG_EFI_STUB
        update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
+
+#ifdef CONFIG_X86_64 /* Yes, this is really how we defined it :( */
+       efi_stub_entry -= 0x200;
+#endif
+       put_unaligned_le32(efi_stub_entry, &buf[0x264]);
 #endif
 
        crc = partial_crc32(buf, i, crc);
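With the entry points parsed from zoffset.h (or the historical defaults for old layouts), the PE/COFF entry point and the handover_offset word at file offset 0x264 are derived from the real symbols instead of hard-coded constants. The x86-64 quirk above (efi_stub_entry -= 0x200) keeps the old convention that the stored handover offset is relative to startup_64, so a 64-bit EFI loader has to add those 512 bytes back. An illustrative, hypothetical loader-side sketch:

    /* Illustrative only: how a loader might locate the EFI handover entry.
     * 'image' points at the start of the bzImage; 'pm_kernel_base' is the
     * (hypothetical) address the protected-mode kernel was loaded at.
     */
    u32 handover_offset = get_unaligned_le32(&image[0x264]);
    unsigned long entry = pm_kernel_base + handover_offset;
    if (image_is_64bit)             /* hypothetical check, e.g. via xloadflags */
            entry += 0x200;         /* stored value is relative to startup_64 */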
index 5598547281a75a4a0b4fc6a31cfecfcec3540022..94447086e551e3a8b4102a0e46800bc39c7ff172 100644 (file)
@@ -1,3 +1,4 @@
+# CONFIG_64BIT is not set
 CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
index 102ff7cb3e4146ef1e158d718c49c1c0a6773f7b..142c4ceff1122cadb5dd92ddb0fab05c64f0c48e 100644 (file)
@@ -207,7 +207,7 @@ sysexit_from_sys_call:
        testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        jnz ia32_ret_from_sys_call
        TRACE_IRQS_ON
-       sti
+       ENABLE_INTERRUPTS(CLBR_NONE)
        movl %eax,%esi          /* second arg, syscall return value */
        cmpl $-MAX_ERRNO,%eax   /* is it an error ? */
        jbe 1f
@@ -217,7 +217,7 @@ sysexit_from_sys_call:
        call __audit_syscall_exit
        movq RAX-ARGOFFSET(%rsp),%rax   /* reload syscall return value */
        movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
-       cli
+       DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        jz \exit
index b3341e9cd8fdee3a25e7582563564d813594b6a3..a54ee1d054d92e66a382dbed20bdc203cfe3f30b 100644 (file)
@@ -81,6 +81,23 @@ static inline struct amd_northbridge *node_to_amd_nb(int node)
        return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
 }
 
+static inline u16 amd_get_node_id(struct pci_dev *pdev)
+{
+       struct pci_dev *misc;
+       int i;
+
+       for (i = 0; i != amd_nb_num(); i++) {
+               misc = node_to_amd_nb(i)->misc;
+
+               if (pci_domain_nr(misc->bus) == pci_domain_nr(pdev->bus) &&
+                   PCI_SLOT(misc->devfn) == PCI_SLOT(pdev->devfn))
+                       return i;
+       }
+
+       WARN(1, "Unable to find AMD Northbridge id for %s\n", pci_name(pdev));
+       return 0;
+}
+
 #else
 
 #define amd_nb_num(x)          0
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
new file mode 100644 (file)
index 0000000..5b5e9cb
--- /dev/null
@@ -0,0 +1,38 @@
+#ifndef _ASM_X86_BOOTPARAM_UTILS_H
+#define _ASM_X86_BOOTPARAM_UTILS_H
+
+#include <asm/bootparam.h>
+
+/*
+ * This file is included from multiple environments.  Do not
+ * add completing #includes to make it standalone.
+ */
+
+/*
+ * Deal with bootloaders which fail to initialize unknown fields in
+ * boot_params to zero.  The fields in this list are taken from
+ * analysis of kexec-tools; if other broken bootloaders initialize a
+ * different set of fields we will need to figure out how to disambiguate.
+ *
+ */
+static void sanitize_boot_params(struct boot_params *boot_params)
+{
+       if (boot_params->sentinel) {
+               /* fields in boot_params are not valid, clear them */
+               memset(&boot_params->olpc_ofw_header, 0,
+                      (char *)&boot_params->alt_mem_k -
+                       (char *)&boot_params->olpc_ofw_header);
+               memset(&boot_params->kbd_status, 0,
+                      (char *)&boot_params->hdr -
+                      (char *)&boot_params->kbd_status);
+               memset(&boot_params->_pad7[0], 0,
+                      (char *)&boot_params->edd_mbr_sig_buffer[0] -
+                       (char *)&boot_params->_pad7[0]);
+               memset(&boot_params->_pad8[0], 0,
+                      (char *)&boot_params->eddbuf[0] -
+                       (char *)&boot_params->_pad8[0]);
+               memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
+       }
+}
+
+#endif /* _ASM_X86_BOOTPARAM_UTILS_H */
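The companion changes above (the two 0xff sentinel bytes in header.S and the setup.ld start moving from 497 to 495) place a sentinel at offset 0x1ef, just before the setup header. A loader that copies the whole header area into boot_params, or that never zeroes the structure, leaves that byte non-zero, and sanitize_boot_params() then clears the fields such loaders are known to leave stale. A hedged usage sketch, mirroring the decompress_kernel() call added earlier in this series:

    /* Illustrative: scrub boot_params before trusting any non-header fields. */
    #include <asm/bootparam_utils.h>

    static void early_boot_entry(struct boot_params *bp)
    {
            sanitize_boot_params(bp);  /* no-op if the loader zeroed the sentinel */
            /* bp can now be used; stale fields from broken loaders are cleared */
    }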
index 2d9075e863a0cfc25e43b7d40e706e8016586859..93fe929d1cee20ec1dc183d5c390e5d9325b26ce 100644 (file)
 #define X86_FEATURE_TBM                (6*32+21) /* trailing bit manipulations */
 #define X86_FEATURE_TOPOEXT    (6*32+22) /* topology extensions CPUID leafs */
 #define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */
+#define X86_FEATURE_PERFCTR_NB  (6*32+24) /* NB performance counter extensions */
 
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
@@ -309,6 +310,7 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_hypervisor     boot_cpu_has(X86_FEATURE_HYPERVISOR)
 #define cpu_has_pclmulqdq      boot_cpu_has(X86_FEATURE_PCLMULQDQ)
 #define cpu_has_perfctr_core   boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
+#define cpu_has_perfctr_nb     boot_cpu_has(X86_FEATURE_PERFCTR_NB)
 #define cpu_has_cx8            boot_cpu_has(X86_FEATURE_CX8)
 #define cpu_has_cx16           boot_cpu_has(X86_FEATURE_CX16)
 #define cpu_has_eager_fpu      boot_cpu_has(X86_FEATURE_EAGER_FPU)
index 6e8fdf5ad1135c0100c8b7a5220bb79db2359ddb..28677c55113f8cea59e59c67e7cd53cfcd615b6d 100644 (file)
@@ -94,6 +94,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
 #endif /* CONFIG_X86_32 */
 
 extern int add_efi_memmap;
+extern unsigned long x86_efi_facility;
 extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
 extern int efi_memblock_x86_reserve_range(void);
 extern void efi_call_phys_prelog(void);
index 9a25b522d37799adf0b7a00d898ba30cb70b08b7..86cb51e1ca96abc11dda6317293a775aebbdf51e 100644 (file)
@@ -44,7 +44,6 @@
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 #define ARCH_SUPPORTS_FTRACE_OPS 1
-#define ARCH_SUPPORTS_FTRACE_SAVE_REGS
 #endif
 
 #ifndef __ASSEMBLY__
index 434e2106cc870d48e1f142e60a436f50e04a592a..b18df579c0e99b09ff33261e692135338826de3a 100644 (file)
@@ -80,9 +80,9 @@ extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg);
 extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg);
 
 #ifdef CONFIG_PCI_MSI
-extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id);
+extern int default_setup_hpet_msi(unsigned int irq, unsigned int id);
 #else
-static inline int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
+static inline int default_setup_hpet_msi(unsigned int irq, unsigned int id)
 {
        return -EINVAL;
 }
@@ -111,6 +111,7 @@ extern void hpet_unregister_irq_handler(rtc_irq_handler handler);
 static inline int hpet_enable(void) { return 0; }
 static inline int is_hpet_enabled(void) { return 0; }
 #define hpet_readl(a) 0
+#define default_setup_hpet_msi NULL
 
 #endif
 #endif /* _ASM_X86_HPET_H */
index eb92a6ed2be704ace2a935354b4667df75563394..10a78c3d3d5a771d8581a13e402eb69e462e1dae 100644 (file)
@@ -101,6 +101,7 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
        irq_attr->polarity      = polarity;
 }
 
+/* Intel specific interrupt remapping information */
 struct irq_2_iommu {
        struct intel_iommu *iommu;
        u16 irte_index;
@@ -108,6 +109,12 @@ struct irq_2_iommu {
        u8  irte_mask;
 };
 
+/* AMD specific interrupt remapping information */
+struct irq_2_irte {
+       u16 devid; /* Device ID for IRTE table */
+       u16 index; /* Index into IRTE table */
+};
+
 /*
  * This is performance-critical, we want to do it O(1)
  *
@@ -120,7 +127,11 @@ struct irq_cfg {
        u8                      vector;
        u8                      move_in_progress : 1;
 #ifdef CONFIG_IRQ_REMAP
-       struct irq_2_iommu      irq_2_iommu;
+       u8                      remapped : 1;
+       union {
+               struct irq_2_iommu irq_2_iommu;
+               struct irq_2_irte  irq_2_irte;
+       };
 #endif
 };
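With the Intel and AMD remapping data now sharing one anonymous union inside irq_cfg, the new remapped bit marks interrupts that are routed through the remapping layer; which union member is meaningful is decided by whichever IOMMU driver owns the interrupt. A minimal stand-alone sketch of a tagged anonymous union in that style (using the single bit as the member selector is a simplification for illustration only):

    #include <stdio.h>

    struct irq_2_iommu { unsigned short irte_index; };   /* Intel-style data */
    struct irq_2_irte  { unsigned short devid, index; }; /* AMD-style data   */

    struct cfg {
            unsigned char remapped : 1;    /* tag: which member is in use */
            union {
                    struct irq_2_iommu irq_2_iommu;
                    struct irq_2_irte  irq_2_irte;
            };
    };

    static void show(const struct cfg *c)
    {
            if (c->remapped)
                    printf("irte: devid=%u index=%u\n",
                           (unsigned)c->irq_2_irte.devid,
                           (unsigned)c->irq_2_irte.index);
            else
                    printf("iommu: irte_index=%u\n",
                           (unsigned)c->irq_2_iommu.irte_index);
    }

    int main(void)
    {
            struct cfg a = { .remapped = 0, .irq_2_iommu = { .irte_index = 3 } };
            struct cfg b = { .remapped = 1, .irq_2_irte  = { .devid = 7, .index = 1 } };

            show(&a);
            show(&b);
            return 0;
    }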
 
index b518c7509933337225756646fcbd73a6882c2990..86095ed141356f3210fee6954f80a763df4a5bea 100644 (file)
@@ -25,6 +25,7 @@
 
 extern void init_hypervisor(struct cpuinfo_x86 *c);
 extern void init_hypervisor_platform(void);
+extern bool hypervisor_x2apic_available(void);
 
 /*
  * x86 hypervisor information
@@ -41,6 +42,9 @@ struct hypervisor_x86 {
 
        /* Platform setup (run once per boot) */
        void            (*init_platform)(void);
+
+       /* X2APIC detection (run once per boot) */
+       bool            (*x2apic_available)(void);
 };
 
 extern const struct hypervisor_x86 *x86_hyper;
@@ -51,13 +55,4 @@ extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
 extern const struct hypervisor_x86 x86_hyper_xen_hvm;
 extern const struct hypervisor_x86 x86_hyper_kvm;
 
-static inline bool hypervisor_x2apic_available(void)
-{
-       if (kvm_para_available())
-               return true;
-       if (xen_x2apic_para_available())
-               return true;
-       return false;
-}
-
 #endif
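With the old inline helper removed, x2apic detection is presumably left to the new per-hypervisor x2apic_available callback rather than hard-coded KVM/Xen checks. A sketch of what the out-of-line replacement could look like, reduced to a stand-alone program (struct layout trimmed; the real implementation lives with the hypervisor probing code and may differ in detail):

    #include <stdbool.h>
    #include <stdio.h>

    /* trimmed stand-in for the struct declared above */
    struct hypervisor_x86 {
            const char *name;
            bool (*x2apic_available)(void);
    };

    /* set during boot to whichever hypervisor was detected, or NULL */
    static const struct hypervisor_x86 *x86_hyper;

    static bool hypervisor_x2apic_available(void)
    {
            /* no hypervisor, or one without the hook, means no x2apic help */
            return x86_hyper && x86_hyper->x2apic_available &&
                   x86_hyper->x2apic_available();
    }

    static bool demo_x2apic_available(void) { return true; }
    static const struct hypervisor_x86 demo_hyper = { "demo", demo_x2apic_available };

    int main(void)
    {
            printf("bare metal: %d\n", hypervisor_x2apic_available());           /* 0 */
            x86_hyper = &demo_hyper;
            printf("under demo hypervisor: %d\n", hypervisor_x2apic_available()); /* 1 */
            return 0;
    }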
index 73d8c5398ea995dd685ad00ad99b17fd69c5c1b4..459e50a424d148eb94163a343cbf17a8b4abfad9 100644 (file)
@@ -144,11 +144,24 @@ extern int timer_through_8259;
        (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
 
 struct io_apic_irq_attr;
+struct irq_cfg;
 extern int io_apic_set_pci_routing(struct device *dev, int irq,
                 struct io_apic_irq_attr *irq_attr);
 void setup_IO_APIC_irq_extra(u32 gsi);
 extern void ioapic_insert_resources(void);
 
+extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *,
+                                    unsigned int, int,
+                                    struct io_apic_irq_attr *);
+extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *,
+                                    unsigned int, int,
+                                    struct io_apic_irq_attr *);
+extern void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg);
+
+extern void native_compose_msi_msg(struct pci_dev *pdev,
+                                  unsigned int irq, unsigned int dest,
+                                  struct msi_msg *msg, u8 hpet_id);
+extern void native_eoi_ioapic_pin(int apic, int pin, int vector);
 int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr);
 
 extern int save_ioapic_entries(void);
@@ -179,6 +192,12 @@ extern void __init native_io_apic_init_mappings(void);
 extern unsigned int native_io_apic_read(unsigned int apic, unsigned int reg);
 extern void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int val);
 extern void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int val);
+extern void native_disable_io_apic(void);
+extern void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries);
+extern void intel_ir_io_apic_print_entries(unsigned int apic, unsigned int nr_entries);
+extern int native_ioapic_set_affinity(struct irq_data *,
+                                     const struct cpumask *,
+                                     bool);
 
 static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
 {
@@ -193,6 +212,9 @@ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned
 {
        x86_io_apic_ops.modify(apic, reg, value);
 }
+
+extern void io_apic_eoi(unsigned int apic, unsigned int vector);
+
 #else  /* !CONFIG_X86_IO_APIC */
 
 #define io_apic_assign_pci_irqs 0
@@ -223,6 +245,12 @@ static inline void disable_ioapic_support(void) { }
 #define native_io_apic_read            NULL
 #define native_io_apic_write           NULL
 #define native_io_apic_modify          NULL
+#define native_disable_io_apic         NULL
+#define native_io_apic_print_entries   NULL
+#define native_ioapic_set_affinity     NULL
+#define native_setup_ioapic_entry      NULL
+#define native_compose_msi_msg         NULL
+#define native_eoi_ioapic_pin          NULL
 #endif
 
 #endif /* _ASM_X86_IO_APIC_H */
index 5fb9bbbd2f14c0ad07033b067797ed179e1d8fd8..95fd3527f632d2f0b45c47b07bada7fee46effea 100644 (file)
@@ -26,8 +26,6 @@
 
 #ifdef CONFIG_IRQ_REMAP
 
-extern int irq_remapping_enabled;
-
 extern void setup_irq_remapping_ops(void);
 extern int irq_remapping_supported(void);
 extern int irq_remapping_prepare(void);
@@ -40,21 +38,19 @@ extern int setup_ioapic_remapped_entry(int irq,
                                       unsigned int destination,
                                       int vector,
                                       struct io_apic_irq_attr *attr);
-extern int set_remapped_irq_affinity(struct irq_data *data,
-                                    const struct cpumask *mask,
-                                    bool force);
 extern void free_remapped_irq(int irq);
 extern void compose_remapped_msi_msg(struct pci_dev *pdev,
                                     unsigned int irq, unsigned int dest,
                                     struct msi_msg *msg, u8 hpet_id);
-extern int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec);
-extern int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
-                                 int index, int sub_handle);
 extern int setup_hpet_msi_remapped(unsigned int irq, unsigned int id);
+extern void panic_if_irq_remap(const char *msg);
+extern bool setup_remapped_irq(int irq,
+                              struct irq_cfg *cfg,
+                              struct irq_chip *chip);
 
-#else  /* CONFIG_IRQ_REMAP */
+void irq_remap_modify_chip_defaults(struct irq_chip *chip);
 
-#define irq_remapping_enabled  0
+#else  /* CONFIG_IRQ_REMAP */
 
 static inline void setup_irq_remapping_ops(void) { }
 static inline int irq_remapping_supported(void) { return 0; }
@@ -71,30 +67,30 @@ static inline int setup_ioapic_remapped_entry(int irq,
 {
        return -ENODEV;
 }
-static inline int set_remapped_irq_affinity(struct irq_data *data,
-                                           const struct cpumask *mask,
-                                           bool force)
-{
-       return 0;
-}
 static inline void free_remapped_irq(int irq) { }
 static inline void compose_remapped_msi_msg(struct pci_dev *pdev,
                                            unsigned int irq, unsigned int dest,
                                            struct msi_msg *msg, u8 hpet_id)
 {
 }
-static inline int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
+static inline int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
 {
        return -ENODEV;
 }
-static inline int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
-                                        int index, int sub_handle)
+
+static inline void panic_if_irq_remap(const char *msg)
+{
+}
+
+static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip)
 {
-       return -ENODEV;
 }
-static inline int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
+
+static inline bool setup_remapped_irq(int irq,
+                                     struct irq_cfg *cfg,
+                                     struct irq_chip *chip)
 {
-       return -ENODEV;
+       return false;
 }
 #endif /* CONFIG_IRQ_REMAP */
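The CONFIG_IRQ_REMAP=n half of this header keeps callers free of #ifdefs: helpers such as panic_if_irq_remap() become empty inlines that the compiler drops entirely. A stand-alone sketch of that compile-out pattern (the real helper additionally checks at run time whether remapping was actually enabled before panicking):

    #include <stdio.h>
    #include <stdlib.h>

    #define CONFIG_IRQ_REMAP 0     /* flip to 1 to mimic a remapping build */

    #if CONFIG_IRQ_REMAP
    static void panic_if_irq_remap(const char *msg)
    {
            /* stand-in for panic(): remapping build, complain loudly */
            fprintf(stderr, "panic: %s\n", msg);
            exit(1);
    }
    #else
    /* non-remapping build: the call site compiles to nothing */
    static inline void panic_if_irq_remap(const char *msg) { (void)msg; }
    #endif

    int main(void)
    {
            panic_if_irq_remap("feature X incompatible with interrupt remapping");
            printf("still running\n");
            return 0;
    }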
 
index 1508e518c7e3b1b98a8ee19c3c33920a0bcba170..aac5fa62a86caf91c82873712543017f89df1756 100644 (file)
 
 #define UV_BAU_MESSAGE                 0xf5
 
-/* Xen vector callback to receive events in a HVM domain */
-#define XEN_HVM_EVTCHN_CALLBACK                0xf3
+/* Vector on which hypervisor callbacks will be delivered */
+#define HYPERVISOR_CALLBACK_VECTOR     0xf3
 
 /*
  * Local APIC timer IRQ vector is on a different priority level,
index 5ed1f16187be177dc799531f4a1e0febd6f38d97..65231e173bafceb5895f49da00b95f39c3088beb 100644 (file)
@@ -85,13 +85,13 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
        return ret;
 }
 
-static inline int kvm_para_available(void)
+static inline bool kvm_para_available(void)
 {
        unsigned int eax, ebx, ecx, edx;
        char signature[13];
 
        if (boot_cpu_data.cpuid_level < 0)
-               return 0;       /* So we don't blow up on old processors */
+               return false;   /* So we don't blow up on old processors */
 
        if (cpu_has_hypervisor) {
                cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
@@ -101,10 +101,10 @@ static inline int kvm_para_available(void)
                signature[12] = 0;
 
                if (strcmp(signature, "KVMKVMKVM") == 0)
-                       return 1;
+                       return true;
        }
 
-       return 0;
+       return false;
 }
 
 static inline unsigned int kvm_arch_para_features(void)
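kvm_para_available() identifies KVM by reading the hypervisor CPUID leaf and reassembling the 12-byte vendor signature from the EBX/ECX/EDX words before comparing it against "KVMKVMKVM". The assembly step in isolation, with hard-coded register values standing in for real cpuid() output (little-endian, as on x86):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* stand-in values for the signature-leaf registers */
            unsigned int ebx = 0x4b4d564b;      /* "KVMK" */
            unsigned int ecx = 0x564b4d56;      /* "VMKV" */
            unsigned int edx = 0x0000004d;      /* "M"    */
            char signature[13];

            memcpy(signature + 0, &ebx, 4);
            memcpy(signature + 4, &ecx, 4);
            memcpy(signature + 8, &edx, 4);
            signature[12] = 0;

            printf("%s -> %s\n", signature,
                   strcmp(signature, "KVMKVMKVM") == 0 ? "KVM detected" : "not KVM");
            return 0;
    }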
index 48142971b25d095b9724a06a6eff47b2cf302532..79327e9483a34ec33c3ba71bbdc94397f2456f46 100644 (file)
 #define __asmlinkage_protect0(ret) \
        __asmlinkage_protect_n(ret)
 #define __asmlinkage_protect1(ret, arg1) \
-       __asmlinkage_protect_n(ret, "g" (arg1))
+       __asmlinkage_protect_n(ret, "m" (arg1))
 #define __asmlinkage_protect2(ret, arg1, arg2) \
-       __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2))
+       __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
 #define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
-       __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3))
+       __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
 #define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
-       __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
-                             "g" (arg4))
+       __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+                             "m" (arg4))
 #define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
-       __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
-                             "g" (arg4), "g" (arg5))
+       __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+                             "m" (arg4), "m" (arg5))
 #define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
-       __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
-                             "g" (arg4), "g" (arg5), "g" (arg6))
+       __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+                             "m" (arg4), "m" (arg5), "m" (arg6))
 
 #endif /* CONFIG_X86_32 */
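Switching the protect macros from "g" to "m" constraints appears intended to force each argument to stay live in memory: "g" lets the compiler satisfy the operand with a register or an immediate, which defeats the point of pinning the stack slots. A toy illustration of the constraint difference (not the kernel's __asmlinkage_protect_n, just the "m" idiom on its own):

    #include <stdio.h>

    /* an empty asm that names x as a memory input: the compiler must keep
     * an addressable copy of x around and cannot discard its stack slot */
    #define keep_in_memory(x) asm volatile("" : : "m" (x))

    int main(void)
    {
            int arg = 42;

            /* with "g" instead of "m", the operand could be a register or
             * even the constant 42, and the memory copy could be dropped */
            keep_in_memory(arg);
            printf("%d\n", arg);
            return 0;
    }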
 
index ecdfee60ee4afb0e3b21e3c313420fc565468e56..f4076af1f4ed695480318634837173827b18b34d 100644 (file)
@@ -3,6 +3,90 @@
 
 #include <uapi/asm/mce.h>
 
+/*
+ * Machine Check support for x86
+ */
+
+/* MCG_CAP register defines */
+#define MCG_BANKCNT_MASK       0xff         /* Number of Banks */
+#define MCG_CTL_P              (1ULL<<8)    /* MCG_CTL register available */
+#define MCG_EXT_P              (1ULL<<9)    /* Extended registers available */
+#define MCG_CMCI_P             (1ULL<<10)   /* CMCI supported */
+#define MCG_EXT_CNT_MASK       0xff0000     /* Number of Extended registers */
+#define MCG_EXT_CNT_SHIFT      16
+#define MCG_EXT_CNT(c)         (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
+#define MCG_SER_P              (1ULL<<24)   /* MCA recovery/new status bits */
+
+/* MCG_STATUS register defines */
+#define MCG_STATUS_RIPV  (1ULL<<0)   /* restart ip valid */
+#define MCG_STATUS_EIPV  (1ULL<<1)   /* ip points to correct instruction */
+#define MCG_STATUS_MCIP  (1ULL<<2)   /* machine check in progress */
+
+/* MCi_STATUS register defines */
+#define MCI_STATUS_VAL   (1ULL<<63)  /* valid error */
+#define MCI_STATUS_OVER  (1ULL<<62)  /* previous errors lost */
+#define MCI_STATUS_UC    (1ULL<<61)  /* uncorrected error */
+#define MCI_STATUS_EN    (1ULL<<60)  /* error enabled */
+#define MCI_STATUS_MISCV (1ULL<<59)  /* misc error reg. valid */
+#define MCI_STATUS_ADDRV (1ULL<<58)  /* addr reg. valid */
+#define MCI_STATUS_PCC   (1ULL<<57)  /* processor context corrupt */
+#define MCI_STATUS_S    (1ULL<<56)  /* Signaled machine check */
+#define MCI_STATUS_AR   (1ULL<<55)  /* Action required */
+#define MCACOD           0xffff     /* MCA Error Code */
+
+/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
+#define MCACOD_SCRUB   0x00C0  /* 0xC0-0xCF Memory Scrubbing */
+#define MCACOD_SCRUBMSK        0xfff0
+#define MCACOD_L3WB    0x017A  /* L3 Explicit Writeback */
+#define MCACOD_DATA    0x0134  /* Data Load */
+#define MCACOD_INSTR   0x0150  /* Instruction Fetch */
+
+/* MCi_MISC register defines */
+#define MCI_MISC_ADDR_LSB(m)   ((m) & 0x3f)
+#define MCI_MISC_ADDR_MODE(m)  (((m) >> 6) & 7)
+#define  MCI_MISC_ADDR_SEGOFF  0       /* segment offset */
+#define  MCI_MISC_ADDR_LINEAR  1       /* linear address */
+#define  MCI_MISC_ADDR_PHYS    2       /* physical address */
+#define  MCI_MISC_ADDR_MEM     3       /* memory address */
+#define  MCI_MISC_ADDR_GENERIC 7       /* generic */
+
+/* CTL2 register defines */
+#define MCI_CTL2_CMCI_EN               (1ULL << 30)
+#define MCI_CTL2_CMCI_THRESHOLD_MASK   0x7fffULL
+
+#define MCJ_CTX_MASK           3
+#define MCJ_CTX(flags)         ((flags) & MCJ_CTX_MASK)
+#define MCJ_CTX_RANDOM         0    /* inject context: random */
+#define MCJ_CTX_PROCESS                0x1  /* inject context: process */
+#define MCJ_CTX_IRQ            0x2  /* inject context: IRQ */
+#define MCJ_NMI_BROADCAST      0x4  /* do NMI broadcasting */
+#define MCJ_EXCEPTION          0x8  /* raise as exception */
+#define MCJ_IRQ_BRAODCAST      0x10 /* do IRQ broadcasting */
+
+#define MCE_OVERFLOW 0         /* bit 0 in flags means overflow */
+
+/* Software defined banks */
+#define MCE_EXTENDED_BANK      128
+#define MCE_THERMAL_BANK       (MCE_EXTENDED_BANK + 0)
+#define K8_MCE_THRESHOLD_BASE   (MCE_EXTENDED_BANK + 1)
+
+#define MCE_LOG_LEN 32
+#define MCE_LOG_SIGNATURE      "MACHINECHECK"
+
+/*
+ * This structure contains all data related to the MCE log.  Also
+ * carries a signature to make it easier to find from external
+ * debugging tools.  Each entry is only valid when its finished flag
+ * is set.
+ */
+struct mce_log {
+       char signature[12]; /* "MACHINECHECK" */
+       unsigned len;       /* = MCE_LOG_LEN */
+       unsigned next;
+       unsigned flags;
+       unsigned recordlen;     /* length of struct mce */
+       struct mce entry[MCE_LOG_LEN];
+};
 
 struct mca_config {
        bool dont_log_ce;
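MCG_CAP packs several small fields into one MSR: bits 7:0 hold the bank count, bit 10 flags CMCI support, and bits 23:16 hold the extended-register count, which is what the MCG_EXT_CNT() helper extracts. A quick worked example applying the same masks and shifts to a made-up MCG_CAP value:

    #include <stdio.h>

    #define MCG_BANKCNT_MASK   0xff          /* Number of Banks */
    #define MCG_CMCI_P         (1ULL << 10)  /* CMCI supported */
    #define MCG_EXT_CNT_MASK   0xff0000      /* Number of Extended registers */
    #define MCG_EXT_CNT_SHIFT  16
    #define MCG_EXT_CNT(c)     (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)

    int main(void)
    {
            /* hypothetical MCG_CAP reading, not from any particular CPU */
            unsigned long long cap = 0x100c09ULL;

            printf("banks    = %llu\n", cap & MCG_BANKCNT_MASK);  /* 9  */
            printf("ext regs = %llu\n", MCG_EXT_CNT(cap));        /* 16 */
            printf("CMCI     = %s\n", (cap & MCG_CMCI_P) ? "yes" : "no");
            return 0;
    }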
index 79ce5685ab64168bc8314566a0e9d976aa3335de..c2934be2446a73df2d66c97e32911b5d47860f98 100644 (file)
@@ -11,4 +11,8 @@ struct ms_hyperv_info {
 
 extern struct ms_hyperv_info ms_hyperv;
 
+void hyperv_callback_vector(void);
+void hyperv_vector_handler(struct pt_regs *regs);
+void hv_register_vmbus_handler(int irq, irq_handler_t handler);
+
 #endif
index dba7805176bf3cf705b5308d6c7d13b5142f9a61..c28fd02f4bf76e3fc7cc65bbfb946d5c324101f2 100644 (file)
@@ -121,9 +121,12 @@ static inline void x86_restore_msi_irqs(struct pci_dev *dev, int irq)
 #define arch_teardown_msi_irq x86_teardown_msi_irq
 #define arch_restore_msi_irqs x86_restore_msi_irqs
 /* implemented in arch/x86/kernel/apic/io_apic. */
+struct msi_desc;
 int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
 void native_teardown_msi_irq(unsigned int irq);
 void native_restore_msi_irqs(struct pci_dev *dev, int irq);
+int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+                 unsigned int irq_base, unsigned int irq_offset);
 /* default to the implementation in drivers/lib/msi.c */
 #define HAVE_DEFAULT_MSI_TEARDOWN_IRQS
 #define HAVE_DEFAULT_MSI_RESTORE_IRQS
index 4fabcdf1cfa74b6f6c7c04327e43fb5c7c1466ac..57cb634022136f39f3c9b4eebe9390b917717d89 100644 (file)
 #define ARCH_PERFMON_EVENTSEL_INV                      (1ULL << 23)
 #define ARCH_PERFMON_EVENTSEL_CMASK                    0xFF000000ULL
 
-#define AMD_PERFMON_EVENTSEL_GUESTONLY                 (1ULL << 40)
-#define AMD_PERFMON_EVENTSEL_HOSTONLY                  (1ULL << 41)
+#define AMD64_EVENTSEL_INT_CORE_ENABLE                 (1ULL << 36)
+#define AMD64_EVENTSEL_GUESTONLY                       (1ULL << 40)
+#define AMD64_EVENTSEL_HOSTONLY                                (1ULL << 41)
+
+#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT              37
+#define AMD64_EVENTSEL_INT_CORE_SEL_MASK               \
+       (0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)
 
 #define AMD64_EVENTSEL_EVENT   \
        (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
 #define AMD64_RAW_EVENT_MASK           \
        (X86_RAW_EVENT_MASK          |  \
         AMD64_EVENTSEL_EVENT)
+#define AMD64_RAW_EVENT_MASK_NB                \
+       (AMD64_EVENTSEL_EVENT        |  \
+        ARCH_PERFMON_EVENTSEL_UMASK)
 #define AMD64_NUM_COUNTERS                             4
 #define AMD64_NUM_COUNTERS_CORE                                6
+#define AMD64_NUM_COUNTERS_NB                          4
 
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL          0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK                (0x00 << 8)
index 5199db2923d31ff88b94c54397daae2b279a7bc7..fc304279b559d36ebe464206d56611c03e27b9d3 100644 (file)
@@ -142,6 +142,11 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
        return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
 }
 
+static inline unsigned long pud_pfn(pud_t pud)
+{
+       return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
 #define pte_page(pte)  pfn_to_page(pte_pfn(pte))
 
 static inline int pmd_large(pmd_t pte)
@@ -781,6 +786,18 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
        memcpy(dst, src, count * sizeof(pgd_t));
 }
 
+/*
+ * The x86 doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ */
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+               unsigned long addr, pte_t *ptep)
+{
+}
+static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+               unsigned long addr, pmd_t *pmd)
+{
+}
 
 #include <asm-generic/pgtable.h>
 #endif /* __ASSEMBLY__ */
index 8faa215a503e54469359ab25376cc316e7703f82..9ee322103c6da79835fbae00772e9d6a7d60135c 100644 (file)
@@ -66,13 +66,6 @@ do {                                         \
        __flush_tlb_one((vaddr));               \
 } while (0)
 
-/*
- * The i386 doesn't have any external MMU info: the kernel page
- * tables contain all the necessary information.
- */
-#define update_mmu_cache(vma, address, ptep) do { } while (0)
-#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
-
 #endif /* !__ASSEMBLY__ */
 
 /*
index 47356f9df82e411ca3aa4e0025611d37451064b1..615b0c78449fd474257a3a63bd58c5c4ba52042f 100644 (file)
@@ -142,9 +142,6 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
 #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
 #define pte_unmap(pte) ((void)(pte))/* NOP */
 
-#define update_mmu_cache(vma, address, ptep) do { } while (0)
-#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
-
 /* Encode and de-code a swap entry */
 #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
 #define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
index 888184b2fc85c7987a63d365e9bfd1d334eb9372..cf500543f6ffd6334d67a8caaf93a80504d65699 100644 (file)
@@ -943,7 +943,7 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
 extern int get_tsc_mode(unsigned long adr);
 extern int set_tsc_mode(unsigned int val);
 
-extern int amd_get_nb_id(int cpu);
+extern u16 amd_get_nb_id(int cpu);
 
 struct aperfmperf {
        u64 aperf, mperf;
index 6c7fc25f2c34a0eab822c2d6ab17c3b1cacbf0a0..5c6e4fb370f5aac25ee36002022ed72a59592f5e 100644 (file)
 # define NEED_NOPL     0
 #endif
 
+#ifdef CONFIG_MATOM
+# define NEED_MOVBE    (1<<(X86_FEATURE_MOVBE & 31))
+#else
+# define NEED_MOVBE    0
+#endif
+
 #ifdef CONFIG_X86_64
 #ifdef CONFIG_PARAVIRT
 /* Paravirtualized systems may not have PSE or PGE available */
@@ -80,7 +86,7 @@
 
 #define REQUIRED_MASK2 0
 #define REQUIRED_MASK3 (NEED_NOPL)
-#define REQUIRED_MASK4 0
+#define REQUIRED_MASK4 (NEED_MOVBE)
 #define REQUIRED_MASK5 0
 #define REQUIRED_MASK6 0
 #define REQUIRED_MASK7 0
index b47c2a82ff1546a7efcd77a820a990d1e08bef00..062921ef34e9136100d3b4820826642dd45f223b 100644 (file)
@@ -16,7 +16,7 @@ extern void uv_system_init(void);
 extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
                                                 struct mm_struct *mm,
                                                 unsigned long start,
-                                                unsigned end,
+                                                unsigned long end,
                                                 unsigned int cpu);
 
 #else  /* X86_UV */
index 21f7385badb8f9eb4249aef142487e27b264b32d..2c32df95bb7834737a2a7e440d29ebbea408fa82 100644 (file)
@@ -5,7 +5,7 @@
  *
  * SGI UV architectural definitions
  *
- * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2013 Silicon Graphics, Inc. All rights reserved.
  */
 
 #ifndef _ASM_X86_UV_UV_HUB_H
@@ -175,6 +175,7 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
  */
 #define UV1_HUB_REVISION_BASE          1
 #define UV2_HUB_REVISION_BASE          3
+#define UV3_HUB_REVISION_BASE          5
 
 static inline int is_uv1_hub(void)
 {
@@ -182,6 +183,23 @@ static inline int is_uv1_hub(void)
 }
 
 static inline int is_uv2_hub(void)
+{
+       return ((uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE) &&
+               (uv_hub_info->hub_revision < UV3_HUB_REVISION_BASE));
+}
+
+static inline int is_uv3_hub(void)
+{
+       return uv_hub_info->hub_revision >= UV3_HUB_REVISION_BASE;
+}
+
+static inline int is_uv_hub(void)
+{
+       return uv_hub_info->hub_revision;
+}
+
+/* code common to uv2 and uv3 only */
+static inline int is_uvx_hub(void)
 {
        return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE;
 }
@@ -230,14 +248,23 @@ union uvh_apicid {
 #define UV2_LOCAL_MMR_SIZE             (32UL * 1024 * 1024)
 #define UV2_GLOBAL_MMR32_SIZE          (32UL * 1024 * 1024)
 
-#define UV_LOCAL_MMR_BASE              (is_uv1_hub() ? UV1_LOCAL_MMR_BASE     \
-                                               : UV2_LOCAL_MMR_BASE)
-#define UV_GLOBAL_MMR32_BASE           (is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE  \
-                                               : UV2_GLOBAL_MMR32_BASE)
-#define UV_LOCAL_MMR_SIZE              (is_uv1_hub() ? UV1_LOCAL_MMR_SIZE :   \
-                                               UV2_LOCAL_MMR_SIZE)
+#define UV3_LOCAL_MMR_BASE             0xfa000000UL
+#define UV3_GLOBAL_MMR32_BASE          0xfc000000UL
+#define UV3_LOCAL_MMR_SIZE             (32UL * 1024 * 1024)
+#define UV3_GLOBAL_MMR32_SIZE          (32UL * 1024 * 1024)
+
+#define UV_LOCAL_MMR_BASE              (is_uv1_hub() ? UV1_LOCAL_MMR_BASE : \
+                                       (is_uv2_hub() ? UV2_LOCAL_MMR_BASE : \
+                                                       UV3_LOCAL_MMR_BASE))
+#define UV_GLOBAL_MMR32_BASE           (is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE :\
+                                       (is_uv2_hub() ? UV2_GLOBAL_MMR32_BASE :\
+                                                       UV3_GLOBAL_MMR32_BASE))
+#define UV_LOCAL_MMR_SIZE              (is_uv1_hub() ? UV1_LOCAL_MMR_SIZE : \
+                                       (is_uv2_hub() ? UV2_LOCAL_MMR_SIZE : \
+                                                       UV3_LOCAL_MMR_SIZE))
 #define UV_GLOBAL_MMR32_SIZE           (is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE :\
-                                               UV2_GLOBAL_MMR32_SIZE)
+                                       (is_uv2_hub() ? UV2_GLOBAL_MMR32_SIZE :\
+                                                       UV3_GLOBAL_MMR32_SIZE))
 #define UV_GLOBAL_MMR64_BASE           (uv_hub_info->global_mmr_base)
 
 #define UV_GLOBAL_GRU_MMR_BASE         0x4000000
@@ -599,6 +626,7 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
  *     1 - UV1 rev 1.0 initial silicon
  *     2 - UV1 rev 2.0 production silicon
  *     3 - UV2 rev 1.0 initial silicon
+ *     5 - UV3 rev 1.0 initial silicon
  */
 static inline int uv_get_min_hub_revision_id(void)
 {
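Hub identification comes down to which range hub_revision falls in: UV1 starts at 1, UV2 at 3, UV3 at 5, so is_uv2_hub() needs a half-open range check while is_uv3_hub() only needs the lower bound. A stand-alone rehash of those comparisons over a few sample revision values (the UV1 check, not shown in this hunk, is assumed to be the complementary "< UV2 base" test):

    #include <stdio.h>

    #define UV1_HUB_REVISION_BASE  1
    #define UV2_HUB_REVISION_BASE  3
    #define UV3_HUB_REVISION_BASE  5

    static int is_uv1(int rev) { return rev < UV2_HUB_REVISION_BASE; }
    static int is_uv2(int rev)
    {
            return rev >= UV2_HUB_REVISION_BASE && rev < UV3_HUB_REVISION_BASE;
    }
    static int is_uv3(int rev) { return rev >= UV3_HUB_REVISION_BASE; }

    int main(void)
    {
            int rev;

            for (rev = 1; rev <= 5; rev++)
                    printf("revision %d: uv1=%d uv2=%d uv3=%d\n",
                           rev, is_uv1(rev), is_uv2(rev), is_uv3(rev));
            return 0;
    }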
index cf1d73643f60723dc514b52e66969ed46a9c01a4..bd5f80e58a236b08961d898f37004670a52e9e02 100644 (file)
@@ -5,16 +5,25 @@
  *
  * SGI UV MMR definitions
  *
- * Copyright (C) 2007-2011 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2013 Silicon Graphics, Inc. All rights reserved.
  */
 
 #ifndef _ASM_X86_UV_UV_MMRS_H
 #define _ASM_X86_UV_UV_MMRS_H
 
 /*
- * This file contains MMR definitions for both UV1 & UV2 hubs.
+ * This file contains MMR definitions for all UV hub types.
  *
- * In general, MMR addresses and structures are identical on both hubs.
+ * To minimize coding differences between hub types, the symbols are
+ * grouped by architecture types.
+ *
+ * UVH  - definitions common to all UV hub types.
+ * UVXH - definitions common to all UV eXtended hub types (currently 2 & 3).
+ * UV1H - definitions specific to UV type 1 hub.
+ * UV2H - definitions specific to UV type 2 hub.
+ * UV3H - definitions specific to UV type 3 hub.
+ *
+ * So in general, MMR addresses and structures are identical on all hub types.
  * These MMRs are identified as:
  *     #define UVH_xxx         <address>
  *     union uvh_xxx {
  *             } s;
  *     };
  *
- * If the MMR exists on both hub type but has different addresses or
- * contents, the MMR definition is similar to:
- *     #define UV1H_xxx        <uv1 address>
- *     #define UV2H_xxx        <uv2address>
- *     #define UVH_xxx         (is_uv1_hub() ? UV1H_xxx : UV2H_xxx)
+ * If the MMR exists on all hub types but has different addresses:
+ *     #define UV1Hxxx a
+ *     #define UV2Hxxx b
+ *     #define UV3Hxxx c
+ *     #define UVHxxx  (is_uv1_hub() ? UV1Hxxx :
+ *                     (is_uv2_hub() ? UV2Hxxx :
+ *                                     UV3Hxxx))
+ *
+ * If the MMR exists on all hub types > 1 but has different addresses:
+ *     #define UV2Hxxx b
+ *     #define UV3Hxxx c
+ *     #define UVXHxxx (is_uv2_hub() ? UV2Hxxx :
+ *                                     UV3Hxxx)
+ *
  *     union uvh_xxx {
  *             unsigned long       v;
- *             struct uv1h_int_cmpd_s {         (Common fields only)
+ *             struct uvh_xxx_s {       # Common fields only
  *             } s;
- *             struct uv1h_int_cmpd_s {         (Full UV1 definition)
+ *             struct uv1h_xxx_s {      # Full UV1 definition (*)
  *             } s1;
- *             struct uv2h_int_cmpd_s {         (Full UV2 definition)
+ *             struct uv2h_xxx_s {      # Full UV2 definition (*)
  *             } s2;
+ *             struct uv3h_xxx_s {      # Full UV3 definition (*)
+ *             } s3;
  *     };
+ *             (* - if present and different from the common struct)
  *
- * Only essential difference are enumerated. For example, if the address is
- * the same for both UV1 & UV2, only a single #define is generated. Likewise,
- * if the contents is the same for both hubs, only the "s" structure is
+ * Only essential differences are enumerated. For example, if the address is
+ * the same for all UVs, only a single #define is generated. Likewise,
+ * if the contents are the same for all hubs, only the "s" structure is
  * generated.
  *
  * If the MMR exists on ONLY 1 type of hub, no generic definition is
@@ -51,6 +72,8 @@
  *             struct uvh_int_cmpd_s {
  *             } sn;
  *     };
+ *
+ * (GEN Flags: mflags_opt= undefs=0 UV23=UVXH)
  */
 
 #define UV_MMR_ENABLE          (1UL << 63)
 #define UV1_HUB_PART_NUMBER    0x88a5
 #define UV2_HUB_PART_NUMBER    0x8eb8
 #define UV2_HUB_PART_NUMBER_X  0x1111
+#define UV3_HUB_PART_NUMBER    0x9578
+#define UV3_HUB_PART_NUMBER_X  0x4321
 
-/* Compat: if this #define is present, UV headers support UV2 */
+/* Compat: Indicate which UV Hubs are supported. */
 #define UV2_HUB_IS_SUPPORTED   1
+#define UV3_HUB_IS_SUPPORTED   1
 
 /* ========================================================================= */
 /*                          UVH_BAU_DATA_BROADCAST                           */
 /* ========================================================================= */
-#define UVH_BAU_DATA_BROADCAST                         0x61688UL
-#define UVH_BAU_DATA_BROADCAST_32                      0x440
+#define UVH_BAU_DATA_BROADCAST 0x61688UL
+#define UVH_BAU_DATA_BROADCAST_32 0x440
 
 #define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT             0
 #define UVH_BAU_DATA_BROADCAST_ENABLE_MASK             0x0000000000000001UL
@@ -82,8 +108,8 @@ union uvh_bau_data_broadcast_u {
 /* ========================================================================= */
 /*                           UVH_BAU_DATA_CONFIG                             */
 /* ========================================================================= */
-#define UVH_BAU_DATA_CONFIG                            0x61680UL
-#define UVH_BAU_DATA_CONFIG_32                         0x438
+#define UVH_BAU_DATA_CONFIG 0x61680UL
+#define UVH_BAU_DATA_CONFIG_32 0x438
 
 #define UVH_BAU_DATA_CONFIG_VECTOR_SHFT                        0
 #define UVH_BAU_DATA_CONFIG_DM_SHFT                    8
@@ -121,10 +147,14 @@ union uvh_bau_data_config_u {
 /* ========================================================================= */
 /*                           UVH_EVENT_OCCURRED0                             */
 /* ========================================================================= */
-#define UVH_EVENT_OCCURRED0                            0x70000UL
-#define UVH_EVENT_OCCURRED0_32                         0x5e8
+#define UVH_EVENT_OCCURRED0 0x70000UL
+#define UVH_EVENT_OCCURRED0_32 0x5e8
+
+#define UVH_EVENT_OCCURRED0_LB_HCERR_SHFT              0
+#define UVH_EVENT_OCCURRED0_RH_AOERR0_SHFT             11
+#define UVH_EVENT_OCCURRED0_LB_HCERR_MASK              0x0000000000000001UL
+#define UVH_EVENT_OCCURRED0_RH_AOERR0_MASK             0x0000000000000800UL
 
-#define UV1H_EVENT_OCCURRED0_LB_HCERR_SHFT             0
 #define UV1H_EVENT_OCCURRED0_GR0_HCERR_SHFT            1
 #define UV1H_EVENT_OCCURRED0_GR1_HCERR_SHFT            2
 #define UV1H_EVENT_OCCURRED0_LH_HCERR_SHFT             3
@@ -135,7 +165,6 @@ union uvh_bau_data_config_u {
 #define UV1H_EVENT_OCCURRED0_GR0_AOERR0_SHFT           8
 #define UV1H_EVENT_OCCURRED0_GR1_AOERR0_SHFT           9
 #define UV1H_EVENT_OCCURRED0_LH_AOERR0_SHFT            10
-#define UV1H_EVENT_OCCURRED0_RH_AOERR0_SHFT            11
 #define UV1H_EVENT_OCCURRED0_XN_AOERR0_SHFT            12
 #define UV1H_EVENT_OCCURRED0_SI_AOERR0_SHFT            13
 #define UV1H_EVENT_OCCURRED0_LB_AOERR1_SHFT            14
@@ -181,7 +210,6 @@ union uvh_bau_data_config_u {
 #define UV1H_EVENT_OCCURRED0_RTC3_SHFT                 54
 #define UV1H_EVENT_OCCURRED0_BAU_DATA_SHFT             55
 #define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
-#define UV1H_EVENT_OCCURRED0_LB_HCERR_MASK             0x0000000000000001UL
 #define UV1H_EVENT_OCCURRED0_GR0_HCERR_MASK            0x0000000000000002UL
 #define UV1H_EVENT_OCCURRED0_GR1_HCERR_MASK            0x0000000000000004UL
 #define UV1H_EVENT_OCCURRED0_LH_HCERR_MASK             0x0000000000000008UL
@@ -192,7 +220,6 @@ union uvh_bau_data_config_u {
 #define UV1H_EVENT_OCCURRED0_GR0_AOERR0_MASK           0x0000000000000100UL
 #define UV1H_EVENT_OCCURRED0_GR1_AOERR0_MASK           0x0000000000000200UL
 #define UV1H_EVENT_OCCURRED0_LH_AOERR0_MASK            0x0000000000000400UL
-#define UV1H_EVENT_OCCURRED0_RH_AOERR0_MASK            0x0000000000000800UL
 #define UV1H_EVENT_OCCURRED0_XN_AOERR0_MASK            0x0000000000001000UL
 #define UV1H_EVENT_OCCURRED0_SI_AOERR0_MASK            0x0000000000002000UL
 #define UV1H_EVENT_OCCURRED0_LB_AOERR1_MASK            0x0000000000004000UL
@@ -239,188 +266,130 @@ union uvh_bau_data_config_u {
 #define UV1H_EVENT_OCCURRED0_BAU_DATA_MASK             0x0080000000000000UL
 #define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
 
-#define UV2H_EVENT_OCCURRED0_LB_HCERR_SHFT             0
-#define UV2H_EVENT_OCCURRED0_QP_HCERR_SHFT             1
-#define UV2H_EVENT_OCCURRED0_RH_HCERR_SHFT             2
-#define UV2H_EVENT_OCCURRED0_LH0_HCERR_SHFT            3
-#define UV2H_EVENT_OCCURRED0_LH1_HCERR_SHFT            4
-#define UV2H_EVENT_OCCURRED0_GR0_HCERR_SHFT            5
-#define UV2H_EVENT_OCCURRED0_GR1_HCERR_SHFT            6
-#define UV2H_EVENT_OCCURRED0_NI0_HCERR_SHFT            7
-#define UV2H_EVENT_OCCURRED0_NI1_HCERR_SHFT            8
-#define UV2H_EVENT_OCCURRED0_LB_AOERR0_SHFT            9
-#define UV2H_EVENT_OCCURRED0_QP_AOERR0_SHFT            10
-#define UV2H_EVENT_OCCURRED0_RH_AOERR0_SHFT            11
-#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_SHFT           12
-#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_SHFT           13
-#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_SHFT           14
-#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_SHFT           15
-#define UV2H_EVENT_OCCURRED0_XB_AOERR0_SHFT            16
-#define UV2H_EVENT_OCCURRED0_RT_AOERR0_SHFT            17
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_SHFT           18
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_SHFT           19
-#define UV2H_EVENT_OCCURRED0_LB_AOERR1_SHFT            20
-#define UV2H_EVENT_OCCURRED0_QP_AOERR1_SHFT            21
-#define UV2H_EVENT_OCCURRED0_RH_AOERR1_SHFT            22
-#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_SHFT           23
-#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_SHFT           24
-#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_SHFT           25
-#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_SHFT           26
-#define UV2H_EVENT_OCCURRED0_XB_AOERR1_SHFT            27
-#define UV2H_EVENT_OCCURRED0_RT_AOERR1_SHFT            28
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_SHFT           29
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_SHFT           30
-#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT  31
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT         32
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT         33
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT         34
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT         35
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT         36
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT         37
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT         38
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT         39
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT         40
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT         41
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT                42
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT                43
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT                44
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT                45
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT                46
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT                47
-#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_SHFT           48
-#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_SHFT           49
-#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT           50
-#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT           51
-#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT       52
-#define UV2H_EVENT_OCCURRED0_IPI_INT_SHFT              53
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT           54
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_SHFT           55
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_SHFT           56
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_SHFT           57
-#define UV2H_EVENT_OCCURRED0_PROFILE_INT_SHFT          58
-#define UV2H_EVENT_OCCURRED0_LB_HCERR_MASK             0x0000000000000001UL
-#define UV2H_EVENT_OCCURRED0_QP_HCERR_MASK             0x0000000000000002UL
-#define UV2H_EVENT_OCCURRED0_RH_HCERR_MASK             0x0000000000000004UL
-#define UV2H_EVENT_OCCURRED0_LH0_HCERR_MASK            0x0000000000000008UL
-#define UV2H_EVENT_OCCURRED0_LH1_HCERR_MASK            0x0000000000000010UL
-#define UV2H_EVENT_OCCURRED0_GR0_HCERR_MASK            0x0000000000000020UL
-#define UV2H_EVENT_OCCURRED0_GR1_HCERR_MASK            0x0000000000000040UL
-#define UV2H_EVENT_OCCURRED0_NI0_HCERR_MASK            0x0000000000000080UL
-#define UV2H_EVENT_OCCURRED0_NI1_HCERR_MASK            0x0000000000000100UL
-#define UV2H_EVENT_OCCURRED0_LB_AOERR0_MASK            0x0000000000000200UL
-#define UV2H_EVENT_OCCURRED0_QP_AOERR0_MASK            0x0000000000000400UL
-#define UV2H_EVENT_OCCURRED0_RH_AOERR0_MASK            0x0000000000000800UL
-#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_MASK           0x0000000000001000UL
-#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_MASK           0x0000000000002000UL
-#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_MASK           0x0000000000004000UL
-#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_MASK           0x0000000000008000UL
-#define UV2H_EVENT_OCCURRED0_XB_AOERR0_MASK            0x0000000000010000UL
-#define UV2H_EVENT_OCCURRED0_RT_AOERR0_MASK            0x0000000000020000UL
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_MASK           0x0000000000040000UL
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_MASK           0x0000000000080000UL
-#define UV2H_EVENT_OCCURRED0_LB_AOERR1_MASK            0x0000000000100000UL
-#define UV2H_EVENT_OCCURRED0_QP_AOERR1_MASK            0x0000000000200000UL
-#define UV2H_EVENT_OCCURRED0_RH_AOERR1_MASK            0x0000000000400000UL
-#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_MASK           0x0000000000800000UL
-#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_MASK           0x0000000001000000UL
-#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_MASK           0x0000000002000000UL
-#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_MASK           0x0000000004000000UL
-#define UV2H_EVENT_OCCURRED0_XB_AOERR1_MASK            0x0000000008000000UL
-#define UV2H_EVENT_OCCURRED0_RT_AOERR1_MASK            0x0000000010000000UL
-#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_MASK           0x0000000020000000UL
-#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_MASK           0x0000000040000000UL
-#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK  0x0000000080000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK         0x0000000100000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK         0x0000000200000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK         0x0000000400000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK         0x0000000800000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK         0x0000001000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK         0x0000002000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK         0x0000004000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK         0x0000008000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK         0x0000010000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK         0x0000020000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK                0x0000040000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK                0x0000080000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK                0x0000100000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK                0x0000200000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK                0x0000400000000000UL
-#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK                0x0000800000000000UL
-#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_MASK           0x0001000000000000UL
-#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_MASK           0x0002000000000000UL
-#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_MASK           0x0004000000000000UL
-#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_MASK           0x0008000000000000UL
-#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK       0x0010000000000000UL
-#define UV2H_EVENT_OCCURRED0_IPI_INT_MASK              0x0020000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_MASK           0x0040000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_MASK           0x0080000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_MASK           0x0100000000000000UL
-#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_MASK           0x0200000000000000UL
-#define UV2H_EVENT_OCCURRED0_PROFILE_INT_MASK          0x0400000000000000UL
+#define UVXH_EVENT_OCCURRED0_QP_HCERR_SHFT             1
+#define UVXH_EVENT_OCCURRED0_RH_HCERR_SHFT             2
+#define UVXH_EVENT_OCCURRED0_LH0_HCERR_SHFT            3
+#define UVXH_EVENT_OCCURRED0_LH1_HCERR_SHFT            4
+#define UVXH_EVENT_OCCURRED0_GR0_HCERR_SHFT            5
+#define UVXH_EVENT_OCCURRED0_GR1_HCERR_SHFT            6
+#define UVXH_EVENT_OCCURRED0_NI0_HCERR_SHFT            7
+#define UVXH_EVENT_OCCURRED0_NI1_HCERR_SHFT            8
+#define UVXH_EVENT_OCCURRED0_LB_AOERR0_SHFT            9
+#define UVXH_EVENT_OCCURRED0_QP_AOERR0_SHFT            10
+#define UVXH_EVENT_OCCURRED0_LH0_AOERR0_SHFT           12
+#define UVXH_EVENT_OCCURRED0_LH1_AOERR0_SHFT           13
+#define UVXH_EVENT_OCCURRED0_GR0_AOERR0_SHFT           14
+#define UVXH_EVENT_OCCURRED0_GR1_AOERR0_SHFT           15
+#define UVXH_EVENT_OCCURRED0_XB_AOERR0_SHFT            16
+#define UVXH_EVENT_OCCURRED0_RT_AOERR0_SHFT            17
+#define UVXH_EVENT_OCCURRED0_NI0_AOERR0_SHFT           18
+#define UVXH_EVENT_OCCURRED0_NI1_AOERR0_SHFT           19
+#define UVXH_EVENT_OCCURRED0_LB_AOERR1_SHFT            20
+#define UVXH_EVENT_OCCURRED0_QP_AOERR1_SHFT            21
+#define UVXH_EVENT_OCCURRED0_RH_AOERR1_SHFT            22
+#define UVXH_EVENT_OCCURRED0_LH0_AOERR1_SHFT           23
+#define UVXH_EVENT_OCCURRED0_LH1_AOERR1_SHFT           24
+#define UVXH_EVENT_OCCURRED0_GR0_AOERR1_SHFT           25
+#define UVXH_EVENT_OCCURRED0_GR1_AOERR1_SHFT           26
+#define UVXH_EVENT_OCCURRED0_XB_AOERR1_SHFT            27
+#define UVXH_EVENT_OCCURRED0_RT_AOERR1_SHFT            28
+#define UVXH_EVENT_OCCURRED0_NI0_AOERR1_SHFT           29
+#define UVXH_EVENT_OCCURRED0_NI1_AOERR1_SHFT           30
+#define UVXH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT  31
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT         32
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT         33
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT         34
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT         35
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT         36
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT         37
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT         38
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT         39
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT         40
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT         41
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT                42
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT                43
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT                44
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT                45
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT                46
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT                47
+#define UVXH_EVENT_OCCURRED0_L1_NMI_INT_SHFT           48
+#define UVXH_EVENT_OCCURRED0_STOP_CLOCK_SHFT           49
+#define UVXH_EVENT_OCCURRED0_ASIC_TO_L1_SHFT           50
+#define UVXH_EVENT_OCCURRED0_L1_TO_ASIC_SHFT           51
+#define UVXH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT       52
+#define UVXH_EVENT_OCCURRED0_IPI_INT_SHFT              53
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT0_SHFT           54
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT1_SHFT           55
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT2_SHFT           56
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT3_SHFT           57
+#define UVXH_EVENT_OCCURRED0_PROFILE_INT_SHFT          58
+#define UVXH_EVENT_OCCURRED0_QP_HCERR_MASK             0x0000000000000002UL
+#define UVXH_EVENT_OCCURRED0_RH_HCERR_MASK             0x0000000000000004UL
+#define UVXH_EVENT_OCCURRED0_LH0_HCERR_MASK            0x0000000000000008UL
+#define UVXH_EVENT_OCCURRED0_LH1_HCERR_MASK            0x0000000000000010UL
+#define UVXH_EVENT_OCCURRED0_GR0_HCERR_MASK            0x0000000000000020UL
+#define UVXH_EVENT_OCCURRED0_GR1_HCERR_MASK            0x0000000000000040UL
+#define UVXH_EVENT_OCCURRED0_NI0_HCERR_MASK            0x0000000000000080UL
+#define UVXH_EVENT_OCCURRED0_NI1_HCERR_MASK            0x0000000000000100UL
+#define UVXH_EVENT_OCCURRED0_LB_AOERR0_MASK            0x0000000000000200UL
+#define UVXH_EVENT_OCCURRED0_QP_AOERR0_MASK            0x0000000000000400UL
+#define UVXH_EVENT_OCCURRED0_LH0_AOERR0_MASK           0x0000000000001000UL
+#define UVXH_EVENT_OCCURRED0_LH1_AOERR0_MASK           0x0000000000002000UL
+#define UVXH_EVENT_OCCURRED0_GR0_AOERR0_MASK           0x0000000000004000UL
+#define UVXH_EVENT_OCCURRED0_GR1_AOERR0_MASK           0x0000000000008000UL
+#define UVXH_EVENT_OCCURRED0_XB_AOERR0_MASK            0x0000000000010000UL
+#define UVXH_EVENT_OCCURRED0_RT_AOERR0_MASK            0x0000000000020000UL
+#define UVXH_EVENT_OCCURRED0_NI0_AOERR0_MASK           0x0000000000040000UL
+#define UVXH_EVENT_OCCURRED0_NI1_AOERR0_MASK           0x0000000000080000UL
+#define UVXH_EVENT_OCCURRED0_LB_AOERR1_MASK            0x0000000000100000UL
+#define UVXH_EVENT_OCCURRED0_QP_AOERR1_MASK            0x0000000000200000UL
+#define UVXH_EVENT_OCCURRED0_RH_AOERR1_MASK            0x0000000000400000UL
+#define UVXH_EVENT_OCCURRED0_LH0_AOERR1_MASK           0x0000000000800000UL
+#define UVXH_EVENT_OCCURRED0_LH1_AOERR1_MASK           0x0000000001000000UL
+#define UVXH_EVENT_OCCURRED0_GR0_AOERR1_MASK           0x0000000002000000UL
+#define UVXH_EVENT_OCCURRED0_GR1_AOERR1_MASK           0x0000000004000000UL
+#define UVXH_EVENT_OCCURRED0_XB_AOERR1_MASK            0x0000000008000000UL
+#define UVXH_EVENT_OCCURRED0_RT_AOERR1_MASK            0x0000000010000000UL
+#define UVXH_EVENT_OCCURRED0_NI0_AOERR1_MASK           0x0000000020000000UL
+#define UVXH_EVENT_OCCURRED0_NI1_AOERR1_MASK           0x0000000040000000UL
+#define UVXH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK  0x0000000080000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK         0x0000000100000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK         0x0000000200000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK         0x0000000400000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK         0x0000000800000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK         0x0000001000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK         0x0000002000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK         0x0000004000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK         0x0000008000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK         0x0000010000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK         0x0000020000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK                0x0000040000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK                0x0000080000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK                0x0000100000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK                0x0000200000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK                0x0000400000000000UL
+#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK                0x0000800000000000UL
+#define UVXH_EVENT_OCCURRED0_L1_NMI_INT_MASK           0x0001000000000000UL
+#define UVXH_EVENT_OCCURRED0_STOP_CLOCK_MASK           0x0002000000000000UL
+#define UVXH_EVENT_OCCURRED0_ASIC_TO_L1_MASK           0x0004000000000000UL
+#define UVXH_EVENT_OCCURRED0_L1_TO_ASIC_MASK           0x0008000000000000UL
+#define UVXH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK       0x0010000000000000UL
+#define UVXH_EVENT_OCCURRED0_IPI_INT_MASK              0x0020000000000000UL
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT0_MASK           0x0040000000000000UL
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT1_MASK           0x0080000000000000UL
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT2_MASK           0x0100000000000000UL
+#define UVXH_EVENT_OCCURRED0_EXTIO_INT3_MASK           0x0200000000000000UL
+#define UVXH_EVENT_OCCURRED0_PROFILE_INT_MASK          0x0400000000000000UL
 
 union uvh_event_occurred0_u {
        unsigned long   v;
-       struct uv1h_event_occurred0_s {
+       struct uvh_event_occurred0_s {
                unsigned long   lb_hcerr:1;                     /* RW, W1C */
-               unsigned long   gr0_hcerr:1;                    /* RW, W1C */
-               unsigned long   gr1_hcerr:1;                    /* RW, W1C */
-               unsigned long   lh_hcerr:1;                     /* RW, W1C */
-               unsigned long   rh_hcerr:1;                     /* RW, W1C */
-               unsigned long   xn_hcerr:1;                     /* RW, W1C */
-               unsigned long   si_hcerr:1;                     /* RW, W1C */
-               unsigned long   lb_aoerr0:1;                    /* RW, W1C */
-               unsigned long   gr0_aoerr0:1;                   /* RW, W1C */
-               unsigned long   gr1_aoerr0:1;                   /* RW, W1C */
-               unsigned long   lh_aoerr0:1;                    /* RW, W1C */
+               unsigned long   rsvd_1_10:10;
                unsigned long   rh_aoerr0:1;                    /* RW, W1C */
-               unsigned long   xn_aoerr0:1;                    /* RW, W1C */
-               unsigned long   si_aoerr0:1;                    /* RW, W1C */
-               unsigned long   lb_aoerr1:1;                    /* RW, W1C */
-               unsigned long   gr0_aoerr1:1;                   /* RW, W1C */
-               unsigned long   gr1_aoerr1:1;                   /* RW, W1C */
-               unsigned long   lh_aoerr1:1;                    /* RW, W1C */
-               unsigned long   rh_aoerr1:1;                    /* RW, W1C */
-               unsigned long   xn_aoerr1:1;                    /* RW, W1C */
-               unsigned long   si_aoerr1:1;                    /* RW, W1C */
-               unsigned long   rh_vpi_int:1;                   /* RW, W1C */
-               unsigned long   system_shutdown_int:1;          /* RW, W1C */
-               unsigned long   lb_irq_int_0:1;                 /* RW, W1C */
-               unsigned long   lb_irq_int_1:1;                 /* RW, W1C */
-               unsigned long   lb_irq_int_2:1;                 /* RW, W1C */
-               unsigned long   lb_irq_int_3:1;                 /* RW, W1C */
-               unsigned long   lb_irq_int_4:1;                 /* RW, W1C */
-               unsigned long   lb_irq_int_5:1;                 /* RW, W1C */
-               unsigned long   lb_irq_int_6:1;                 /* RW, W1C */
-               unsigned long   lb_irq_int_7:1;                 /* RW, W1C */
-               unsigned long   lb_irq_int_8:1;                 /* RW, W1C */
-               unsigned long   lb_irq_int_9:1;                 /* RW, W1C */
-               unsigned long   lb_irq_int_10:1;                /* RW, W1C */
-               unsigned long   lb_irq_int_11:1;                /* RW, W1C */
-               unsigned long   lb_irq_int_12:1;                /* RW, W1C */
-               unsigned long   lb_irq_int_13:1;                /* RW, W1C */
-               unsigned long   lb_irq_int_14:1;                /* RW, W1C */
-               unsigned long   lb_irq_int_15:1;                /* RW, W1C */
-               unsigned long   l1_nmi_int:1;                   /* RW, W1C */
-               unsigned long   stop_clock:1;                   /* RW, W1C */
-               unsigned long   asic_to_l1:1;                   /* RW, W1C */
-               unsigned long   l1_to_asic:1;                   /* RW, W1C */
-               unsigned long   ltc_int:1;                      /* RW, W1C */
-               unsigned long   la_seq_trigger:1;               /* RW, W1C */
-               unsigned long   ipi_int:1;                      /* RW, W1C */
-               unsigned long   extio_int0:1;                   /* RW, W1C */
-               unsigned long   extio_int1:1;                   /* RW, W1C */
-               unsigned long   extio_int2:1;                   /* RW, W1C */
-               unsigned long   extio_int3:1;                   /* RW, W1C */
-               unsigned long   profile_int:1;                  /* RW, W1C */
-               unsigned long   rtc0:1;                         /* RW, W1C */
-               unsigned long   rtc1:1;                         /* RW, W1C */
-               unsigned long   rtc2:1;                         /* RW, W1C */
-               unsigned long   rtc3:1;                         /* RW, W1C */
-               unsigned long   bau_data:1;                     /* RW, W1C */
-               unsigned long   power_management_req:1;         /* RW, W1C */
-               unsigned long   rsvd_57_63:7;
-       } s1;
-       struct uv2h_event_occurred0_s {
+               unsigned long   rsvd_12_63:52;
+       } s;
+       struct uvxh_event_occurred0_s {
                unsigned long   lb_hcerr:1;                     /* RW */
                unsigned long   qp_hcerr:1;                     /* RW */
                unsigned long   rh_hcerr:1;                     /* RW */
@@ -481,19 +450,20 @@ union uvh_event_occurred0_u {
                unsigned long   extio_int3:1;                   /* RW */
                unsigned long   profile_int:1;                  /* RW */
                unsigned long   rsvd_59_63:5;
-       } s2;
+       } sx;
 };
 
 /* ========================================================================= */
 /*                        UVH_EVENT_OCCURRED0_ALIAS                          */
 /* ========================================================================= */
-#define UVH_EVENT_OCCURRED0_ALIAS                      0x0000000000070008UL
-#define UVH_EVENT_OCCURRED0_ALIAS_32                   0x5f0
+#define UVH_EVENT_OCCURRED0_ALIAS 0x70008UL
+#define UVH_EVENT_OCCURRED0_ALIAS_32 0x5f0
+
 
 /* ========================================================================= */
 /*                         UVH_GR0_TLB_INT0_CONFIG                           */
 /* ========================================================================= */
-#define UVH_GR0_TLB_INT0_CONFIG                                0x61b00UL
+#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL
 
 #define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT            0
 #define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT                        8
@@ -531,7 +501,7 @@ union uvh_gr0_tlb_int0_config_u {
 /* ========================================================================= */
 /*                         UVH_GR0_TLB_INT1_CONFIG                           */
 /* ========================================================================= */
-#define UVH_GR0_TLB_INT1_CONFIG                                0x61b40UL
+#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL
 
 #define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT            0
 #define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT                        8
@@ -571,9 +541,11 @@ union uvh_gr0_tlb_int1_config_u {
 /* ========================================================================= */
 #define UV1H_GR0_TLB_MMR_CONTROL 0x401080UL
 #define UV2H_GR0_TLB_MMR_CONTROL 0xc01080UL
-#define UVH_GR0_TLB_MMR_CONTROL (is_uv1_hub() ?                                \
-                       UV1H_GR0_TLB_MMR_CONTROL :                      \
-                       UV2H_GR0_TLB_MMR_CONTROL)
+#define UV3H_GR0_TLB_MMR_CONTROL 0xc01080UL
+#define UVH_GR0_TLB_MMR_CONTROL                                                \
+               (is_uv1_hub() ? UV1H_GR0_TLB_MMR_CONTROL :              \
+               (is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL :              \
+                               UV3H_GR0_TLB_MMR_CONTROL))
 
 #define UVH_GR0_TLB_MMR_CONTROL_INDEX_SHFT             0
 #define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT           12
@@ -611,6 +583,21 @@ union uvh_gr0_tlb_int1_config_u {
 #define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK  0x0100000000000000UL
 #define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK  0x1000000000000000UL
 
+#define UVXH_GR0_TLB_MMR_CONTROL_INDEX_SHFT            0
+#define UVXH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT          12
+#define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT    16
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT        20
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT                30
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT         31
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT      32
+#define UVXH_GR0_TLB_MMR_CONTROL_INDEX_MASK            0x0000000000000fffUL
+#define UVXH_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK          0x0000000000003000UL
+#define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK    0x0000000000010000UL
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK        0x0000000000100000UL
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK                0x0000000040000000UL
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_READ_MASK         0x0000000080000000UL
+#define UVXH_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK      0x0000000100000000UL
+
 #define UV2H_GR0_TLB_MMR_CONTROL_INDEX_SHFT            0
 #define UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT          12
 #define UV2H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT    16
@@ -630,6 +617,23 @@ union uvh_gr0_tlb_int1_config_u {
 #define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_MASK      0x0001000000000000UL
 #define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK   0x0010000000000000UL
 
+#define UV3H_GR0_TLB_MMR_CONTROL_INDEX_SHFT            0
+#define UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT          12
+#define UV3H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT    16
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT        20
+#define UV3H_GR0_TLB_MMR_CONTROL_ECC_SEL_SHFT          21
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT                30
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT         31
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT      32
+#define UV3H_GR0_TLB_MMR_CONTROL_INDEX_MASK            0x0000000000000fffUL
+#define UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK          0x0000000000003000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK    0x0000000000010000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK        0x0000000000100000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_ECC_SEL_MASK          0x0000000000200000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK                0x0000000040000000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK         0x0000000080000000UL
+#define UV3H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK      0x0000000100000000UL
+
 union uvh_gr0_tlb_mmr_control_u {
        unsigned long   v;
        struct uvh_gr0_tlb_mmr_control_s {
@@ -642,7 +646,9 @@ union uvh_gr0_tlb_mmr_control_u {
                unsigned long   rsvd_21_29:9;
                unsigned long   mmr_write:1;                    /* WP */
                unsigned long   mmr_read:1;                     /* WP */
-               unsigned long   rsvd_32_63:32;
+               unsigned long   rsvd_32_48:17;
+               unsigned long   rsvd_49_51:3;
+               unsigned long   rsvd_52_63:12;
        } s;
        struct uv1h_gr0_tlb_mmr_control_s {
                unsigned long   index:12;                       /* RW */
@@ -666,6 +672,23 @@ union uvh_gr0_tlb_mmr_control_u {
                unsigned long   mmr_inj_tlblruv:1;              /* RW */
                unsigned long   rsvd_61_63:3;
        } s1;
+       struct uvxh_gr0_tlb_mmr_control_s {
+               unsigned long   index:12;                       /* RW */
+               unsigned long   mem_sel:2;                      /* RW */
+               unsigned long   rsvd_14_15:2;
+               unsigned long   auto_valid_en:1;                /* RW */
+               unsigned long   rsvd_17_19:3;
+               unsigned long   mmr_hash_index_en:1;            /* RW */
+               unsigned long   rsvd_21_29:9;
+               unsigned long   mmr_write:1;                    /* WP */
+               unsigned long   mmr_read:1;                     /* WP */
+               unsigned long   mmr_op_done:1;                  /* RW */
+               unsigned long   rsvd_33_47:15;
+               unsigned long   rsvd_48:1;
+               unsigned long   rsvd_49_51:3;
+               unsigned long   rsvd_52:1;
+               unsigned long   rsvd_53_63:11;
+       } sx;
        struct uv2h_gr0_tlb_mmr_control_s {
                unsigned long   index:12;                       /* RW */
                unsigned long   mem_sel:2;                      /* RW */
@@ -683,6 +706,24 @@ union uvh_gr0_tlb_mmr_control_u {
                unsigned long   mmr_inj_tlbram:1;               /* RW */
                unsigned long   rsvd_53_63:11;
        } s2;
+       struct uv3h_gr0_tlb_mmr_control_s {
+               unsigned long   index:12;                       /* RW */
+               unsigned long   mem_sel:2;                      /* RW */
+               unsigned long   rsvd_14_15:2;
+               unsigned long   auto_valid_en:1;                /* RW */
+               unsigned long   rsvd_17_19:3;
+               unsigned long   mmr_hash_index_en:1;            /* RW */
+               unsigned long   ecc_sel:1;                      /* RW */
+               unsigned long   rsvd_22_29:8;
+               unsigned long   mmr_write:1;                    /* WP */
+               unsigned long   mmr_read:1;                     /* WP */
+               unsigned long   mmr_op_done:1;                  /* RW */
+               unsigned long   rsvd_33_47:15;
+               unsigned long   undef_48:1;                     /* Undefined */
+               unsigned long   rsvd_49_51:3;
+               unsigned long   undef_52:1;                     /* Undefined */
+               unsigned long   rsvd_53_63:11;
+       } s3;
 };
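/*
 * Illustrative sketch, not part of the patch: how the per-hub selector
 * macro and the union overlay above are typically used together.  The
 * UVH_GR0_TLB_MMR_CONTROL macro expands, via is_uv1_hub()/is_uv2_hub(),
 * to the UV1, UV2 or UV3 MMR offset of the running hub; fields common to
 * every revision are reached through .s, UV2/UV3-common fields through
 * .sx, and revision-specific fields through .s1/.s2/.s3.  Assumes the
 * uv_read_local_mmr()/uv_write_local_mmr() accessors from
 * <asm/uv/uv_hub.h>.
 */
#include <asm/uv/uv_hub.h>

static inline void example_start_tlb_mmr_read(unsigned long index)
{
	union uvh_gr0_tlb_mmr_control_u control;

	control.v = uv_read_local_mmr(UVH_GR0_TLB_MMR_CONTROL);
	control.s.index = index;	/* TLB entry to read back */
	control.s.mmr_read = 1;		/* kick off the read operation */
	uv_write_local_mmr(UVH_GR0_TLB_MMR_CONTROL, control.v);
}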
 
 /* ========================================================================= */
@@ -690,9 +731,11 @@ union uvh_gr0_tlb_mmr_control_u {
 /* ========================================================================= */
 #define UV1H_GR0_TLB_MMR_READ_DATA_HI 0x4010a0UL
 #define UV2H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL
-#define UVH_GR0_TLB_MMR_READ_DATA_HI (is_uv1_hub() ?                   \
-                       UV1H_GR0_TLB_MMR_READ_DATA_HI :                 \
-                       UV2H_GR0_TLB_MMR_READ_DATA_HI)
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL
+#define UVH_GR0_TLB_MMR_READ_DATA_HI                                   \
+               (is_uv1_hub() ? UV1H_GR0_TLB_MMR_READ_DATA_HI :         \
+               (is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_HI :         \
+                               UV3H_GR0_TLB_MMR_READ_DATA_HI))
 
 #define UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT          0
 #define UVH_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT          41
@@ -703,6 +746,46 @@ union uvh_gr0_tlb_mmr_control_u {
 #define UVH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK                0x0000080000000000UL
 #define UVH_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK       0x0000100000000000UL
 
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT         0
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT         41
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT       43
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT      44
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK         0x000001ffffffffffUL
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK         0x0000060000000000UL
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK       0x0000080000000000UL
+#define UV1H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK      0x0000100000000000UL
+
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT         0
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT         41
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT       43
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT      44
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK         0x000001ffffffffffUL
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK         0x0000060000000000UL
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK       0x0000080000000000UL
+#define UVXH_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK      0x0000100000000000UL
+
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT         0
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT         41
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT       43
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT      44
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK         0x000001ffffffffffUL
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK         0x0000060000000000UL
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK       0x0000080000000000UL
+#define UV2H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK      0x0000100000000000UL
+
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT         0
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT         41
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT       43
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT      44
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT      45
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT     55
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK         0x000001ffffffffffUL
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK         0x0000060000000000UL
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK       0x0000080000000000UL
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK      0x0000100000000000UL
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_MASK      0x0000200000000000UL
+#define UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK     0xff80000000000000UL
+
 union uvh_gr0_tlb_mmr_read_data_hi_u {
        unsigned long   v;
        struct uvh_gr0_tlb_mmr_read_data_hi_s {
@@ -712,6 +795,36 @@ union uvh_gr0_tlb_mmr_read_data_hi_u {
                unsigned long   larger:1;                       /* RO */
                unsigned long   rsvd_45_63:19;
        } s;
+       struct uv1h_gr0_tlb_mmr_read_data_hi_s {
+               unsigned long   pfn:41;                         /* RO */
+               unsigned long   gaa:2;                          /* RO */
+               unsigned long   dirty:1;                        /* RO */
+               unsigned long   larger:1;                       /* RO */
+               unsigned long   rsvd_45_63:19;
+       } s1;
+       struct uvxh_gr0_tlb_mmr_read_data_hi_s {
+               unsigned long   pfn:41;                         /* RO */
+               unsigned long   gaa:2;                          /* RO */
+               unsigned long   dirty:1;                        /* RO */
+               unsigned long   larger:1;                       /* RO */
+               unsigned long   rsvd_45_63:19;
+       } sx;
+       struct uv2h_gr0_tlb_mmr_read_data_hi_s {
+               unsigned long   pfn:41;                         /* RO */
+               unsigned long   gaa:2;                          /* RO */
+               unsigned long   dirty:1;                        /* RO */
+               unsigned long   larger:1;                       /* RO */
+               unsigned long   rsvd_45_63:19;
+       } s2;
+       struct uv3h_gr0_tlb_mmr_read_data_hi_s {
+               unsigned long   pfn:41;                         /* RO */
+               unsigned long   gaa:2;                          /* RO */
+               unsigned long   dirty:1;                        /* RO */
+               unsigned long   larger:1;                       /* RO */
+               unsigned long   aa_ext:1;                       /* RO */
+               unsigned long   undef_46_54:9;                  /* Undefined */
+               unsigned long   way_ecc:9;                      /* RO */
+       } s3;
 };
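/*
 * Illustrative sketch, not part of the patch: extracting a field with the
 * *_SHFT/*_MASK pairs defined above is equivalent to going through the
 * bitfield overlay; the UV3-only aa_ext/way_ecc bits are reachable only
 * through the .s3 view or the UV3H_* macros.  Assumes uv_read_local_mmr()
 * from <asm/uv/uv_hub.h>.
 */
#include <asm/uv/uv_hub.h>

static inline unsigned long example_tlb_read_pfn(void)
{
	unsigned long hi = uv_read_local_mmr(UVH_GR0_TLB_MMR_READ_DATA_HI);

	return (hi & UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK) >>
			UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT;
}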
 
 /* ========================================================================= */
@@ -719,9 +832,11 @@ union uvh_gr0_tlb_mmr_read_data_hi_u {
 /* ========================================================================= */
 #define UV1H_GR0_TLB_MMR_READ_DATA_LO 0x4010a8UL
 #define UV2H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL
-#define UVH_GR0_TLB_MMR_READ_DATA_LO (is_uv1_hub() ?                   \
-                       UV1H_GR0_TLB_MMR_READ_DATA_LO :                 \
-                       UV2H_GR0_TLB_MMR_READ_DATA_LO)
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL
+#define UVH_GR0_TLB_MMR_READ_DATA_LO                                   \
+               (is_uv1_hub() ? UV1H_GR0_TLB_MMR_READ_DATA_LO :         \
+               (is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_LO :         \
+                               UV3H_GR0_TLB_MMR_READ_DATA_LO))
 
 #define UVH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT          0
 #define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT         39
@@ -730,6 +845,34 @@ union uvh_gr0_tlb_mmr_read_data_hi_u {
 #define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK         0x7fffff8000000000UL
 #define UVH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK                0x8000000000000000UL
 
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT         0
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT                39
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT       63
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK         0x0000007fffffffffUL
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK                0x7fffff8000000000UL
+#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK       0x8000000000000000UL
+
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT         0
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT                39
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT       63
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK         0x0000007fffffffffUL
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK                0x7fffff8000000000UL
+#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK       0x8000000000000000UL
+
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT         0
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT                39
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT       63
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK         0x0000007fffffffffUL
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK                0x7fffff8000000000UL
+#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK       0x8000000000000000UL
+
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT         0
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT                39
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT       63
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK         0x0000007fffffffffUL
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK                0x7fffff8000000000UL
+#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK       0x8000000000000000UL
+
 union uvh_gr0_tlb_mmr_read_data_lo_u {
        unsigned long   v;
        struct uvh_gr0_tlb_mmr_read_data_lo_s {
@@ -737,12 +880,32 @@ union uvh_gr0_tlb_mmr_read_data_lo_u {
                unsigned long   asid:24;                        /* RO */
                unsigned long   valid:1;                        /* RO */
        } s;
+       struct uv1h_gr0_tlb_mmr_read_data_lo_s {
+               unsigned long   vpn:39;                         /* RO */
+               unsigned long   asid:24;                        /* RO */
+               unsigned long   valid:1;                        /* RO */
+       } s1;
+       struct uvxh_gr0_tlb_mmr_read_data_lo_s {
+               unsigned long   vpn:39;                         /* RO */
+               unsigned long   asid:24;                        /* RO */
+               unsigned long   valid:1;                        /* RO */
+       } sx;
+       struct uv2h_gr0_tlb_mmr_read_data_lo_s {
+               unsigned long   vpn:39;                         /* RO */
+               unsigned long   asid:24;                        /* RO */
+               unsigned long   valid:1;                        /* RO */
+       } s2;
+       struct uv3h_gr0_tlb_mmr_read_data_lo_s {
+               unsigned long   vpn:39;                         /* RO */
+               unsigned long   asid:24;                        /* RO */
+               unsigned long   valid:1;                        /* RO */
+       } s3;
 };
 
 /* ========================================================================= */
 /*                         UVH_GR1_TLB_INT0_CONFIG                           */
 /* ========================================================================= */
-#define UVH_GR1_TLB_INT0_CONFIG                                0x61f00UL
+#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL
 
 #define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT            0
 #define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT                        8
@@ -780,7 +943,7 @@ union uvh_gr1_tlb_int0_config_u {
 /* ========================================================================= */
 /*                         UVH_GR1_TLB_INT1_CONFIG                           */
 /* ========================================================================= */
-#define UVH_GR1_TLB_INT1_CONFIG                                0x61f40UL
+#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL
 
 #define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT            0
 #define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT                        8
@@ -820,9 +983,11 @@ union uvh_gr1_tlb_int1_config_u {
 /* ========================================================================= */
 #define UV1H_GR1_TLB_MMR_CONTROL 0x801080UL
 #define UV2H_GR1_TLB_MMR_CONTROL 0x1001080UL
-#define UVH_GR1_TLB_MMR_CONTROL (is_uv1_hub() ?                                \
-                       UV1H_GR1_TLB_MMR_CONTROL :                      \
-                       UV2H_GR1_TLB_MMR_CONTROL)
+#define UV3H_GR1_TLB_MMR_CONTROL 0x1001080UL
+#define UVH_GR1_TLB_MMR_CONTROL                                                \
+               (is_uv1_hub() ? UV1H_GR1_TLB_MMR_CONTROL :              \
+               (is_uv2_hub() ? UV2H_GR1_TLB_MMR_CONTROL :              \
+                               UV3H_GR1_TLB_MMR_CONTROL))
 
 #define UVH_GR1_TLB_MMR_CONTROL_INDEX_SHFT             0
 #define UVH_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT           12
@@ -860,6 +1025,21 @@ union uvh_gr1_tlb_int1_config_u {
 #define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK  0x0100000000000000UL
 #define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK  0x1000000000000000UL
 
+#define UVXH_GR1_TLB_MMR_CONTROL_INDEX_SHFT            0
+#define UVXH_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT          12
+#define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT    16
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT        20
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT                30
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT         31
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT      32
+#define UVXH_GR1_TLB_MMR_CONTROL_INDEX_MASK            0x0000000000000fffUL
+#define UVXH_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK          0x0000000000003000UL
+#define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK    0x0000000000010000UL
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK        0x0000000000100000UL
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK                0x0000000040000000UL
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_READ_MASK         0x0000000080000000UL
+#define UVXH_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK      0x0000000100000000UL
+
 #define UV2H_GR1_TLB_MMR_CONTROL_INDEX_SHFT            0
 #define UV2H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT          12
 #define UV2H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT    16
@@ -879,6 +1059,23 @@ union uvh_gr1_tlb_int1_config_u {
 #define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_MASK      0x0001000000000000UL
 #define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK   0x0010000000000000UL
 
+#define UV3H_GR1_TLB_MMR_CONTROL_INDEX_SHFT            0
+#define UV3H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT          12
+#define UV3H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT    16
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT        20
+#define UV3H_GR1_TLB_MMR_CONTROL_ECC_SEL_SHFT          21
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT                30
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT         31
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT      32
+#define UV3H_GR1_TLB_MMR_CONTROL_INDEX_MASK            0x0000000000000fffUL
+#define UV3H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK          0x0000000000003000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK    0x0000000000010000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK        0x0000000000100000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_ECC_SEL_MASK          0x0000000000200000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK                0x0000000040000000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK         0x0000000080000000UL
+#define UV3H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK      0x0000000100000000UL
+
 union uvh_gr1_tlb_mmr_control_u {
        unsigned long   v;
        struct uvh_gr1_tlb_mmr_control_s {
@@ -891,7 +1088,9 @@ union uvh_gr1_tlb_mmr_control_u {
                unsigned long   rsvd_21_29:9;
                unsigned long   mmr_write:1;                    /* WP */
                unsigned long   mmr_read:1;                     /* WP */
-               unsigned long   rsvd_32_63:32;
+               unsigned long   rsvd_32_48:17;
+               unsigned long   rsvd_49_51:3;
+               unsigned long   rsvd_52_63:12;
        } s;
        struct uv1h_gr1_tlb_mmr_control_s {
                unsigned long   index:12;                       /* RW */
@@ -915,6 +1114,23 @@ union uvh_gr1_tlb_mmr_control_u {
                unsigned long   mmr_inj_tlblruv:1;              /* RW */
                unsigned long   rsvd_61_63:3;
        } s1;
+       struct uvxh_gr1_tlb_mmr_control_s {
+               unsigned long   index:12;                       /* RW */
+               unsigned long   mem_sel:2;                      /* RW */
+               unsigned long   rsvd_14_15:2;
+               unsigned long   auto_valid_en:1;                /* RW */
+               unsigned long   rsvd_17_19:3;
+               unsigned long   mmr_hash_index_en:1;            /* RW */
+               unsigned long   rsvd_21_29:9;
+               unsigned long   mmr_write:1;                    /* WP */
+               unsigned long   mmr_read:1;                     /* WP */
+               unsigned long   mmr_op_done:1;                  /* RW */
+               unsigned long   rsvd_33_47:15;
+               unsigned long   rsvd_48:1;
+               unsigned long   rsvd_49_51:3;
+               unsigned long   rsvd_52:1;
+               unsigned long   rsvd_53_63:11;
+       } sx;
        struct uv2h_gr1_tlb_mmr_control_s {
                unsigned long   index:12;                       /* RW */
                unsigned long   mem_sel:2;                      /* RW */
@@ -932,6 +1148,24 @@ union uvh_gr1_tlb_mmr_control_u {
                unsigned long   mmr_inj_tlbram:1;               /* RW */
                unsigned long   rsvd_53_63:11;
        } s2;
+       struct uv3h_gr1_tlb_mmr_control_s {
+               unsigned long   index:12;                       /* RW */
+               unsigned long   mem_sel:2;                      /* RW */
+               unsigned long   rsvd_14_15:2;
+               unsigned long   auto_valid_en:1;                /* RW */
+               unsigned long   rsvd_17_19:3;
+               unsigned long   mmr_hash_index_en:1;            /* RW */
+               unsigned long   ecc_sel:1;                      /* RW */
+               unsigned long   rsvd_22_29:8;
+               unsigned long   mmr_write:1;                    /* WP */
+               unsigned long   mmr_read:1;                     /* WP */
+               unsigned long   mmr_op_done:1;                  /* RW */
+               unsigned long   rsvd_33_47:15;
+               unsigned long   undef_48:1;                     /* Undefined */
+               unsigned long   rsvd_49_51:3;
+               unsigned long   undef_52:1;                     /* Undefined */
+               unsigned long   rsvd_53_63:11;
+       } s3;
 };
 
 /* ========================================================================= */
@@ -939,9 +1173,11 @@ union uvh_gr1_tlb_mmr_control_u {
 /* ========================================================================= */
 #define UV1H_GR1_TLB_MMR_READ_DATA_HI 0x8010a0UL
 #define UV2H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL
-#define UVH_GR1_TLB_MMR_READ_DATA_HI (is_uv1_hub() ?                   \
-                       UV1H_GR1_TLB_MMR_READ_DATA_HI :                 \
-                       UV2H_GR1_TLB_MMR_READ_DATA_HI)
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL
+#define UVH_GR1_TLB_MMR_READ_DATA_HI                                   \
+               (is_uv1_hub() ? UV1H_GR1_TLB_MMR_READ_DATA_HI :         \
+               (is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_HI :         \
+                               UV3H_GR1_TLB_MMR_READ_DATA_HI))
 
 #define UVH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT          0
 #define UVH_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT          41
@@ -952,6 +1188,46 @@ union uvh_gr1_tlb_mmr_control_u {
 #define UVH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK                0x0000080000000000UL
 #define UVH_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK       0x0000100000000000UL
 
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT         0
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT         41
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT       43
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT      44
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK         0x000001ffffffffffUL
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK         0x0000060000000000UL
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK       0x0000080000000000UL
+#define UV1H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK      0x0000100000000000UL
+
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT         0
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT         41
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT       43
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT      44
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK         0x000001ffffffffffUL
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK         0x0000060000000000UL
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK       0x0000080000000000UL
+#define UVXH_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK      0x0000100000000000UL
+
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT         0
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT         41
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT       43
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT      44
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK         0x000001ffffffffffUL
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK         0x0000060000000000UL
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK       0x0000080000000000UL
+#define UV2H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK      0x0000100000000000UL
+
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT         0
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT         41
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT       43
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT      44
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT      45
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT     55
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK         0x000001ffffffffffUL
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK         0x0000060000000000UL
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK       0x0000080000000000UL
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK      0x0000100000000000UL
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_MASK      0x0000200000000000UL
+#define UV3H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK     0xff80000000000000UL
+
 union uvh_gr1_tlb_mmr_read_data_hi_u {
        unsigned long   v;
        struct uvh_gr1_tlb_mmr_read_data_hi_s {
@@ -961,6 +1237,36 @@ union uvh_gr1_tlb_mmr_read_data_hi_u {
                unsigned long   larger:1;                       /* RO */
                unsigned long   rsvd_45_63:19;
        } s;
+       struct uv1h_gr1_tlb_mmr_read_data_hi_s {
+               unsigned long   pfn:41;                         /* RO */
+               unsigned long   gaa:2;                          /* RO */
+               unsigned long   dirty:1;                        /* RO */
+               unsigned long   larger:1;                       /* RO */
+               unsigned long   rsvd_45_63:19;
+       } s1;
+       struct uvxh_gr1_tlb_mmr_read_data_hi_s {
+               unsigned long   pfn:41;                         /* RO */
+               unsigned long   gaa:2;                          /* RO */
+               unsigned long   dirty:1;                        /* RO */
+               unsigned long   larger:1;                       /* RO */
+               unsigned long   rsvd_45_63:19;
+       } sx;
+       struct uv2h_gr1_tlb_mmr_read_data_hi_s {
+               unsigned long   pfn:41;                         /* RO */
+               unsigned long   gaa:2;                          /* RO */
+               unsigned long   dirty:1;                        /* RO */
+               unsigned long   larger:1;                       /* RO */
+               unsigned long   rsvd_45_63:19;
+       } s2;
+       struct uv3h_gr1_tlb_mmr_read_data_hi_s {
+               unsigned long   pfn:41;                         /* RO */
+               unsigned long   gaa:2;                          /* RO */
+               unsigned long   dirty:1;                        /* RO */
+               unsigned long   larger:1;                       /* RO */
+               unsigned long   aa_ext:1;                       /* RO */
+               unsigned long   undef_46_54:9;                  /* Undefined */
+               unsigned long   way_ecc:9;                      /* RO */
+       } s3;
 };
 
 /* ========================================================================= */
@@ -968,9 +1274,11 @@ union uvh_gr1_tlb_mmr_read_data_hi_u {
 /* ========================================================================= */
 #define UV1H_GR1_TLB_MMR_READ_DATA_LO 0x8010a8UL
 #define UV2H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL
-#define UVH_GR1_TLB_MMR_READ_DATA_LO (is_uv1_hub() ?                   \
-                       UV1H_GR1_TLB_MMR_READ_DATA_LO :                 \
-                       UV2H_GR1_TLB_MMR_READ_DATA_LO)
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL
+#define UVH_GR1_TLB_MMR_READ_DATA_LO                                   \
+               (is_uv1_hub() ? UV1H_GR1_TLB_MMR_READ_DATA_LO :         \
+               (is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_LO :         \
+                               UV3H_GR1_TLB_MMR_READ_DATA_LO))
 
 #define UVH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT          0
 #define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT         39
@@ -979,6 +1287,34 @@ union uvh_gr1_tlb_mmr_read_data_hi_u {
 #define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK         0x7fffff8000000000UL
 #define UVH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK                0x8000000000000000UL
 
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT         0
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT                39
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT       63
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK         0x0000007fffffffffUL
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK                0x7fffff8000000000UL
+#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK       0x8000000000000000UL
+
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT         0
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT                39
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT       63
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK         0x0000007fffffffffUL
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK                0x7fffff8000000000UL
+#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK       0x8000000000000000UL
+
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT         0
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT                39
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT       63
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK         0x0000007fffffffffUL
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK                0x7fffff8000000000UL
+#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK       0x8000000000000000UL
+
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT         0
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT                39
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT       63
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK         0x0000007fffffffffUL
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK                0x7fffff8000000000UL
+#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK       0x8000000000000000UL
+
 union uvh_gr1_tlb_mmr_read_data_lo_u {
        unsigned long   v;
        struct uvh_gr1_tlb_mmr_read_data_lo_s {
@@ -986,12 +1322,32 @@ union uvh_gr1_tlb_mmr_read_data_lo_u {
                unsigned long   asid:24;                        /* RO */
                unsigned long   valid:1;                        /* RO */
        } s;
+       struct uv1h_gr1_tlb_mmr_read_data_lo_s {
+               unsigned long   vpn:39;                         /* RO */
+               unsigned long   asid:24;                        /* RO */
+               unsigned long   valid:1;                        /* RO */
+       } s1;
+       struct uvxh_gr1_tlb_mmr_read_data_lo_s {
+               unsigned long   vpn:39;                         /* RO */
+               unsigned long   asid:24;                        /* RO */
+               unsigned long   valid:1;                        /* RO */
+       } sx;
+       struct uv2h_gr1_tlb_mmr_read_data_lo_s {
+               unsigned long   vpn:39;                         /* RO */
+               unsigned long   asid:24;                        /* RO */
+               unsigned long   valid:1;                        /* RO */
+       } s2;
+       struct uv3h_gr1_tlb_mmr_read_data_lo_s {
+               unsigned long   vpn:39;                         /* RO */
+               unsigned long   asid:24;                        /* RO */
+               unsigned long   valid:1;                        /* RO */
+       } s3;
 };
 
 /* ========================================================================= */
 /*                               UVH_INT_CMPB                                */
 /* ========================================================================= */
-#define UVH_INT_CMPB                                   0x22080UL
+#define UVH_INT_CMPB 0x22080UL
 
 #define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT               0
 #define UVH_INT_CMPB_REAL_TIME_CMPB_MASK               0x00ffffffffffffffUL
@@ -1007,10 +1363,13 @@ union uvh_int_cmpb_u {
 /* ========================================================================= */
 /*                               UVH_INT_CMPC                                */
 /* ========================================================================= */
-#define UVH_INT_CMPC                                   0x22100UL
+#define UVH_INT_CMPC 0x22100UL
+
+#define UV1H_INT_CMPC_REAL_TIME_CMPC_SHFT              0
+#define UV1H_INT_CMPC_REAL_TIME_CMPC_MASK              0x00ffffffffffffffUL
 
-#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT               0
-#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK               0xffffffffffffffUL
+#define UVXH_INT_CMPC_REAL_TIME_CMP_2_SHFT             0
+#define UVXH_INT_CMPC_REAL_TIME_CMP_2_MASK             0x00ffffffffffffffUL
 
 union uvh_int_cmpc_u {
        unsigned long   v;
@@ -1023,10 +1382,13 @@ union uvh_int_cmpc_u {
 /* ========================================================================= */
 /*                               UVH_INT_CMPD                                */
 /* ========================================================================= */
-#define UVH_INT_CMPD                                   0x22180UL
+#define UVH_INT_CMPD 0x22180UL
 
-#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT               0
-#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK               0xffffffffffffffUL
+#define UV1H_INT_CMPD_REAL_TIME_CMPD_SHFT              0
+#define UV1H_INT_CMPD_REAL_TIME_CMPD_MASK              0x00ffffffffffffffUL
+
+#define UVXH_INT_CMPD_REAL_TIME_CMP_3_SHFT             0
+#define UVXH_INT_CMPD_REAL_TIME_CMP_3_MASK             0x00ffffffffffffffUL
 
 union uvh_int_cmpd_u {
        unsigned long   v;
@@ -1039,8 +1401,8 @@ union uvh_int_cmpd_u {
 /* ========================================================================= */
 /*                               UVH_IPI_INT                                 */
 /* ========================================================================= */
-#define UVH_IPI_INT                                    0x60500UL
-#define UVH_IPI_INT_32                                 0x348
+#define UVH_IPI_INT 0x60500UL
+#define UVH_IPI_INT_32 0x348
 
 #define UVH_IPI_INT_VECTOR_SHFT                                0
 #define UVH_IPI_INT_DELIVERY_MODE_SHFT                 8
@@ -1069,8 +1431,8 @@ union uvh_ipi_int_u {
 /* ========================================================================= */
 /*                   UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST                     */
 /* ========================================================================= */
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST            0x320050UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32         0x9c0
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0
 
 #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
 #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49
@@ -1091,8 +1453,8 @@ union uvh_lb_bau_intd_payload_queue_first_u {
 /* ========================================================================= */
 /*                    UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST                     */
 /* ========================================================================= */
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST             0x320060UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32          0x9c8
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8
 
 #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT        4
 #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK        0x000007fffffffff0UL
@@ -1109,8 +1471,8 @@ union uvh_lb_bau_intd_payload_queue_last_u {
 /* ========================================================================= */
 /*                    UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL                     */
 /* ========================================================================= */
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL             0x320070UL
-#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32          0x9d0
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
+#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0
 
 #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT        4
 #define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK        0x000007fffffffff0UL
@@ -1127,8 +1489,8 @@ union uvh_lb_bau_intd_payload_queue_tail_u {
 /* ========================================================================= */
 /*                   UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE                    */
 /* ========================================================================= */
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE           0x320080UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32                0xa68
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68
 
 #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
 #define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1
@@ -1189,14 +1551,21 @@ union uvh_lb_bau_intd_software_acknowledge_u {
 /* ========================================================================= */
 /*                UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS                 */
 /* ========================================================================= */
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS     0x0000000000320088UL
-#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32  0xa70
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x320088UL
+#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70
+
 
 /* ========================================================================= */
 /*                         UVH_LB_BAU_MISC_CONTROL                           */
 /* ========================================================================= */
-#define UVH_LB_BAU_MISC_CONTROL                                0x320170UL
-#define UVH_LB_BAU_MISC_CONTROL_32                     0xa10
+#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
+#define UV1H_LB_BAU_MISC_CONTROL 0x320170UL
+#define UV2H_LB_BAU_MISC_CONTROL 0x320170UL
+#define UV3H_LB_BAU_MISC_CONTROL 0x320170UL
+#define UVH_LB_BAU_MISC_CONTROL_32 0xa10
+#define UV1H_LB_BAU_MISC_CONTROL_32 0x320170UL
+#define UV2H_LB_BAU_MISC_CONTROL_32 0x320170UL
+#define UV3H_LB_BAU_MISC_CONTROL_32 0x320170UL
 
 #define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT   0
 #define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT         8
@@ -1213,6 +1582,7 @@ union uvh_lb_bau_intd_software_acknowledge_u {
 #define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
 #define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
 #define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UVH_LB_BAU_MISC_CONTROL_FUN_SHFT               48
 #define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK   0x00000000000000ffUL
 #define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK         0x0000000000000100UL
 #define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK   0x0000000000000200UL
@@ -1228,6 +1598,7 @@ union uvh_lb_bau_intd_software_acknowledge_u {
 #define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
 #define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
 #define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UVH_LB_BAU_MISC_CONTROL_FUN_MASK               0xffff000000000000UL
 
 #define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT  0
 #define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT                8
@@ -1262,6 +1633,53 @@ union uvh_lb_bau_intd_software_acknowledge_u {
 #define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
 #define UV1H_LB_BAU_MISC_CONTROL_FUN_MASK              0xffff000000000000UL
 
+#define UVXH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT  0
+#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT                8
+#define UVXH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT  9
+#define UVXH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT   10
+#define UVXH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
+#define UVXH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
+#define UVXH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
+#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
+#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
+#define UVXH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
+#define UVXH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
+#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
+#define UVXH_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
+#define UVXH_LB_BAU_MISC_CONTROL_FUN_SHFT              48
+#define UVXH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK  0x00000000000000ffUL
+#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK                0x0000000000000100UL
+#define UVXH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK  0x0000000000000200UL
+#define UVXH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK   0x0000000000000400UL
+#define UVXH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UVXH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
+#define UVXH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
+#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
+#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
+#define UVXH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
+#define UVXH_LB_BAU_MISC_CONTROL_FUN_MASK              0xffff000000000000UL
+
 #define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT  0
 #define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT                8
 #define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT  9
@@ -1309,6 +1727,59 @@ union uvh_lb_bau_intd_software_acknowledge_u {
 #define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
 #define UV2H_LB_BAU_MISC_CONTROL_FUN_MASK              0xffff000000000000UL
 
+#define UV3H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT  0
+#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT                8
+#define UV3H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT  9
+#define UV3H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT   10
+#define UV3H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
+#define UV3H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
+#define UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
+#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
+#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
+#define UV3H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
+#define UV3H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
+#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
+#define UV3H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_SHFT 36
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_SHFT 37
+#define UV3H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_SHFT 38
+#define UV3H_LB_BAU_MISC_CONTROL_FUN_SHFT              48
+#define UV3H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK  0x00000000000000ffUL
+#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK                0x0000000000000100UL
+#define UV3H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK  0x0000000000000200UL
+#define UV3H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK   0x0000000000000400UL
+#define UV3H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
+#define UV3H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
+#define UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
+#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
+#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
+#define UV3H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_MASK 0x0000001000000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_MASK 0x0000002000000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_MASK 0x00003fc000000000UL
+#define UV3H_LB_BAU_MISC_CONTROL_FUN_MASK              0xffff000000000000UL
+
 union uvh_lb_bau_misc_control_u {
        unsigned long   v;
        struct uvh_lb_bau_misc_control_s {
@@ -1327,7 +1798,8 @@ union uvh_lb_bau_misc_control_u {
                unsigned long   programmed_initial_priority:3;  /* RW */
                unsigned long   use_incoming_priority:1;        /* RW */
                unsigned long   enable_programmed_initial_priority:1;/* RW */
-               unsigned long   rsvd_29_63:35;
+               unsigned long   rsvd_29_47:19;
+               unsigned long   fun:16;                         /* RW */
        } s;
        struct uv1h_lb_bau_misc_control_s {
                unsigned long   rejection_delay:8;              /* RW */
@@ -1348,6 +1820,32 @@ union uvh_lb_bau_misc_control_u {
                unsigned long   rsvd_29_47:19;
                unsigned long   fun:16;                         /* RW */
        } s1;
+       struct uvxh_lb_bau_misc_control_s {
+               unsigned long   rejection_delay:8;              /* RW */
+               unsigned long   apic_mode:1;                    /* RW */
+               unsigned long   force_broadcast:1;              /* RW */
+               unsigned long   force_lock_nop:1;               /* RW */
+               unsigned long   qpi_agent_presence_vector:3;    /* RW */
+               unsigned long   descriptor_fetch_mode:1;        /* RW */
+               unsigned long   enable_intd_soft_ack_mode:1;    /* RW */
+               unsigned long   intd_soft_ack_timeout_period:4; /* RW */
+               unsigned long   enable_dual_mapping_mode:1;     /* RW */
+               unsigned long   vga_io_port_decode_enable:1;    /* RW */
+               unsigned long   vga_io_port_16_bit_decode:1;    /* RW */
+               unsigned long   suppress_dest_registration:1;   /* RW */
+               unsigned long   programmed_initial_priority:3;  /* RW */
+               unsigned long   use_incoming_priority:1;        /* RW */
+               unsigned long   enable_programmed_initial_priority:1;/* RW */
+               unsigned long   enable_automatic_apic_mode_selection:1;/* RW */
+               unsigned long   apic_mode_status:1;             /* RO */
+               unsigned long   suppress_interrupts_to_self:1;  /* RW */
+               unsigned long   enable_lock_based_system_flush:1;/* RW */
+               unsigned long   enable_extended_sb_status:1;    /* RW */
+               unsigned long   suppress_int_prio_udt_to_self:1;/* RW */
+               unsigned long   use_legacy_descriptor_formats:1;/* RW */
+               unsigned long   rsvd_36_47:12;
+               unsigned long   fun:16;                         /* RW */
+       } sx;
        struct uv2h_lb_bau_misc_control_s {
                unsigned long   rejection_delay:8;              /* RW */
                unsigned long   apic_mode:1;                    /* RW */
@@ -1374,13 +1872,42 @@ union uvh_lb_bau_misc_control_u {
                unsigned long   rsvd_36_47:12;
                unsigned long   fun:16;                         /* RW */
        } s2;
+       struct uv3h_lb_bau_misc_control_s {
+               unsigned long   rejection_delay:8;              /* RW */
+               unsigned long   apic_mode:1;                    /* RW */
+               unsigned long   force_broadcast:1;              /* RW */
+               unsigned long   force_lock_nop:1;               /* RW */
+               unsigned long   qpi_agent_presence_vector:3;    /* RW */
+               unsigned long   descriptor_fetch_mode:1;        /* RW */
+               unsigned long   enable_intd_soft_ack_mode:1;    /* RW */
+               unsigned long   intd_soft_ack_timeout_period:4; /* RW */
+               unsigned long   enable_dual_mapping_mode:1;     /* RW */
+               unsigned long   vga_io_port_decode_enable:1;    /* RW */
+               unsigned long   vga_io_port_16_bit_decode:1;    /* RW */
+               unsigned long   suppress_dest_registration:1;   /* RW */
+               unsigned long   programmed_initial_priority:3;  /* RW */
+               unsigned long   use_incoming_priority:1;        /* RW */
+               unsigned long   enable_programmed_initial_priority:1;/* RW */
+               unsigned long   enable_automatic_apic_mode_selection:1;/* RW */
+               unsigned long   apic_mode_status:1;             /* RO */
+               unsigned long   suppress_interrupts_to_self:1;  /* RW */
+               unsigned long   enable_lock_based_system_flush:1;/* RW */
+               unsigned long   enable_extended_sb_status:1;    /* RW */
+               unsigned long   suppress_int_prio_udt_to_self:1;/* RW */
+               unsigned long   use_legacy_descriptor_formats:1;/* RW */
+               unsigned long   suppress_quiesce_msgs_to_qpi:1; /* RW */
+               unsigned long   enable_intd_prefetch_hint:1;    /* RW */
+               unsigned long   thread_kill_timebase:8;         /* RW */
+               unsigned long   rsvd_46_47:2;
+               unsigned long   fun:16;                         /* RW */
+       } s3;
 };
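/*
 * Illustrative sketch, not part of the patch: a read-modify-write of a
 * BAU control bit through the misc_control union.  Bits present on every
 * hub revision sit in the generic .s view; UV2/UV3-common bits (e.g.
 * enable_extended_sb_status) in .sx; UV3-only bits such as
 * suppress_quiesce_msgs_to_qpi or thread_kill_timebase only in .s3.
 * Assumes the uv_read_local_mmr()/uv_write_local_mmr() accessors from
 * <asm/uv/uv_hub.h>.
 */
#include <asm/uv/uv_hub.h>

static inline void example_set_incoming_priority(void)
{
	union uvh_lb_bau_misc_control_u misc;

	misc.v = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
	misc.s.use_incoming_priority = 1;
	uv_write_local_mmr(UVH_LB_BAU_MISC_CONTROL, misc.v);
}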
 
 /* ========================================================================= */
 /*                     UVH_LB_BAU_SB_ACTIVATION_CONTROL                      */
 /* ========================================================================= */
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL               0x320020UL
-#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32            0x9a8
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
+#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
 
 #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT    0
 #define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT     62
@@ -1402,8 +1929,8 @@ union uvh_lb_bau_sb_activation_control_u {
 /* ========================================================================= */
 /*                    UVH_LB_BAU_SB_ACTIVATION_STATUS_0                      */
 /* ========================================================================= */
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0              0x320030UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32           0x9b0
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
 
 #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT  0
 #define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK  0xffffffffffffffffUL
@@ -1418,8 +1945,8 @@ union uvh_lb_bau_sb_activation_status_0_u {
 /* ========================================================================= */
 /*                    UVH_LB_BAU_SB_ACTIVATION_STATUS_1                      */
 /* ========================================================================= */
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1              0x320040UL
-#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32           0x9b8
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
+#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
 
 #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT  0
 #define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK  0xffffffffffffffffUL
@@ -1434,8 +1961,8 @@ union uvh_lb_bau_sb_activation_status_1_u {
 /* ========================================================================= */
 /*                      UVH_LB_BAU_SB_DESCRIPTOR_BASE                        */
 /* ========================================================================= */
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE                  0x320010UL
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32               0x9a0
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
 
 #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT        12
 #define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT     49
@@ -1456,7 +1983,10 @@ union uvh_lb_bau_sb_descriptor_base_u {
 /* ========================================================================= */
 /*                               UVH_NODE_ID                                 */
 /* ========================================================================= */
-#define UVH_NODE_ID                                    0x0UL
+#define UVH_NODE_ID 0x0UL
+#define UV1H_NODE_ID 0x0UL
+#define UV2H_NODE_ID 0x0UL
+#define UV3H_NODE_ID 0x0UL
 
 #define UVH_NODE_ID_FORCE1_SHFT                                0
 #define UVH_NODE_ID_MANUFACTURER_SHFT                  1
@@ -1484,6 +2014,21 @@ union uvh_lb_bau_sb_descriptor_base_u {
 #define UV1H_NODE_ID_NODES_PER_BIT_MASK                        0x007f000000000000UL
 #define UV1H_NODE_ID_NI_PORT_MASK                      0x0f00000000000000UL
 
+#define UVXH_NODE_ID_FORCE1_SHFT                       0
+#define UVXH_NODE_ID_MANUFACTURER_SHFT                 1
+#define UVXH_NODE_ID_PART_NUMBER_SHFT                  12
+#define UVXH_NODE_ID_REVISION_SHFT                     28
+#define UVXH_NODE_ID_NODE_ID_SHFT                      32
+#define UVXH_NODE_ID_NODES_PER_BIT_SHFT                        50
+#define UVXH_NODE_ID_NI_PORT_SHFT                      57
+#define UVXH_NODE_ID_FORCE1_MASK                       0x0000000000000001UL
+#define UVXH_NODE_ID_MANUFACTURER_MASK                 0x0000000000000ffeUL
+#define UVXH_NODE_ID_PART_NUMBER_MASK                  0x000000000ffff000UL
+#define UVXH_NODE_ID_REVISION_MASK                     0x00000000f0000000UL
+#define UVXH_NODE_ID_NODE_ID_MASK                      0x00007fff00000000UL
+#define UVXH_NODE_ID_NODES_PER_BIT_MASK                        0x01fc000000000000UL
+#define UVXH_NODE_ID_NI_PORT_MASK                      0x3e00000000000000UL
+
 #define UV2H_NODE_ID_FORCE1_SHFT                       0
 #define UV2H_NODE_ID_MANUFACTURER_SHFT                 1
 #define UV2H_NODE_ID_PART_NUMBER_SHFT                  12
@@ -1499,6 +2044,25 @@ union uvh_lb_bau_sb_descriptor_base_u {
 #define UV2H_NODE_ID_NODES_PER_BIT_MASK                        0x01fc000000000000UL
 #define UV2H_NODE_ID_NI_PORT_MASK                      0x3e00000000000000UL
 
+#define UV3H_NODE_ID_FORCE1_SHFT                       0
+#define UV3H_NODE_ID_MANUFACTURER_SHFT                 1
+#define UV3H_NODE_ID_PART_NUMBER_SHFT                  12
+#define UV3H_NODE_ID_REVISION_SHFT                     28
+#define UV3H_NODE_ID_NODE_ID_SHFT                      32
+#define UV3H_NODE_ID_ROUTER_SELECT_SHFT                        48
+#define UV3H_NODE_ID_RESERVED_2_SHFT                   49
+#define UV3H_NODE_ID_NODES_PER_BIT_SHFT                        50
+#define UV3H_NODE_ID_NI_PORT_SHFT                      57
+#define UV3H_NODE_ID_FORCE1_MASK                       0x0000000000000001UL
+#define UV3H_NODE_ID_MANUFACTURER_MASK                 0x0000000000000ffeUL
+#define UV3H_NODE_ID_PART_NUMBER_MASK                  0x000000000ffff000UL
+#define UV3H_NODE_ID_REVISION_MASK                     0x00000000f0000000UL
+#define UV3H_NODE_ID_NODE_ID_MASK                      0x00007fff00000000UL
+#define UV3H_NODE_ID_ROUTER_SELECT_MASK                        0x0001000000000000UL
+#define UV3H_NODE_ID_RESERVED_2_MASK                   0x0002000000000000UL
+#define UV3H_NODE_ID_NODES_PER_BIT_MASK                        0x01fc000000000000UL
+#define UV3H_NODE_ID_NI_PORT_MASK                      0x3e00000000000000UL
+
 union uvh_node_id_u {
        unsigned long   v;
        struct uvh_node_id_s {
@@ -1521,6 +2085,17 @@ union uvh_node_id_u {
                unsigned long   ni_port:4;                      /* RO */
                unsigned long   rsvd_60_63:4;
        } s1;
+       struct uvxh_node_id_s {
+               unsigned long   force1:1;                       /* RO */
+               unsigned long   manufacturer:11;                /* RO */
+               unsigned long   part_number:16;                 /* RO */
+               unsigned long   revision:4;                     /* RO */
+               unsigned long   node_id:15;                     /* RW */
+               unsigned long   rsvd_47_49:3;
+               unsigned long   nodes_per_bit:7;                /* RO */
+               unsigned long   ni_port:5;                      /* RO */
+               unsigned long   rsvd_62_63:2;
+       } sx;
        struct uv2h_node_id_s {
                unsigned long   force1:1;                       /* RO */
                unsigned long   manufacturer:11;                /* RO */
@@ -1532,13 +2107,26 @@ union uvh_node_id_u {
                unsigned long   ni_port:5;                      /* RO */
                unsigned long   rsvd_62_63:2;
        } s2;
+       struct uv3h_node_id_s {
+               unsigned long   force1:1;                       /* RO */
+               unsigned long   manufacturer:11;                /* RO */
+               unsigned long   part_number:16;                 /* RO */
+               unsigned long   revision:4;                     /* RO */
+               unsigned long   node_id:15;                     /* RW */
+               unsigned long   rsvd_47:1;
+               unsigned long   router_select:1;                /* RO */
+               unsigned long   rsvd_49:1;
+               unsigned long   nodes_per_bit:7;                /* RO */
+               unsigned long   ni_port:5;                      /* RO */
+               unsigned long   rsvd_62_63:2;
+       } s3;
 };
 
 /* ========================================================================= */
 /*                          UVH_NODE_PRESENT_TABLE                           */
 /* ========================================================================= */
-#define UVH_NODE_PRESENT_TABLE                         0x1400UL
-#define UVH_NODE_PRESENT_TABLE_DEPTH                   16
+#define UVH_NODE_PRESENT_TABLE 0x1400UL
+#define UVH_NODE_PRESENT_TABLE_DEPTH 16
 
 #define UVH_NODE_PRESENT_TABLE_NODES_SHFT              0
 #define UVH_NODE_PRESENT_TABLE_NODES_MASK              0xffffffffffffffffUL
@@ -1553,7 +2141,7 @@ union uvh_node_present_table_u {
 /* ========================================================================= */
 /*                 UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR                  */
 /* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR       0x16000c8UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
 
 #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
 #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
@@ -1577,7 +2165,7 @@ union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
 /* ========================================================================= */
 /*                 UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR                  */
 /* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR       0x16000d8UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
 
 #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
 #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
@@ -1601,7 +2189,7 @@ union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
 /* ========================================================================= */
 /*                 UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR                  */
 /* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR       0x16000e8UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
 
 #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
 #define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
@@ -1625,7 +2213,7 @@ union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
 /* ========================================================================= */
 /*                UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR                  */
 /* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR      0x16000d0UL
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
 
 #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
 #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
@@ -1642,7 +2230,7 @@ union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
 /* ========================================================================= */
 /*                UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR                  */
 /* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR      0x16000e0UL
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
 
 #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
 #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
@@ -1659,7 +2247,7 @@ union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
 /* ========================================================================= */
 /*                UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR                  */
 /* ========================================================================= */
-#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR      0x16000f0UL
+#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
 
 #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
 #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
@@ -1676,7 +2264,10 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
 /* ========================================================================= */
 /*                          UVH_RH_GAM_CONFIG_MMR                            */
 /* ========================================================================= */
-#define UVH_RH_GAM_CONFIG_MMR                          0x1600000UL
+#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
+#define UV1H_RH_GAM_CONFIG_MMR 0x1600000UL
+#define UV2H_RH_GAM_CONFIG_MMR 0x1600000UL
+#define UV3H_RH_GAM_CONFIG_MMR 0x1600000UL
 
 #define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT               0
 #define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT               6
@@ -1690,11 +2281,21 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
 #define UV1H_RH_GAM_CONFIG_MMR_N_SKT_MASK              0x00000000000003c0UL
 #define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK          0x0000000000001000UL
 
+#define UVXH_RH_GAM_CONFIG_MMR_M_SKT_SHFT              0
+#define UVXH_RH_GAM_CONFIG_MMR_N_SKT_SHFT              6
+#define UVXH_RH_GAM_CONFIG_MMR_M_SKT_MASK              0x000000000000003fUL
+#define UVXH_RH_GAM_CONFIG_MMR_N_SKT_MASK              0x00000000000003c0UL
+
 #define UV2H_RH_GAM_CONFIG_MMR_M_SKT_SHFT              0
 #define UV2H_RH_GAM_CONFIG_MMR_N_SKT_SHFT              6
 #define UV2H_RH_GAM_CONFIG_MMR_M_SKT_MASK              0x000000000000003fUL
 #define UV2H_RH_GAM_CONFIG_MMR_N_SKT_MASK              0x00000000000003c0UL
 
+#define UV3H_RH_GAM_CONFIG_MMR_M_SKT_SHFT              0
+#define UV3H_RH_GAM_CONFIG_MMR_N_SKT_SHFT              6
+#define UV3H_RH_GAM_CONFIG_MMR_M_SKT_MASK              0x000000000000003fUL
+#define UV3H_RH_GAM_CONFIG_MMR_N_SKT_MASK              0x00000000000003c0UL
+
 union uvh_rh_gam_config_mmr_u {
        unsigned long   v;
        struct uvh_rh_gam_config_mmr_s {
@@ -1709,20 +2310,37 @@ union uvh_rh_gam_config_mmr_u {
                unsigned long   mmiol_cfg:1;                    /* RW */
                unsigned long   rsvd_13_63:51;
        } s1;
+       struct uvxh_rh_gam_config_mmr_s {
+               unsigned long   m_skt:6;                        /* RW */
+               unsigned long   n_skt:4;                        /* RW */
+               unsigned long   rsvd_10_63:54;
+       } sx;
        struct uv2h_rh_gam_config_mmr_s {
                unsigned long   m_skt:6;                        /* RW */
                unsigned long   n_skt:4;                        /* RW */
                unsigned long   rsvd_10_63:54;
        } s2;
+       struct uv3h_rh_gam_config_mmr_s {
+               unsigned long   m_skt:6;                        /* RW */
+               unsigned long   n_skt:4;                        /* RW */
+               unsigned long   rsvd_10_63:54;
+       } s3;
 };
 
 /* ========================================================================= */
 /*                    UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR                      */
 /* ========================================================================= */
-#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR              0x1600010UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
+#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
+#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
 
 #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT    28
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT   52
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT  63
 #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK    0x00003ffff0000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK   0x00f0000000000000UL
+#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK  0x8000000000000000UL
 
 #define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT   28
 #define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT    48
@@ -1733,6 +2351,13 @@ union uvh_rh_gam_config_mmr_u {
 #define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK  0x00f0000000000000UL
 #define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
 
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT   28
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT  52
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK   0x00003ffff0000000UL
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK  0x00f0000000000000UL
+#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
 #define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT   28
 #define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT  52
 #define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
@@ -1740,12 +2365,23 @@ union uvh_rh_gam_config_mmr_u {
 #define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK  0x00f0000000000000UL
 #define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
 
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT   28
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT  52
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_MODE_SHFT   62
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK   0x00003ffff0000000UL
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK  0x00f0000000000000UL
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_MODE_MASK   0x4000000000000000UL
+#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
 union uvh_rh_gam_gru_overlay_config_mmr_u {
        unsigned long   v;
        struct uvh_rh_gam_gru_overlay_config_mmr_s {
                unsigned long   rsvd_0_27:28;
                unsigned long   base:18;                        /* RW */
-               unsigned long   rsvd_46_62:17;
+               unsigned long   rsvd_46_51:6;
+               unsigned long   n_gru:4;                        /* RW */
+               unsigned long   rsvd_56_62:7;
                unsigned long   enable:1;                       /* RW */
        } s;
        struct uv1h_rh_gam_gru_overlay_config_mmr_s {
@@ -1758,6 +2394,14 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
                unsigned long   rsvd_56_62:7;
                unsigned long   enable:1;                       /* RW */
        } s1;
+       struct uvxh_rh_gam_gru_overlay_config_mmr_s {
+               unsigned long   rsvd_0_27:28;
+               unsigned long   base:18;                        /* RW */
+               unsigned long   rsvd_46_51:6;
+               unsigned long   n_gru:4;                        /* RW */
+               unsigned long   rsvd_56_62:7;
+               unsigned long   enable:1;                       /* RW */
+       } sx;
        struct uv2h_rh_gam_gru_overlay_config_mmr_s {
                unsigned long   rsvd_0_27:28;
                unsigned long   base:18;                        /* RW */
@@ -1766,12 +2410,22 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
                unsigned long   rsvd_56_62:7;
                unsigned long   enable:1;                       /* RW */
        } s2;
+       struct uv3h_rh_gam_gru_overlay_config_mmr_s {
+               unsigned long   rsvd_0_27:28;
+               unsigned long   base:18;                        /* RW */
+               unsigned long   rsvd_46_51:6;
+               unsigned long   n_gru:4;                        /* RW */
+               unsigned long   rsvd_56_61:6;
+               unsigned long   mode:1;                         /* RW */
+               unsigned long   enable:1;                       /* RW */
+       } s3;
 };
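
For reference, every field in these generated headers is described by a matching _SHFT/_MASK pair, so decoding a field from the raw 64-bit value is always (v & MASK) >> SHFT. A minimal sketch using the new UVXH GRU overlay defines above; uv_read_local_mmr() is assumed to be the accessor provided by <asm/uv/uv_hub.h> and is not part of this hunk:

	unsigned long v, n_gru;

	/* Read the raw MMR, then pull out the n_gru field via its mask/shift pair. */
	v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
	n_gru = (v & UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK) >>
		UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT;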
 
 /* ========================================================================= */
 /*                   UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR                     */
 /* ========================================================================= */
-#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR            0x1600030UL
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
 
 #define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30
 #define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
@@ -1814,10 +2468,15 @@ union uvh_rh_gam_mmioh_overlay_config_mmr_u {
 /* ========================================================================= */
 /*                    UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR                      */
 /* ========================================================================= */
-#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR              0x1600028UL
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
+#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
+#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
+#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
 
 #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT    26
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT  63
 #define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK    0x00003ffffc000000UL
+#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK  0x8000000000000000UL
 
 #define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT   26
 #define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46
@@ -1826,11 +2485,21 @@ union uvh_rh_gam_mmioh_overlay_config_mmr_u {
 #define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL
 #define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
 
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT   26
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK   0x00003ffffc000000UL
+#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
 #define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT   26
 #define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
 #define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK   0x00003ffffc000000UL
 #define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
 
+#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT   26
+#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK   0x00003ffffc000000UL
+#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
+
 union uvh_rh_gam_mmr_overlay_config_mmr_u {
        unsigned long   v;
        struct uvh_rh_gam_mmr_overlay_config_mmr_s {
@@ -1846,18 +2515,30 @@ union uvh_rh_gam_mmr_overlay_config_mmr_u {
                unsigned long   rsvd_47_62:16;
                unsigned long   enable:1;                       /* RW */
        } s1;
+       struct uvxh_rh_gam_mmr_overlay_config_mmr_s {
+               unsigned long   rsvd_0_25:26;
+               unsigned long   base:20;                        /* RW */
+               unsigned long   rsvd_46_62:17;
+               unsigned long   enable:1;                       /* RW */
+       } sx;
        struct uv2h_rh_gam_mmr_overlay_config_mmr_s {
                unsigned long   rsvd_0_25:26;
                unsigned long   base:20;                        /* RW */
                unsigned long   rsvd_46_62:17;
                unsigned long   enable:1;                       /* RW */
        } s2;
+       struct uv3h_rh_gam_mmr_overlay_config_mmr_s {
+               unsigned long   rsvd_0_25:26;
+               unsigned long   base:20;                        /* RW */
+               unsigned long   rsvd_46_62:17;
+               unsigned long   enable:1;                       /* RW */
+       } s3;
 };
 
 /* ========================================================================= */
 /*                                 UVH_RTC                                   */
 /* ========================================================================= */
-#define UVH_RTC                                                0x340000UL
+#define UVH_RTC 0x340000UL
 
 #define UVH_RTC_REAL_TIME_CLOCK_SHFT                   0
 #define UVH_RTC_REAL_TIME_CLOCK_MASK                   0x00ffffffffffffffUL
@@ -1873,7 +2554,7 @@ union uvh_rtc_u {
 /* ========================================================================= */
 /*                           UVH_RTC1_INT_CONFIG                             */
 /* ========================================================================= */
-#define UVH_RTC1_INT_CONFIG                            0x615c0UL
+#define UVH_RTC1_INT_CONFIG 0x615c0UL
 
 #define UVH_RTC1_INT_CONFIG_VECTOR_SHFT                        0
 #define UVH_RTC1_INT_CONFIG_DM_SHFT                    8
@@ -1911,8 +2592,8 @@ union uvh_rtc1_int_config_u {
 /* ========================================================================= */
 /*                               UVH_SCRATCH5                                */
 /* ========================================================================= */
-#define UVH_SCRATCH5                                   0x2d0200UL
-#define UVH_SCRATCH5_32                                        0x778
+#define UVH_SCRATCH5 0x2d0200UL
+#define UVH_SCRATCH5_32 0x778
 
 #define UVH_SCRATCH5_SCRATCH5_SHFT                     0
 #define UVH_SCRATCH5_SCRATCH5_MASK                     0xffffffffffffffffUL
@@ -1925,79 +2606,79 @@ union uvh_scratch5_u {
 };
 
 /* ========================================================================= */
-/*                           UV2H_EVENT_OCCURRED2                            */
-/* ========================================================================= */
-#define UV2H_EVENT_OCCURRED2                           0x70100UL
-#define UV2H_EVENT_OCCURRED2_32                                0xb68
-
-#define UV2H_EVENT_OCCURRED2_RTC_0_SHFT                        0
-#define UV2H_EVENT_OCCURRED2_RTC_1_SHFT                        1
-#define UV2H_EVENT_OCCURRED2_RTC_2_SHFT                        2
-#define UV2H_EVENT_OCCURRED2_RTC_3_SHFT                        3
-#define UV2H_EVENT_OCCURRED2_RTC_4_SHFT                        4
-#define UV2H_EVENT_OCCURRED2_RTC_5_SHFT                        5
-#define UV2H_EVENT_OCCURRED2_RTC_6_SHFT                        6
-#define UV2H_EVENT_OCCURRED2_RTC_7_SHFT                        7
-#define UV2H_EVENT_OCCURRED2_RTC_8_SHFT                        8
-#define UV2H_EVENT_OCCURRED2_RTC_9_SHFT                        9
-#define UV2H_EVENT_OCCURRED2_RTC_10_SHFT               10
-#define UV2H_EVENT_OCCURRED2_RTC_11_SHFT               11
-#define UV2H_EVENT_OCCURRED2_RTC_12_SHFT               12
-#define UV2H_EVENT_OCCURRED2_RTC_13_SHFT               13
-#define UV2H_EVENT_OCCURRED2_RTC_14_SHFT               14
-#define UV2H_EVENT_OCCURRED2_RTC_15_SHFT               15
-#define UV2H_EVENT_OCCURRED2_RTC_16_SHFT               16
-#define UV2H_EVENT_OCCURRED2_RTC_17_SHFT               17
-#define UV2H_EVENT_OCCURRED2_RTC_18_SHFT               18
-#define UV2H_EVENT_OCCURRED2_RTC_19_SHFT               19
-#define UV2H_EVENT_OCCURRED2_RTC_20_SHFT               20
-#define UV2H_EVENT_OCCURRED2_RTC_21_SHFT               21
-#define UV2H_EVENT_OCCURRED2_RTC_22_SHFT               22
-#define UV2H_EVENT_OCCURRED2_RTC_23_SHFT               23
-#define UV2H_EVENT_OCCURRED2_RTC_24_SHFT               24
-#define UV2H_EVENT_OCCURRED2_RTC_25_SHFT               25
-#define UV2H_EVENT_OCCURRED2_RTC_26_SHFT               26
-#define UV2H_EVENT_OCCURRED2_RTC_27_SHFT               27
-#define UV2H_EVENT_OCCURRED2_RTC_28_SHFT               28
-#define UV2H_EVENT_OCCURRED2_RTC_29_SHFT               29
-#define UV2H_EVENT_OCCURRED2_RTC_30_SHFT               30
-#define UV2H_EVENT_OCCURRED2_RTC_31_SHFT               31
-#define UV2H_EVENT_OCCURRED2_RTC_0_MASK                        0x0000000000000001UL
-#define UV2H_EVENT_OCCURRED2_RTC_1_MASK                        0x0000000000000002UL
-#define UV2H_EVENT_OCCURRED2_RTC_2_MASK                        0x0000000000000004UL
-#define UV2H_EVENT_OCCURRED2_RTC_3_MASK                        0x0000000000000008UL
-#define UV2H_EVENT_OCCURRED2_RTC_4_MASK                        0x0000000000000010UL
-#define UV2H_EVENT_OCCURRED2_RTC_5_MASK                        0x0000000000000020UL
-#define UV2H_EVENT_OCCURRED2_RTC_6_MASK                        0x0000000000000040UL
-#define UV2H_EVENT_OCCURRED2_RTC_7_MASK                        0x0000000000000080UL
-#define UV2H_EVENT_OCCURRED2_RTC_8_MASK                        0x0000000000000100UL
-#define UV2H_EVENT_OCCURRED2_RTC_9_MASK                        0x0000000000000200UL
-#define UV2H_EVENT_OCCURRED2_RTC_10_MASK               0x0000000000000400UL
-#define UV2H_EVENT_OCCURRED2_RTC_11_MASK               0x0000000000000800UL
-#define UV2H_EVENT_OCCURRED2_RTC_12_MASK               0x0000000000001000UL
-#define UV2H_EVENT_OCCURRED2_RTC_13_MASK               0x0000000000002000UL
-#define UV2H_EVENT_OCCURRED2_RTC_14_MASK               0x0000000000004000UL
-#define UV2H_EVENT_OCCURRED2_RTC_15_MASK               0x0000000000008000UL
-#define UV2H_EVENT_OCCURRED2_RTC_16_MASK               0x0000000000010000UL
-#define UV2H_EVENT_OCCURRED2_RTC_17_MASK               0x0000000000020000UL
-#define UV2H_EVENT_OCCURRED2_RTC_18_MASK               0x0000000000040000UL
-#define UV2H_EVENT_OCCURRED2_RTC_19_MASK               0x0000000000080000UL
-#define UV2H_EVENT_OCCURRED2_RTC_20_MASK               0x0000000000100000UL
-#define UV2H_EVENT_OCCURRED2_RTC_21_MASK               0x0000000000200000UL
-#define UV2H_EVENT_OCCURRED2_RTC_22_MASK               0x0000000000400000UL
-#define UV2H_EVENT_OCCURRED2_RTC_23_MASK               0x0000000000800000UL
-#define UV2H_EVENT_OCCURRED2_RTC_24_MASK               0x0000000001000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_25_MASK               0x0000000002000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_26_MASK               0x0000000004000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_27_MASK               0x0000000008000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_28_MASK               0x0000000010000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_29_MASK               0x0000000020000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_30_MASK               0x0000000040000000UL
-#define UV2H_EVENT_OCCURRED2_RTC_31_MASK               0x0000000080000000UL
-
-union uv2h_event_occurred2_u {
+/*                          UVXH_EVENT_OCCURRED2                             */
+/* ========================================================================= */
+#define UVXH_EVENT_OCCURRED2 0x70100UL
+#define UVXH_EVENT_OCCURRED2_32 0xb68
+
+#define UVXH_EVENT_OCCURRED2_RTC_0_SHFT                        0
+#define UVXH_EVENT_OCCURRED2_RTC_1_SHFT                        1
+#define UVXH_EVENT_OCCURRED2_RTC_2_SHFT                        2
+#define UVXH_EVENT_OCCURRED2_RTC_3_SHFT                        3
+#define UVXH_EVENT_OCCURRED2_RTC_4_SHFT                        4
+#define UVXH_EVENT_OCCURRED2_RTC_5_SHFT                        5
+#define UVXH_EVENT_OCCURRED2_RTC_6_SHFT                        6
+#define UVXH_EVENT_OCCURRED2_RTC_7_SHFT                        7
+#define UVXH_EVENT_OCCURRED2_RTC_8_SHFT                        8
+#define UVXH_EVENT_OCCURRED2_RTC_9_SHFT                        9
+#define UVXH_EVENT_OCCURRED2_RTC_10_SHFT               10
+#define UVXH_EVENT_OCCURRED2_RTC_11_SHFT               11
+#define UVXH_EVENT_OCCURRED2_RTC_12_SHFT               12
+#define UVXH_EVENT_OCCURRED2_RTC_13_SHFT               13
+#define UVXH_EVENT_OCCURRED2_RTC_14_SHFT               14
+#define UVXH_EVENT_OCCURRED2_RTC_15_SHFT               15
+#define UVXH_EVENT_OCCURRED2_RTC_16_SHFT               16
+#define UVXH_EVENT_OCCURRED2_RTC_17_SHFT               17
+#define UVXH_EVENT_OCCURRED2_RTC_18_SHFT               18
+#define UVXH_EVENT_OCCURRED2_RTC_19_SHFT               19
+#define UVXH_EVENT_OCCURRED2_RTC_20_SHFT               20
+#define UVXH_EVENT_OCCURRED2_RTC_21_SHFT               21
+#define UVXH_EVENT_OCCURRED2_RTC_22_SHFT               22
+#define UVXH_EVENT_OCCURRED2_RTC_23_SHFT               23
+#define UVXH_EVENT_OCCURRED2_RTC_24_SHFT               24
+#define UVXH_EVENT_OCCURRED2_RTC_25_SHFT               25
+#define UVXH_EVENT_OCCURRED2_RTC_26_SHFT               26
+#define UVXH_EVENT_OCCURRED2_RTC_27_SHFT               27
+#define UVXH_EVENT_OCCURRED2_RTC_28_SHFT               28
+#define UVXH_EVENT_OCCURRED2_RTC_29_SHFT               29
+#define UVXH_EVENT_OCCURRED2_RTC_30_SHFT               30
+#define UVXH_EVENT_OCCURRED2_RTC_31_SHFT               31
+#define UVXH_EVENT_OCCURRED2_RTC_0_MASK                        0x0000000000000001UL
+#define UVXH_EVENT_OCCURRED2_RTC_1_MASK                        0x0000000000000002UL
+#define UVXH_EVENT_OCCURRED2_RTC_2_MASK                        0x0000000000000004UL
+#define UVXH_EVENT_OCCURRED2_RTC_3_MASK                        0x0000000000000008UL
+#define UVXH_EVENT_OCCURRED2_RTC_4_MASK                        0x0000000000000010UL
+#define UVXH_EVENT_OCCURRED2_RTC_5_MASK                        0x0000000000000020UL
+#define UVXH_EVENT_OCCURRED2_RTC_6_MASK                        0x0000000000000040UL
+#define UVXH_EVENT_OCCURRED2_RTC_7_MASK                        0x0000000000000080UL
+#define UVXH_EVENT_OCCURRED2_RTC_8_MASK                        0x0000000000000100UL
+#define UVXH_EVENT_OCCURRED2_RTC_9_MASK                        0x0000000000000200UL
+#define UVXH_EVENT_OCCURRED2_RTC_10_MASK               0x0000000000000400UL
+#define UVXH_EVENT_OCCURRED2_RTC_11_MASK               0x0000000000000800UL
+#define UVXH_EVENT_OCCURRED2_RTC_12_MASK               0x0000000000001000UL
+#define UVXH_EVENT_OCCURRED2_RTC_13_MASK               0x0000000000002000UL
+#define UVXH_EVENT_OCCURRED2_RTC_14_MASK               0x0000000000004000UL
+#define UVXH_EVENT_OCCURRED2_RTC_15_MASK               0x0000000000008000UL
+#define UVXH_EVENT_OCCURRED2_RTC_16_MASK               0x0000000000010000UL
+#define UVXH_EVENT_OCCURRED2_RTC_17_MASK               0x0000000000020000UL
+#define UVXH_EVENT_OCCURRED2_RTC_18_MASK               0x0000000000040000UL
+#define UVXH_EVENT_OCCURRED2_RTC_19_MASK               0x0000000000080000UL
+#define UVXH_EVENT_OCCURRED2_RTC_20_MASK               0x0000000000100000UL
+#define UVXH_EVENT_OCCURRED2_RTC_21_MASK               0x0000000000200000UL
+#define UVXH_EVENT_OCCURRED2_RTC_22_MASK               0x0000000000400000UL
+#define UVXH_EVENT_OCCURRED2_RTC_23_MASK               0x0000000000800000UL
+#define UVXH_EVENT_OCCURRED2_RTC_24_MASK               0x0000000001000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_25_MASK               0x0000000002000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_26_MASK               0x0000000004000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_27_MASK               0x0000000008000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_28_MASK               0x0000000010000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_29_MASK               0x0000000020000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_30_MASK               0x0000000040000000UL
+#define UVXH_EVENT_OCCURRED2_RTC_31_MASK               0x0000000080000000UL
+
+union uvxh_event_occurred2_u {
        unsigned long   v;
-       struct uv2h_event_occurred2_s {
+       struct uvxh_event_occurred2_s {
                unsigned long   rtc_0:1;                        /* RW */
                unsigned long   rtc_1:1;                        /* RW */
                unsigned long   rtc_2:1;                        /* RW */
@@ -2031,29 +2712,46 @@ union uv2h_event_occurred2_u {
                unsigned long   rtc_30:1;                       /* RW */
                unsigned long   rtc_31:1;                       /* RW */
                unsigned long   rsvd_32_63:32;
-       } s1;
+       } sx;
 };
 
 /* ========================================================================= */
-/*                        UV2H_EVENT_OCCURRED2_ALIAS                         */
+/*                       UVXH_EVENT_OCCURRED2_ALIAS                          */
 /* ========================================================================= */
-#define UV2H_EVENT_OCCURRED2_ALIAS                     0x70108UL
-#define UV2H_EVENT_OCCURRED2_ALIAS_32                  0xb70
+#define UVXH_EVENT_OCCURRED2_ALIAS 0x70108UL
+#define UVXH_EVENT_OCCURRED2_ALIAS_32 0xb70
+
 
 /* ========================================================================= */
-/*                    UV2H_LB_BAU_SB_ACTIVATION_STATUS_2                     */
+/*                   UVXH_LB_BAU_SB_ACTIVATION_STATUS_2                      */
 /* ========================================================================= */
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2             0x320130UL
-#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32          0x9f0
+#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
+#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
+#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0
+#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x320130UL
+#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x320130UL
+
+#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
+#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
 
 #define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
 #define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
 
-union uv2h_lb_bau_sb_activation_status_2_u {
+#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
+#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
+
+union uvxh_lb_bau_sb_activation_status_2_u {
        unsigned long   v;
+       struct uvxh_lb_bau_sb_activation_status_2_s {
+               unsigned long   aux_error:64;                   /* RW */
+       } sx;
        struct uv2h_lb_bau_sb_activation_status_2_s {
                unsigned long   aux_error:64;                   /* RW */
-       } s1;
+       } s2;
+       struct uv3h_lb_bau_sb_activation_status_2_s {
+               unsigned long   aux_error:64;                   /* RW */
+       } s3;
 };
 
 /* ========================================================================= */
@@ -2073,5 +2771,87 @@ union uv1h_lb_target_physical_apic_id_mask_u {
        } s1;
 };
 
+/* ========================================================================= */
+/*                   UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR                   */
+/* ========================================================================= */
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR          0x1603000UL
+
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT        26
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT        46
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK        0x00003ffffc000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK        0x000fc00000000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uv3h_rh_gam_mmioh_overlay_config0_mmr_u {
+       unsigned long   v;
+       struct uv3h_rh_gam_mmioh_overlay_config0_mmr_s {
+               unsigned long   rsvd_0_25:26;
+               unsigned long   base:20;                        /* RW */
+               unsigned long   m_io:6;                         /* RW */
+               unsigned long   n_io:4;
+               unsigned long   rsvd_56_62:7;
+               unsigned long   enable:1;                       /* RW */
+       } s3;
+};
+
+/* ========================================================================= */
+/*                   UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR                   */
+/* ========================================================================= */
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR          0x1604000UL
+
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_SHFT        26
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT        46
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK        0x00003ffffc000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK        0x000fc00000000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uv3h_rh_gam_mmioh_overlay_config1_mmr_u {
+       unsigned long   v;
+       struct uv3h_rh_gam_mmioh_overlay_config1_mmr_s {
+               unsigned long   rsvd_0_25:26;
+               unsigned long   base:20;                        /* RW */
+               unsigned long   m_io:6;                         /* RW */
+               unsigned long   n_io:4;
+               unsigned long   rsvd_56_62:7;
+               unsigned long   enable:1;                       /* RW */
+       } s3;
+};
+
+/* ========================================================================= */
+/*                  UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR                   */
+/* ========================================================================= */
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR         0x1603800UL
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH   128
+
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_SHFT 0
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000007fffUL
+
+union uv3h_rh_gam_mmioh_redirect_config0_mmr_u {
+       unsigned long   v;
+       struct uv3h_rh_gam_mmioh_redirect_config0_mmr_s {
+               unsigned long   nasid:15;                       /* RW */
+               unsigned long   rsvd_15_63:49;
+       } s3;
+};
+
+/* ========================================================================= */
+/*                  UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR                   */
+/* ========================================================================= */
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR         0x1604800UL
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH   128
+
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_SHFT 0
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000007fffUL
+
+union uv3h_rh_gam_mmioh_redirect_config1_mmr_u {
+       unsigned long   v;
+       struct uv3h_rh_gam_mmioh_redirect_config1_mmr_s {
+               unsigned long   nasid:15;                       /* RW */
+               unsigned long   rsvd_15_63:49;
+       } s3;
+};
+
 
 #endif /* _ASM_X86_UV_UV_MMRS_H */
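
The sx/s3 members added to these unions follow the existing access pattern: read the 64-bit register into the union's v word, then use the bit-field view matching the hub generation (s1 for UV1, s2 for UV2, s3 for UV3, sx for fields common to UV2 and later). A minimal sketch, assuming uv_read_local_mmr() from <asm/uv/uv_hub.h>; uv3_router_select() is a hypothetical illustration, not kernel code:

	static inline int uv3_router_select(void)
	{
		union uvh_node_id_u node_id;

		/* Read the raw 64-bit MMR, then decode it through the UV3 view. */
		node_id.v = uv_read_local_mmr(UVH_NODE_ID);
		return node_id.s3.router_select;	/* field present only on UV3 */
	}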
index 57693498519c4a69962417c8889b376182b49e2f..7669941cc9d230c1261d82e3eba4a6d92b238eaa 100644 (file)
@@ -181,19 +181,38 @@ struct x86_platform_ops {
 };
 
 struct pci_dev;
+struct msi_msg;
 
 struct x86_msi_ops {
        int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
+       void (*compose_msi_msg)(struct pci_dev *dev, unsigned int irq,
+                               unsigned int dest, struct msi_msg *msg,
+                              u8 hpet_id);
        void (*teardown_msi_irq)(unsigned int irq);
        void (*teardown_msi_irqs)(struct pci_dev *dev);
        void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
+       int  (*setup_hpet_msi)(unsigned int irq, unsigned int id);
 };
 
+struct IO_APIC_route_entry;
+struct io_apic_irq_attr;
+struct irq_data;
+struct cpumask;
+
 struct x86_io_apic_ops {
-       void            (*init)  (void);
-       unsigned int    (*read)  (unsigned int apic, unsigned int reg);
-       void            (*write) (unsigned int apic, unsigned int reg, unsigned int value);
-       void            (*modify)(unsigned int apic, unsigned int reg, unsigned int value);
+       void            (*init)   (void);
+       unsigned int    (*read)   (unsigned int apic, unsigned int reg);
+       void            (*write)  (unsigned int apic, unsigned int reg, unsigned int value);
+       void            (*modify) (unsigned int apic, unsigned int reg, unsigned int value);
+       void            (*disable)(void);
+       void            (*print_entries)(unsigned int apic, unsigned int nr_entries);
+       int             (*set_affinity)(struct irq_data *data,
+                                       const struct cpumask *mask,
+                                       bool force);
+       int             (*setup_entry)(int irq, struct IO_APIC_route_entry *entry,
+                                      unsigned int destination, int vector,
+                                      struct io_apic_irq_attr *attr);
+       void            (*eoi_ioapic_pin)(int apic, int pin, int vector);
 };
 
 extern struct x86_init_ops x86_init;
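
The new members extend the boot-time ops tables: x86_msi_ops gains compose_msi_msg() and setup_hpet_msi(), while x86_io_apic_ops gains disable/print_entries/set_affinity/setup_entry/eoi_ioapic_pin hooks, so code such as interrupt remapping can substitute its own implementations for the native ones. A sketch of the override pattern, assuming the usual extern instances (x86_msi, x86_io_apic) declared elsewhere in this header; my_eoi_ioapic_pin() is a hypothetical replacement, not kernel code:

	static void my_eoi_ioapic_pin(int apic, int pin, int vector)
	{
		/* e.g. an interrupt-remapping aware EOI for this pin */
	}

	static void __init my_platform_init(void)
	{
		/* Replace only the hook we care about; the rest keep the native ops. */
		x86_io_apic.eoi_ioapic_pin = my_eoi_ioapic_pin;
	}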
index f8fde90bc45e37171a7390723e3afff2ee9a1788..d8829751b3f895e9fd19fa6bd597758650f417b8 100644 (file)
 #ifdef CONFIG_KMEMCHECK
 /* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */
 # include <asm-generic/xor.h>
+#elif !defined(_ASM_X86_XOR_H)
+#define _ASM_X86_XOR_H
+
+/*
+ * Optimized RAID-5 checksumming functions for SSE.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * You should have received a copy of the GNU General Public License
+ * (for example /usr/src/linux/COPYING); if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Cache avoiding checksumming functions utilizing KNI instructions
+ * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
+ */
+
+/*
+ * Based on
+ * High-speed RAID5 checksumming functions utilizing SSE instructions.
+ * Copyright (C) 1998 Ingo Molnar.
+ */
+
+/*
+ * x86-64 changes / gcc fixes from Andi Kleen.
+ * Copyright 2002 Andi Kleen, SuSE Labs.
+ *
+ * This hasn't been optimized for the hammer yet, but there are likely
+ * no advantages to be gotten from x86-64 here anyways.
+ */
+
+#include <asm/i387.h>
+
+#ifdef CONFIG_X86_32
+/* reduce register pressure */
+# define XOR_CONSTANT_CONSTRAINT "i"
 #else
+# define XOR_CONSTANT_CONSTRAINT "re"
+#endif
+
+#define OFFS(x)                "16*("#x")"
+#define PF_OFFS(x)     "256+16*("#x")"
+#define PF0(x)         "       prefetchnta "PF_OFFS(x)"(%[p1])         ;\n"
+#define LD(x, y)       "       movaps "OFFS(x)"(%[p1]), %%xmm"#y"      ;\n"
+#define ST(x, y)       "       movaps %%xmm"#y", "OFFS(x)"(%[p1])      ;\n"
+#define PF1(x)         "       prefetchnta "PF_OFFS(x)"(%[p2])         ;\n"
+#define PF2(x)         "       prefetchnta "PF_OFFS(x)"(%[p3])         ;\n"
+#define PF3(x)         "       prefetchnta "PF_OFFS(x)"(%[p4])         ;\n"
+#define PF4(x)         "       prefetchnta "PF_OFFS(x)"(%[p5])         ;\n"
+#define XO1(x, y)      "       xorps "OFFS(x)"(%[p2]), %%xmm"#y"       ;\n"
+#define XO2(x, y)      "       xorps "OFFS(x)"(%[p3]), %%xmm"#y"       ;\n"
+#define XO3(x, y)      "       xorps "OFFS(x)"(%[p4]), %%xmm"#y"       ;\n"
+#define XO4(x, y)      "       xorps "OFFS(x)"(%[p5]), %%xmm"#y"       ;\n"
+#define NOP(x)
+
+#define BLK64(pf, op, i)                               \
+               pf(i)                                   \
+               op(i, 0)                                \
+                       op(i + 1, 1)                    \
+                               op(i + 2, 2)            \
+                                       op(i + 3, 3)
+
+static void
+xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+       unsigned long lines = bytes >> 8;
+
+       kernel_fpu_begin();
+
+       asm volatile(
+#undef BLOCK
+#define BLOCK(i)                                       \
+               LD(i, 0)                                \
+                       LD(i + 1, 1)                    \
+               PF1(i)                                  \
+                               PF1(i + 2)              \
+                               LD(i + 2, 2)            \
+                                       LD(i + 3, 3)    \
+               PF0(i + 4)                              \
+                               PF0(i + 6)              \
+               XO1(i, 0)                               \
+                       XO1(i + 1, 1)                   \
+                               XO1(i + 2, 2)           \
+                                       XO1(i + 3, 3)   \
+               ST(i, 0)                                \
+                       ST(i + 1, 1)                    \
+                               ST(i + 2, 2)            \
+                                       ST(i + 3, 3)    \
+
+
+               PF0(0)
+                               PF0(2)
+
+       " .align 32                     ;\n"
+       " 1:                            ;\n"
+
+               BLOCK(0)
+               BLOCK(4)
+               BLOCK(8)
+               BLOCK(12)
+
+       "       add %[inc], %[p1]       ;\n"
+       "       add %[inc], %[p2]       ;\n"
+       "       dec %[cnt]              ;\n"
+       "       jnz 1b                  ;\n"
+       : [cnt] "+r" (lines),
+         [p1] "+r" (p1), [p2] "+r" (p2)
+       : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+       : "memory");
+
+       kernel_fpu_end();
+}
+
+static void
+xor_sse_2_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+       unsigned long lines = bytes >> 8;
+
+       kernel_fpu_begin();
+
+       asm volatile(
+#undef BLOCK
+#define BLOCK(i)                       \
+               BLK64(PF0, LD, i)       \
+               BLK64(PF1, XO1, i)      \
+               BLK64(NOP, ST, i)       \
+
+       " .align 32                     ;\n"
+       " 1:                            ;\n"
+
+               BLOCK(0)
+               BLOCK(4)
+               BLOCK(8)
+               BLOCK(12)
+
+       "       add %[inc], %[p1]       ;\n"
+       "       add %[inc], %[p2]       ;\n"
+       "       dec %[cnt]              ;\n"
+       "       jnz 1b                  ;\n"
+       : [cnt] "+r" (lines),
+         [p1] "+r" (p1), [p2] "+r" (p2)
+       : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+       : "memory");
+
+       kernel_fpu_end();
+}
+
+static void
+xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+         unsigned long *p3)
+{
+       unsigned long lines = bytes >> 8;
+
+       kernel_fpu_begin();
+
+       asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+               PF1(i)                                  \
+                               PF1(i + 2)              \
+               LD(i, 0)                                \
+                       LD(i + 1, 1)                    \
+                               LD(i + 2, 2)            \
+                                       LD(i + 3, 3)    \
+               PF2(i)                                  \
+                               PF2(i + 2)              \
+               PF0(i + 4)                              \
+                               PF0(i + 6)              \
+               XO1(i, 0)                               \
+                       XO1(i + 1, 1)                   \
+                               XO1(i + 2, 2)           \
+                                       XO1(i + 3, 3)   \
+               XO2(i, 0)                               \
+                       XO2(i + 1, 1)                   \
+                               XO2(i + 2, 2)           \
+                                       XO2(i + 3, 3)   \
+               ST(i, 0)                                \
+                       ST(i + 1, 1)                    \
+                               ST(i + 2, 2)            \
+                                       ST(i + 3, 3)    \
+
+
+               PF0(0)
+                               PF0(2)
+
+       " .align 32                     ;\n"
+       " 1:                            ;\n"
+
+               BLOCK(0)
+               BLOCK(4)
+               BLOCK(8)
+               BLOCK(12)
+
+       "       add %[inc], %[p1]       ;\n"
+       "       add %[inc], %[p2]       ;\n"
+       "       add %[inc], %[p3]       ;\n"
+       "       dec %[cnt]              ;\n"
+       "       jnz 1b                  ;\n"
+       : [cnt] "+r" (lines),
+         [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
+       : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+       : "memory");
+
+       kernel_fpu_end();
+}
+
+static void
+xor_sse_3_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+              unsigned long *p3)
+{
+       unsigned long lines = bytes >> 8;
+
+       kernel_fpu_begin();
+
+       asm volatile(
+#undef BLOCK
+#define BLOCK(i)                       \
+               BLK64(PF0, LD, i)       \
+               BLK64(PF1, XO1, i)      \
+               BLK64(PF2, XO2, i)      \
+               BLK64(NOP, ST, i)       \
+
+       " .align 32                     ;\n"
+       " 1:                            ;\n"
+
+               BLOCK(0)
+               BLOCK(4)
+               BLOCK(8)
+               BLOCK(12)
+
+       "       add %[inc], %[p1]       ;\n"
+       "       add %[inc], %[p2]       ;\n"
+       "       add %[inc], %[p3]       ;\n"
+       "       dec %[cnt]              ;\n"
+       "       jnz 1b                  ;\n"
+       : [cnt] "+r" (lines),
+         [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
+       : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+       : "memory");
+
+       kernel_fpu_end();
+}
+
+static void
+xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+         unsigned long *p3, unsigned long *p4)
+{
+       unsigned long lines = bytes >> 8;
+
+       kernel_fpu_begin();
+
+       asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+               PF1(i)                                  \
+                               PF1(i + 2)              \
+               LD(i, 0)                                \
+                       LD(i + 1, 1)                    \
+                               LD(i + 2, 2)            \
+                                       LD(i + 3, 3)    \
+               PF2(i)                                  \
+                               PF2(i + 2)              \
+               XO1(i, 0)                               \
+                       XO1(i + 1, 1)                   \
+                               XO1(i + 2, 2)           \
+                                       XO1(i + 3, 3)   \
+               PF3(i)                                  \
+                               PF3(i + 2)              \
+               PF0(i + 4)                              \
+                               PF0(i + 6)              \
+               XO2(i, 0)                               \
+                       XO2(i + 1, 1)                   \
+                               XO2(i + 2, 2)           \
+                                       XO2(i + 3, 3)   \
+               XO3(i, 0)                               \
+                       XO3(i + 1, 1)                   \
+                               XO3(i + 2, 2)           \
+                                       XO3(i + 3, 3)   \
+               ST(i, 0)                                \
+                       ST(i + 1, 1)                    \
+                               ST(i + 2, 2)            \
+                                       ST(i + 3, 3)    \
+
+
+               PF0(0)
+                               PF0(2)
+
+       " .align 32                     ;\n"
+       " 1:                            ;\n"
+
+               BLOCK(0)
+               BLOCK(4)
+               BLOCK(8)
+               BLOCK(12)
+
+       "       add %[inc], %[p1]       ;\n"
+       "       add %[inc], %[p2]       ;\n"
+       "       add %[inc], %[p3]       ;\n"
+       "       add %[inc], %[p4]       ;\n"
+       "       dec %[cnt]              ;\n"
+       "       jnz 1b                  ;\n"
+       : [cnt] "+r" (lines), [p1] "+r" (p1),
+         [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
+       : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+       : "memory");
+
+       kernel_fpu_end();
+}
+
+static void
+xor_sse_4_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+              unsigned long *p3, unsigned long *p4)
+{
+       unsigned long lines = bytes >> 8;
+
+       kernel_fpu_begin();
+
+       asm volatile(
+#undef BLOCK
+#define BLOCK(i)                       \
+               BLK64(PF0, LD, i)       \
+               BLK64(PF1, XO1, i)      \
+               BLK64(PF2, XO2, i)      \
+               BLK64(PF3, XO3, i)      \
+               BLK64(NOP, ST, i)       \
+
+       " .align 32                     ;\n"
+       " 1:                            ;\n"
+
+               BLOCK(0)
+               BLOCK(4)
+               BLOCK(8)
+               BLOCK(12)
+
+       "       add %[inc], %[p1]       ;\n"
+       "       add %[inc], %[p2]       ;\n"
+       "       add %[inc], %[p3]       ;\n"
+       "       add %[inc], %[p4]       ;\n"
+       "       dec %[cnt]              ;\n"
+       "       jnz 1b                  ;\n"
+       : [cnt] "+r" (lines), [p1] "+r" (p1),
+         [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
+       : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+       : "memory");
+
+       kernel_fpu_end();
+}
+
+static void
+xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+         unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+       unsigned long lines = bytes >> 8;
+
+       kernel_fpu_begin();
+
+       asm volatile(
+#undef BLOCK
+#define BLOCK(i) \
+               PF1(i)                                  \
+                               PF1(i + 2)              \
+               LD(i, 0)                                \
+                       LD(i + 1, 1)                    \
+                               LD(i + 2, 2)            \
+                                       LD(i + 3, 3)    \
+               PF2(i)                                  \
+                               PF2(i + 2)              \
+               XO1(i, 0)                               \
+                       XO1(i + 1, 1)                   \
+                               XO1(i + 2, 2)           \
+                                       XO1(i + 3, 3)   \
+               PF3(i)                                  \
+                               PF3(i + 2)              \
+               XO2(i, 0)                               \
+                       XO2(i + 1, 1)                   \
+                               XO2(i + 2, 2)           \
+                                       XO2(i + 3, 3)   \
+               PF4(i)                                  \
+                               PF4(i + 2)              \
+               PF0(i + 4)                              \
+                               PF0(i + 6)              \
+               XO3(i, 0)                               \
+                       XO3(i + 1, 1)                   \
+                               XO3(i + 2, 2)           \
+                                       XO3(i + 3, 3)   \
+               XO4(i, 0)                               \
+                       XO4(i + 1, 1)                   \
+                               XO4(i + 2, 2)           \
+                                       XO4(i + 3, 3)   \
+               ST(i, 0)                                \
+                       ST(i + 1, 1)                    \
+                               ST(i + 2, 2)            \
+                                       ST(i + 3, 3)    \
+
+
+               PF0(0)
+                               PF0(2)
+
+       " .align 32                     ;\n"
+       " 1:                            ;\n"
+
+               BLOCK(0)
+               BLOCK(4)
+               BLOCK(8)
+               BLOCK(12)
+
+       "       add %[inc], %[p1]       ;\n"
+       "       add %[inc], %[p2]       ;\n"
+       "       add %[inc], %[p3]       ;\n"
+       "       add %[inc], %[p4]       ;\n"
+       "       add %[inc], %[p5]       ;\n"
+       "       dec %[cnt]              ;\n"
+       "       jnz 1b                  ;\n"
+       : [cnt] "+r" (lines), [p1] "+r" (p1), [p2] "+r" (p2),
+         [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5)
+       : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+       : "memory");
+
+       kernel_fpu_end();
+}
+
+static void
+xor_sse_5_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+              unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+       unsigned long lines = bytes >> 8;
+
+       kernel_fpu_begin();
+
+       asm volatile(
+#undef BLOCK
+#define BLOCK(i)                       \
+               BLK64(PF0, LD, i)       \
+               BLK64(PF1, XO1, i)      \
+               BLK64(PF2, XO2, i)      \
+               BLK64(PF3, XO3, i)      \
+               BLK64(PF4, XO4, i)      \
+               BLK64(NOP, ST, i)       \
+
+       " .align 32                     ;\n"
+       " 1:                            ;\n"
+
+               BLOCK(0)
+               BLOCK(4)
+               BLOCK(8)
+               BLOCK(12)
+
+       "       add %[inc], %[p1]       ;\n"
+       "       add %[inc], %[p2]       ;\n"
+       "       add %[inc], %[p3]       ;\n"
+       "       add %[inc], %[p4]       ;\n"
+       "       add %[inc], %[p5]       ;\n"
+       "       dec %[cnt]              ;\n"
+       "       jnz 1b                  ;\n"
+       : [cnt] "+r" (lines), [p1] "+r" (p1), [p2] "+r" (p2),
+         [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5)
+       : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
+       : "memory");
+
+       kernel_fpu_end();
+}
+
+static struct xor_block_template xor_block_sse_pf64 = {
+       .name = "prefetch64-sse",
+       .do_2 = xor_sse_2_pf64,
+       .do_3 = xor_sse_3_pf64,
+       .do_4 = xor_sse_4_pf64,
+       .do_5 = xor_sse_5_pf64,
+};
+
+#undef LD
+#undef XO1
+#undef XO2
+#undef XO3
+#undef XO4
+#undef ST
+#undef NOP
+#undef BLK64
+#undef BLOCK
+
+#undef XOR_CONSTANT_CONSTRAINT
+
 #ifdef CONFIG_X86_32
 # include <asm/xor_32.h>
 #else
 # include <asm/xor_64.h>
 #endif
-#endif
+
+#define XOR_SELECT_TEMPLATE(FASTEST) \
+       AVX_SELECT(FASTEST)
+
+#endif /* _ASM_X86_XOR_H */
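A minimal sketch of what the xor_sse_2() inline assembly above computes, written as user-space C with SSE intrinsics (assumptions: 16-byte-aligned buffers and a byte count that is a multiple of 256; kernel_fpu_begin()/kernel_fpu_end() and the prefetchnta hints are omitted — this is an illustration, not part of the patch):

#include <xmmintrin.h>

/* One "line" is 256 bytes, matching the bytes >> 8 loop count and the
 * 256UL pointer increment used by the asm versions above. */
static void xor_sse_2_sketch(unsigned long bytes, float *p1, const float *p2)
{
	unsigned long lines = bytes >> 8;

	while (lines--) {
		int i;

		for (i = 0; i < 64; i += 4) {	/* 16 xmm-wide chunks per line */
			__m128 a = _mm_load_ps(p1 + i);		/* LD       */
			__m128 b = _mm_load_ps(p2 + i);
			_mm_store_ps(p1 + i, _mm_xor_ps(a, b));	/* XO1 + ST */
		}
		p1 += 64;	/* advance both buffers by 256 bytes */
		p2 += 64;
	}
}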
index f79cb7ec0e06a4c7f36361a381c3156d45dab365..ce05722e3c68bce4d72a1bcdc9e798b5014581cf 100644 (file)
@@ -2,7 +2,7 @@
 #define _ASM_X86_XOR_32_H
 
 /*
- * Optimized RAID-5 checksumming functions for MMX and SSE.
+ * Optimized RAID-5 checksumming functions for MMX.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -529,290 +529,6 @@ static struct xor_block_template xor_block_p5_mmx = {
        .do_5 = xor_p5_mmx_5,
 };
 
-/*
- * Cache avoiding checksumming functions utilizing KNI instructions
- * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
- */
-
-#define OFFS(x)                "16*("#x")"
-#define PF_OFFS(x)     "256+16*("#x")"
-#define        PF0(x)          "       prefetchnta "PF_OFFS(x)"(%1)            ;\n"
-#define LD(x, y)       "       movaps   "OFFS(x)"(%1), %%xmm"#y"       ;\n"
-#define ST(x, y)       "       movaps %%xmm"#y",   "OFFS(x)"(%1)       ;\n"
-#define PF1(x)         "       prefetchnta "PF_OFFS(x)"(%2)            ;\n"
-#define PF2(x)         "       prefetchnta "PF_OFFS(x)"(%3)            ;\n"
-#define PF3(x)         "       prefetchnta "PF_OFFS(x)"(%4)            ;\n"
-#define PF4(x)         "       prefetchnta "PF_OFFS(x)"(%5)            ;\n"
-#define PF5(x)         "       prefetchnta "PF_OFFS(x)"(%6)            ;\n"
-#define XO1(x, y)      "       xorps   "OFFS(x)"(%2), %%xmm"#y"        ;\n"
-#define XO2(x, y)      "       xorps   "OFFS(x)"(%3), %%xmm"#y"        ;\n"
-#define XO3(x, y)      "       xorps   "OFFS(x)"(%4), %%xmm"#y"        ;\n"
-#define XO4(x, y)      "       xorps   "OFFS(x)"(%5), %%xmm"#y"        ;\n"
-#define XO5(x, y)      "       xorps   "OFFS(x)"(%6), %%xmm"#y"        ;\n"
-
-
-static void
-xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-{
-       unsigned long lines = bytes >> 8;
-
-       kernel_fpu_begin();
-
-       asm volatile(
-#undef BLOCK
-#define BLOCK(i)                                       \
-               LD(i, 0)                                \
-                       LD(i + 1, 1)                    \
-               PF1(i)                                  \
-                               PF1(i + 2)              \
-                               LD(i + 2, 2)            \
-                                       LD(i + 3, 3)    \
-               PF0(i + 4)                              \
-                               PF0(i + 6)              \
-               XO1(i, 0)                               \
-                       XO1(i + 1, 1)                   \
-                               XO1(i + 2, 2)           \
-                                       XO1(i + 3, 3)   \
-               ST(i, 0)                                \
-                       ST(i + 1, 1)                    \
-                               ST(i + 2, 2)            \
-                                       ST(i + 3, 3)    \
-
-
-               PF0(0)
-                               PF0(2)
-
-       " .align 32                     ;\n"
-       " 1:                            ;\n"
-
-               BLOCK(0)
-               BLOCK(4)
-               BLOCK(8)
-               BLOCK(12)
-
-       "       addl $256, %1           ;\n"
-       "       addl $256, %2           ;\n"
-       "       decl %0                 ;\n"
-       "       jnz 1b                  ;\n"
-       : "+r" (lines),
-         "+r" (p1), "+r" (p2)
-       :
-       : "memory");
-
-       kernel_fpu_end();
-}
-
-static void
-xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-         unsigned long *p3)
-{
-       unsigned long lines = bytes >> 8;
-
-       kernel_fpu_begin();
-
-       asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
-               PF1(i)                                  \
-                               PF1(i + 2)              \
-               LD(i,0)                                 \
-                       LD(i + 1, 1)                    \
-                               LD(i + 2, 2)            \
-                                       LD(i + 3, 3)    \
-               PF2(i)                                  \
-                               PF2(i + 2)              \
-               PF0(i + 4)                              \
-                               PF0(i + 6)              \
-               XO1(i,0)                                \
-                       XO1(i + 1, 1)                   \
-                               XO1(i + 2, 2)           \
-                                       XO1(i + 3, 3)   \
-               XO2(i,0)                                \
-                       XO2(i + 1, 1)                   \
-                               XO2(i + 2, 2)           \
-                                       XO2(i + 3, 3)   \
-               ST(i,0)                                 \
-                       ST(i + 1, 1)                    \
-                               ST(i + 2, 2)            \
-                                       ST(i + 3, 3)    \
-
-
-               PF0(0)
-                               PF0(2)
-
-       " .align 32                     ;\n"
-       " 1:                            ;\n"
-
-               BLOCK(0)
-               BLOCK(4)
-               BLOCK(8)
-               BLOCK(12)
-
-       "       addl $256, %1           ;\n"
-       "       addl $256, %2           ;\n"
-       "       addl $256, %3           ;\n"
-       "       decl %0                 ;\n"
-       "       jnz 1b                  ;\n"
-       : "+r" (lines),
-         "+r" (p1), "+r"(p2), "+r"(p3)
-       :
-       : "memory" );
-
-       kernel_fpu_end();
-}
-
-static void
-xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-         unsigned long *p3, unsigned long *p4)
-{
-       unsigned long lines = bytes >> 8;
-
-       kernel_fpu_begin();
-
-       asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
-               PF1(i)                                  \
-                               PF1(i + 2)              \
-               LD(i,0)                                 \
-                       LD(i + 1, 1)                    \
-                               LD(i + 2, 2)            \
-                                       LD(i + 3, 3)    \
-               PF2(i)                                  \
-                               PF2(i + 2)              \
-               XO1(i,0)                                \
-                       XO1(i + 1, 1)                   \
-                               XO1(i + 2, 2)           \
-                                       XO1(i + 3, 3)   \
-               PF3(i)                                  \
-                               PF3(i + 2)              \
-               PF0(i + 4)                              \
-                               PF0(i + 6)              \
-               XO2(i,0)                                \
-                       XO2(i + 1, 1)                   \
-                               XO2(i + 2, 2)           \
-                                       XO2(i + 3, 3)   \
-               XO3(i,0)                                \
-                       XO3(i + 1, 1)                   \
-                               XO3(i + 2, 2)           \
-                                       XO3(i + 3, 3)   \
-               ST(i,0)                                 \
-                       ST(i + 1, 1)                    \
-                               ST(i + 2, 2)            \
-                                       ST(i + 3, 3)    \
-
-
-               PF0(0)
-                               PF0(2)
-
-       " .align 32                     ;\n"
-       " 1:                            ;\n"
-
-               BLOCK(0)
-               BLOCK(4)
-               BLOCK(8)
-               BLOCK(12)
-
-       "       addl $256, %1           ;\n"
-       "       addl $256, %2           ;\n"
-       "       addl $256, %3           ;\n"
-       "       addl $256, %4           ;\n"
-       "       decl %0                 ;\n"
-       "       jnz 1b                  ;\n"
-       : "+r" (lines),
-         "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
-       :
-       : "memory" );
-
-       kernel_fpu_end();
-}
-
-static void
-xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-         unsigned long *p3, unsigned long *p4, unsigned long *p5)
-{
-       unsigned long lines = bytes >> 8;
-
-       kernel_fpu_begin();
-
-       /* Make sure GCC forgets anything it knows about p4 or p5,
-          such that it won't pass to the asm volatile below a
-          register that is shared with any other variable.  That's
-          because we modify p4 and p5 there, but we can't mark them
-          as read/write, otherwise we'd overflow the 10-asm-operands
-          limit of GCC < 3.1.  */
-       asm("" : "+r" (p4), "+r" (p5));
-
-       asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
-               PF1(i)                                  \
-                               PF1(i + 2)              \
-               LD(i,0)                                 \
-                       LD(i + 1, 1)                    \
-                               LD(i + 2, 2)            \
-                                       LD(i + 3, 3)    \
-               PF2(i)                                  \
-                               PF2(i + 2)              \
-               XO1(i,0)                                \
-                       XO1(i + 1, 1)                   \
-                               XO1(i + 2, 2)           \
-                                       XO1(i + 3, 3)   \
-               PF3(i)                                  \
-                               PF3(i + 2)              \
-               XO2(i,0)                                \
-                       XO2(i + 1, 1)                   \
-                               XO2(i + 2, 2)           \
-                                       XO2(i + 3, 3)   \
-               PF4(i)                                  \
-                               PF4(i + 2)              \
-               PF0(i + 4)                              \
-                               PF0(i + 6)              \
-               XO3(i,0)                                \
-                       XO3(i + 1, 1)                   \
-                               XO3(i + 2, 2)           \
-                                       XO3(i + 3, 3)   \
-               XO4(i,0)                                \
-                       XO4(i + 1, 1)                   \
-                               XO4(i + 2, 2)           \
-                                       XO4(i + 3, 3)   \
-               ST(i,0)                                 \
-                       ST(i + 1, 1)                    \
-                               ST(i + 2, 2)            \
-                                       ST(i + 3, 3)    \
-
-
-               PF0(0)
-                               PF0(2)
-
-       " .align 32                     ;\n"
-       " 1:                            ;\n"
-
-               BLOCK(0)
-               BLOCK(4)
-               BLOCK(8)
-               BLOCK(12)
-
-       "       addl $256, %1           ;\n"
-       "       addl $256, %2           ;\n"
-       "       addl $256, %3           ;\n"
-       "       addl $256, %4           ;\n"
-       "       addl $256, %5           ;\n"
-       "       decl %0                 ;\n"
-       "       jnz 1b                  ;\n"
-       : "+r" (lines),
-         "+r" (p1), "+r" (p2), "+r" (p3)
-       : "r" (p4), "r" (p5)
-       : "memory");
-
-       /* p4 and p5 were modified, and now the variables are dead.
-          Clobber them just to be sure nobody does something stupid
-          like assuming they have some legal value.  */
-       asm("" : "=r" (p4), "=r" (p5));
-
-       kernel_fpu_end();
-}
-
 static struct xor_block_template xor_block_pIII_sse = {
        .name = "pIII_sse",
        .do_2 = xor_sse_2,
@@ -827,26 +543,25 @@ static struct xor_block_template xor_block_pIII_sse = {
 /* Also try the generic routines.  */
 #include <asm-generic/xor.h>
 
+/* We force the use of the SSE xor block because it can write around L2.
+   We may also be able to load into the L1 only depending on how the cpu
+   deals with a load to a line that is being prefetched.  */
 #undef XOR_TRY_TEMPLATES
 #define XOR_TRY_TEMPLATES                              \
 do {                                                   \
-       xor_speed(&xor_block_8regs);                    \
-       xor_speed(&xor_block_8regs_p);                  \
-       xor_speed(&xor_block_32regs);                   \
-       xor_speed(&xor_block_32regs_p);                 \
        AVX_XOR_SPEED;                                  \
-       if (cpu_has_xmm)                                \
+       if (cpu_has_xmm) {                              \
                xor_speed(&xor_block_pIII_sse);         \
-       if (cpu_has_mmx) {                              \
+               xor_speed(&xor_block_sse_pf64);         \
+       } else if (cpu_has_mmx) {                       \
                xor_speed(&xor_block_pII_mmx);          \
                xor_speed(&xor_block_p5_mmx);           \
+       } else {                                        \
+               xor_speed(&xor_block_8regs);            \
+               xor_speed(&xor_block_8regs_p);          \
+               xor_speed(&xor_block_32regs);           \
+               xor_speed(&xor_block_32regs_p);         \
        }                                               \
 } while (0)
 
-/* We force the use of the SSE xor block because it can write around L2.
-   We may also be able to load into the L1 only depending on how the cpu
-   deals with a load to a line that is being prefetched.  */
-#define XOR_SELECT_TEMPLATE(FASTEST)                   \
-       AVX_SELECT(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
-
 #endif /* _ASM_X86_XOR_32_H */
index 87ac522c4af53b9770297f38f78a02d065bb2cbf..546f1e3b87cce3d346f95fec83ff2630342da47c 100644 (file)
@@ -1,301 +1,6 @@
 #ifndef _ASM_X86_XOR_64_H
 #define _ASM_X86_XOR_64_H
 
-/*
- * Optimized RAID-5 checksumming functions for MMX and SSE.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * You should have received a copy of the GNU General Public License
- * (for example /usr/src/linux/COPYING); if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-
-/*
- * Cache avoiding checksumming functions utilizing KNI instructions
- * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
- */
-
-/*
- * Based on
- * High-speed RAID5 checksumming functions utilizing SSE instructions.
- * Copyright (C) 1998 Ingo Molnar.
- */
-
-/*
- * x86-64 changes / gcc fixes from Andi Kleen.
- * Copyright 2002 Andi Kleen, SuSE Labs.
- *
- * This hasn't been optimized for the hammer yet, but there are likely
- * no advantages to be gotten from x86-64 here anyways.
- */
-
-#include <asm/i387.h>
-
-#define OFFS(x)                "16*("#x")"
-#define PF_OFFS(x)     "256+16*("#x")"
-#define        PF0(x)          "       prefetchnta "PF_OFFS(x)"(%[p1])         ;\n"
-#define LD(x, y)       "       movaps   "OFFS(x)"(%[p1]), %%xmm"#y"    ;\n"
-#define ST(x, y)       "       movaps %%xmm"#y",   "OFFS(x)"(%[p1])    ;\n"
-#define PF1(x)         "       prefetchnta "PF_OFFS(x)"(%[p2])         ;\n"
-#define PF2(x)         "       prefetchnta "PF_OFFS(x)"(%[p3])         ;\n"
-#define PF3(x)         "       prefetchnta "PF_OFFS(x)"(%[p4])         ;\n"
-#define PF4(x)         "       prefetchnta "PF_OFFS(x)"(%[p5])         ;\n"
-#define PF5(x)         "       prefetchnta "PF_OFFS(x)"(%[p6])         ;\n"
-#define XO1(x, y)      "       xorps   "OFFS(x)"(%[p2]), %%xmm"#y"     ;\n"
-#define XO2(x, y)      "       xorps   "OFFS(x)"(%[p3]), %%xmm"#y"     ;\n"
-#define XO3(x, y)      "       xorps   "OFFS(x)"(%[p4]), %%xmm"#y"     ;\n"
-#define XO4(x, y)      "       xorps   "OFFS(x)"(%[p5]), %%xmm"#y"     ;\n"
-#define XO5(x, y)      "       xorps   "OFFS(x)"(%[p6]), %%xmm"#y"     ;\n"
-
-
-static void
-xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
-{
-       unsigned int lines = bytes >> 8;
-
-       kernel_fpu_begin();
-
-       asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
-               LD(i, 0)                                \
-                       LD(i + 1, 1)                    \
-               PF1(i)                                  \
-                               PF1(i + 2)              \
-                               LD(i + 2, 2)            \
-                                       LD(i + 3, 3)    \
-               PF0(i + 4)                              \
-                               PF0(i + 6)              \
-               XO1(i, 0)                               \
-                       XO1(i + 1, 1)                   \
-                               XO1(i + 2, 2)           \
-                                       XO1(i + 3, 3)   \
-               ST(i, 0)                                \
-                       ST(i + 1, 1)                    \
-                               ST(i + 2, 2)            \
-                                       ST(i + 3, 3)    \
-
-
-               PF0(0)
-                               PF0(2)
-
-       " .align 32                     ;\n"
-       " 1:                            ;\n"
-
-               BLOCK(0)
-               BLOCK(4)
-               BLOCK(8)
-               BLOCK(12)
-
-       "       addq %[inc], %[p1]           ;\n"
-       "       addq %[inc], %[p2]           ;\n"
-               "               decl %[cnt] ; jnz 1b"
-       : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
-       : [inc] "r" (256UL)
-       : "memory");
-
-       kernel_fpu_end();
-}
-
-static void
-xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-         unsigned long *p3)
-{
-       unsigned int lines = bytes >> 8;
-
-       kernel_fpu_begin();
-       asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
-               PF1(i)                                  \
-                               PF1(i + 2)              \
-               LD(i, 0)                                        \
-                       LD(i + 1, 1)                    \
-                               LD(i + 2, 2)            \
-                                       LD(i + 3, 3)    \
-               PF2(i)                                  \
-                               PF2(i + 2)              \
-               PF0(i + 4)                              \
-                               PF0(i + 6)              \
-               XO1(i, 0)                               \
-                       XO1(i + 1, 1)                   \
-                               XO1(i + 2, 2)           \
-                                       XO1(i + 3, 3)   \
-               XO2(i, 0)                               \
-                       XO2(i + 1, 1)                   \
-                               XO2(i + 2, 2)           \
-                                       XO2(i + 3, 3)   \
-               ST(i, 0)                                \
-                       ST(i + 1, 1)                    \
-                               ST(i + 2, 2)            \
-                                       ST(i + 3, 3)    \
-
-
-               PF0(0)
-                               PF0(2)
-
-       " .align 32                     ;\n"
-       " 1:                            ;\n"
-
-               BLOCK(0)
-               BLOCK(4)
-               BLOCK(8)
-               BLOCK(12)
-
-       "       addq %[inc], %[p1]           ;\n"
-       "       addq %[inc], %[p2]          ;\n"
-       "       addq %[inc], %[p3]           ;\n"
-               "               decl %[cnt] ; jnz 1b"
-       : [cnt] "+r" (lines),
-         [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
-       : [inc] "r" (256UL)
-       : "memory");
-       kernel_fpu_end();
-}
-
-static void
-xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-         unsigned long *p3, unsigned long *p4)
-{
-       unsigned int lines = bytes >> 8;
-
-       kernel_fpu_begin();
-
-       asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
-               PF1(i)                                  \
-                               PF1(i + 2)              \
-               LD(i, 0)                                \
-                       LD(i + 1, 1)                    \
-                               LD(i + 2, 2)            \
-                                       LD(i + 3, 3)    \
-               PF2(i)                                  \
-                               PF2(i + 2)              \
-               XO1(i, 0)                               \
-                       XO1(i + 1, 1)                   \
-                               XO1(i + 2, 2)           \
-                                       XO1(i + 3, 3)   \
-               PF3(i)                                  \
-                               PF3(i + 2)              \
-               PF0(i + 4)                              \
-                               PF0(i + 6)              \
-               XO2(i, 0)                               \
-                       XO2(i + 1, 1)                   \
-                               XO2(i + 2, 2)           \
-                                       XO2(i + 3, 3)   \
-               XO3(i, 0)                               \
-                       XO3(i + 1, 1)                   \
-                               XO3(i + 2, 2)           \
-                                       XO3(i + 3, 3)   \
-               ST(i, 0)                                \
-                       ST(i + 1, 1)                    \
-                               ST(i + 2, 2)            \
-                                       ST(i + 3, 3)    \
-
-
-               PF0(0)
-                               PF0(2)
-
-       " .align 32                     ;\n"
-       " 1:                            ;\n"
-
-               BLOCK(0)
-               BLOCK(4)
-               BLOCK(8)
-               BLOCK(12)
-
-       "       addq %[inc], %[p1]           ;\n"
-       "       addq %[inc], %[p2]           ;\n"
-       "       addq %[inc], %[p3]           ;\n"
-       "       addq %[inc], %[p4]           ;\n"
-       "       decl %[cnt] ; jnz 1b"
-       : [cnt] "+c" (lines),
-         [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
-       : [inc] "r" (256UL)
-       : "memory" );
-
-       kernel_fpu_end();
-}
-
-static void
-xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
-         unsigned long *p3, unsigned long *p4, unsigned long *p5)
-{
-       unsigned int lines = bytes >> 8;
-
-       kernel_fpu_begin();
-
-       asm volatile(
-#undef BLOCK
-#define BLOCK(i) \
-               PF1(i)                                  \
-                               PF1(i + 2)              \
-               LD(i, 0)                                \
-                       LD(i + 1, 1)                    \
-                               LD(i + 2, 2)            \
-                                       LD(i + 3, 3)    \
-               PF2(i)                                  \
-                               PF2(i + 2)              \
-               XO1(i, 0)                               \
-                       XO1(i + 1, 1)                   \
-                               XO1(i + 2, 2)           \
-                                       XO1(i + 3, 3)   \
-               PF3(i)                                  \
-                               PF3(i + 2)              \
-               XO2(i, 0)                               \
-                       XO2(i + 1, 1)                   \
-                               XO2(i + 2, 2)           \
-                                       XO2(i + 3, 3)   \
-               PF4(i)                                  \
-                               PF4(i + 2)              \
-               PF0(i + 4)                              \
-                               PF0(i + 6)              \
-               XO3(i, 0)                               \
-                       XO3(i + 1, 1)                   \
-                               XO3(i + 2, 2)           \
-                                       XO3(i + 3, 3)   \
-               XO4(i, 0)                               \
-                       XO4(i + 1, 1)                   \
-                               XO4(i + 2, 2)           \
-                                       XO4(i + 3, 3)   \
-               ST(i, 0)                                \
-                       ST(i + 1, 1)                    \
-                               ST(i + 2, 2)            \
-                                       ST(i + 3, 3)    \
-
-
-               PF0(0)
-                               PF0(2)
-
-       " .align 32                     ;\n"
-       " 1:                            ;\n"
-
-               BLOCK(0)
-               BLOCK(4)
-               BLOCK(8)
-               BLOCK(12)
-
-       "       addq %[inc], %[p1]           ;\n"
-       "       addq %[inc], %[p2]           ;\n"
-       "       addq %[inc], %[p3]           ;\n"
-       "       addq %[inc], %[p4]           ;\n"
-       "       addq %[inc], %[p5]           ;\n"
-       "       decl %[cnt] ; jnz 1b"
-       : [cnt] "+c" (lines),
-         [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4),
-         [p5] "+r" (p5)
-       : [inc] "r" (256UL)
-       : "memory");
-
-       kernel_fpu_end();
-}
-
 static struct xor_block_template xor_block_sse = {
        .name = "generic_sse",
        .do_2 = xor_sse_2,
@@ -308,17 +13,15 @@ static struct xor_block_template xor_block_sse = {
 /* Also try the AVX routines */
 #include <asm/xor_avx.h>
 
+/* We force the use of the SSE xor block because it can write around L2.
+   We may also be able to load into the L1 only depending on how the cpu
+   deals with a load to a line that is being prefetched.  */
 #undef XOR_TRY_TEMPLATES
 #define XOR_TRY_TEMPLATES                      \
 do {                                           \
        AVX_XOR_SPEED;                          \
+       xor_speed(&xor_block_sse_pf64);         \
        xor_speed(&xor_block_sse);              \
 } while (0)
 
-/* We force the use of the SSE xor block because it can write around L2.
-   We may also be able to load into the L1 only depending on how the cpu
-   deals with a load to a line that is being prefetched.  */
-#define XOR_SELECT_TEMPLATE(FASTEST) \
-       AVX_SELECT(&xor_block_sse)
-
 #endif /* _ASM_X86_XOR_64_H */
index 92862cd902012b9fb174d03e6e2cd687787136cf..c15ddaf907107134d6cd2f8d86f554e510a6f848 100644 (file)
@@ -1,6 +1,31 @@
 #ifndef _ASM_X86_BOOTPARAM_H
 #define _ASM_X86_BOOTPARAM_H
 
+/* setup_data types */
+#define SETUP_NONE                     0
+#define SETUP_E820_EXT                 1
+#define SETUP_DTB                      2
+#define SETUP_PCI                      3
+
+/* ram_size flags */
+#define RAMDISK_IMAGE_START_MASK       0x07FF
+#define RAMDISK_PROMPT_FLAG            0x8000
+#define RAMDISK_LOAD_FLAG              0x4000
+
+/* loadflags */
+#define LOADED_HIGH    (1<<0)
+#define QUIET_FLAG     (1<<5)
+#define KEEP_SEGMENTS  (1<<6)
+#define CAN_USE_HEAP   (1<<7)
+
+/* xloadflags */
+#define XLF_KERNEL_64                  (1<<0)
+#define XLF_CAN_BE_LOADED_ABOVE_4G     (1<<1)
+#define XLF_EFI_HANDOVER_32            (1<<2)
+#define XLF_EFI_HANDOVER_64            (1<<3)
+
+#ifndef __ASSEMBLY__
+
 #include <linux/types.h>
 #include <linux/screen_info.h>
 #include <linux/apm_bios.h>
@@ -9,12 +34,6 @@
 #include <asm/ist.h>
 #include <video/edid.h>
 
-/* setup data types */
-#define SETUP_NONE                     0
-#define SETUP_E820_EXT                 1
-#define SETUP_DTB                      2
-#define SETUP_PCI                      3
-
 /* extensible setup data list node */
 struct setup_data {
        __u64 next;
@@ -28,9 +47,6 @@ struct setup_header {
        __u16   root_flags;
        __u32   syssize;
        __u16   ram_size;
-#define RAMDISK_IMAGE_START_MASK       0x07FF
-#define RAMDISK_PROMPT_FLAG            0x8000
-#define RAMDISK_LOAD_FLAG              0x4000
        __u16   vid_mode;
        __u16   root_dev;
        __u16   boot_flag;
@@ -42,10 +58,6 @@ struct setup_header {
        __u16   kernel_version;
        __u8    type_of_loader;
        __u8    loadflags;
-#define LOADED_HIGH    (1<<0)
-#define QUIET_FLAG     (1<<5)
-#define KEEP_SEGMENTS  (1<<6)
-#define CAN_USE_HEAP   (1<<7)
        __u16   setup_move_size;
        __u32   code32_start;
        __u32   ramdisk_image;
@@ -58,7 +70,8 @@ struct setup_header {
        __u32   initrd_addr_max;
        __u32   kernel_alignment;
        __u8    relocatable_kernel;
-       __u8    _pad2[3];
+       __u8    min_alignment;
+       __u16   xloadflags;
        __u32   cmdline_size;
        __u32   hardware_subarch;
        __u64   hardware_subarch_data;
@@ -106,7 +119,10 @@ struct boot_params {
        __u8  hd1_info[16];     /* obsolete! */         /* 0x090 */
        struct sys_desc_table sys_desc_table;           /* 0x0a0 */
        struct olpc_ofw_header olpc_ofw_header;         /* 0x0b0 */
-       __u8  _pad4[128];                               /* 0x0c0 */
+       __u32 ext_ramdisk_image;                        /* 0x0c0 */
+       __u32 ext_ramdisk_size;                         /* 0x0c4 */
+       __u32 ext_cmd_line_ptr;                         /* 0x0c8 */
+       __u8  _pad4[116];                               /* 0x0cc */
        struct edid_info edid_info;                     /* 0x140 */
        struct efi_info efi_info;                       /* 0x1c0 */
        __u32 alt_mem_k;                                /* 0x1e0 */
@@ -115,7 +131,20 @@ struct boot_params {
        __u8  eddbuf_entries;                           /* 0x1e9 */
        __u8  edd_mbr_sig_buf_entries;                  /* 0x1ea */
        __u8  kbd_status;                               /* 0x1eb */
-       __u8  _pad6[5];                                 /* 0x1ec */
+       __u8  _pad5[3];                                 /* 0x1ec */
+       /*
+        * The sentinel is set to a nonzero value (0xff) in header.S.
+        *
+        * A bootloader is supposed to only take setup_header and put
+        * it into a clean boot_params buffer. If it turns out that
+        * it is clumsy or too generous with the buffer, it most
+        * probably will pick up the sentinel variable too. The fact
+        * that this variable then is still 0xff will let kernel
+        * know that some variables in boot_params are invalid and
+        * kernel should zero out certain portions of boot_params.
+        */
+       __u8  sentinel;                                 /* 0x1ef */
+       __u8  _pad6[1];                                 /* 0x1f0 */
        struct setup_header hdr;    /* setup header */  /* 0x1f1 */
        __u8  _pad7[0x290-0x1f1-sizeof(struct setup_header)];
        __u32 edd_mbr_sig_buffer[EDD_MBR_SIG_MAX];      /* 0x290 */
@@ -134,6 +163,6 @@ enum {
        X86_NR_SUBARCHS,
 };
 
-
+#endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_X86_BOOTPARAM_H */
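The sentinel comment above describes the bootloader/kernel contract; a hedged sketch of the kernel-side consumer (assumed shape, modelled on the mechanism the comment describes, not code contained in this hunk):

#include <string.h>

/* Sketch: if a bootloader copied more than setup_header into its
 * boot_params buffer, the 0xff sentinel written by header.S survives,
 * so the fields outside setup_header cannot be trusted and are cleared. */
static void sanitize_boot_params_sketch(struct boot_params *boot_params)
{
	if (!boot_params->sentinel)
		return;		/* clean buffer, nothing to do */

	/* Illustrative subset: scrub the extended ramdisk/cmdline words. */
	memset(&boot_params->ext_ramdisk_image, 0,
	       (char *)&boot_params->edid_info -
	       (char *)&boot_params->ext_ramdisk_image);
}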
index 58c829871c317b89d2cf011ea78f099de69622a4..a0eab85ce7b8283cc5f194ec175e8076669a762f 100644 (file)
@@ -4,66 +4,6 @@
 #include <linux/types.h>
 #include <asm/ioctls.h>
 
-/*
- * Machine Check support for x86
- */
-
-/* MCG_CAP register defines */
-#define MCG_BANKCNT_MASK       0xff         /* Number of Banks */
-#define MCG_CTL_P              (1ULL<<8)    /* MCG_CTL register available */
-#define MCG_EXT_P              (1ULL<<9)    /* Extended registers available */
-#define MCG_CMCI_P             (1ULL<<10)   /* CMCI supported */
-#define MCG_EXT_CNT_MASK       0xff0000     /* Number of Extended registers */
-#define MCG_EXT_CNT_SHIFT      16
-#define MCG_EXT_CNT(c)         (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
-#define MCG_SER_P              (1ULL<<24)   /* MCA recovery/new status bits */
-
-/* MCG_STATUS register defines */
-#define MCG_STATUS_RIPV  (1ULL<<0)   /* restart ip valid */
-#define MCG_STATUS_EIPV  (1ULL<<1)   /* ip points to correct instruction */
-#define MCG_STATUS_MCIP  (1ULL<<2)   /* machine check in progress */
-
-/* MCi_STATUS register defines */
-#define MCI_STATUS_VAL   (1ULL<<63)  /* valid error */
-#define MCI_STATUS_OVER  (1ULL<<62)  /* previous errors lost */
-#define MCI_STATUS_UC    (1ULL<<61)  /* uncorrected error */
-#define MCI_STATUS_EN    (1ULL<<60)  /* error enabled */
-#define MCI_STATUS_MISCV (1ULL<<59)  /* misc error reg. valid */
-#define MCI_STATUS_ADDRV (1ULL<<58)  /* addr reg. valid */
-#define MCI_STATUS_PCC   (1ULL<<57)  /* processor context corrupt */
-#define MCI_STATUS_S    (1ULL<<56)  /* Signaled machine check */
-#define MCI_STATUS_AR   (1ULL<<55)  /* Action required */
-#define MCACOD           0xffff     /* MCA Error Code */
-
-/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
-#define MCACOD_SCRUB   0x00C0  /* 0xC0-0xCF Memory Scrubbing */
-#define MCACOD_SCRUBMSK        0xfff0
-#define MCACOD_L3WB    0x017A  /* L3 Explicit Writeback */
-#define MCACOD_DATA    0x0134  /* Data Load */
-#define MCACOD_INSTR   0x0150  /* Instruction Fetch */
-
-/* MCi_MISC register defines */
-#define MCI_MISC_ADDR_LSB(m)   ((m) & 0x3f)
-#define MCI_MISC_ADDR_MODE(m)  (((m) >> 6) & 7)
-#define  MCI_MISC_ADDR_SEGOFF  0       /* segment offset */
-#define  MCI_MISC_ADDR_LINEAR  1       /* linear address */
-#define  MCI_MISC_ADDR_PHYS    2       /* physical address */
-#define  MCI_MISC_ADDR_MEM     3       /* memory address */
-#define  MCI_MISC_ADDR_GENERIC 7       /* generic */
-
-/* CTL2 register defines */
-#define MCI_CTL2_CMCI_EN               (1ULL << 30)
-#define MCI_CTL2_CMCI_THRESHOLD_MASK   0x7fffULL
-
-#define MCJ_CTX_MASK           3
-#define MCJ_CTX(flags)         ((flags) & MCJ_CTX_MASK)
-#define MCJ_CTX_RANDOM         0    /* inject context: random */
-#define MCJ_CTX_PROCESS                0x1  /* inject context: process */
-#define MCJ_CTX_IRQ            0x2  /* inject context: IRQ */
-#define MCJ_NMI_BROADCAST      0x4  /* do NMI broadcasting */
-#define MCJ_EXCEPTION          0x8  /* raise as exception */
-#define MCJ_IRQ_BRAODCAST      0x10 /* do IRQ broadcasting */
-
 /* Fields are zero when not available */
 struct mce {
        __u64 status;
@@ -87,35 +27,8 @@ struct mce {
        __u64 mcgcap;   /* MCGCAP MSR: machine check capabilities of CPU */
 };
 
-/*
- * This structure contains all data related to the MCE log.  Also
- * carries a signature to make it easier to find from external
- * debugging tools.  Each entry is only valid when its finished flag
- * is set.
- */
-
-#define MCE_LOG_LEN 32
-
-struct mce_log {
-       char signature[12]; /* "MACHINECHECK" */
-       unsigned len;       /* = MCE_LOG_LEN */
-       unsigned next;
-       unsigned flags;
-       unsigned recordlen;     /* length of struct mce */
-       struct mce entry[MCE_LOG_LEN];
-};
-
-#define MCE_OVERFLOW 0         /* bit 0 in flags means overflow */
-
-#define MCE_LOG_SIGNATURE      "MACHINECHECK"
-
 #define MCE_GET_RECORD_LEN   _IOR('M', 1, int)
 #define MCE_GET_LOG_LEN      _IOR('M', 2, int)
 #define MCE_GETCLEAR_FLAGS   _IOR('M', 3, int)
 
-/* Software defined banks */
-#define MCE_EXTENDED_BANK      128
-#define MCE_THERMAL_BANK       MCE_EXTENDED_BANK + 0
-#define K8_MCE_THRESHOLD_BASE      (MCE_EXTENDED_BANK + 1)
-
 #endif /* _UAPI_ASM_X86_MCE_H */
index 433a59fb1a7411b97cd0db54d30a3bc727a4aeea..075a402555912541fe0b502c6c6b877812a5b089 100644 (file)
 /* Fam 15h MSRs */
 #define MSR_F15H_PERF_CTL              0xc0010200
 #define MSR_F15H_PERF_CTR              0xc0010201
+#define MSR_F15H_NB_PERF_CTL           0xc0010240
+#define MSR_F15H_NB_PERF_CTR           0xc0010241
 
 /* Fam 10h MSRs */
 #define MSR_FAM10H_MMIO_CONF_BASE      0xc0010058
index 34e923a537628777aa4e5ac90741bef6ef575633..ac3b3d002833024a6641ffbbc5d7ead7a99ca23b 100644 (file)
@@ -65,8 +65,7 @@ obj-$(CONFIG_X86_TSC)         += trace_clock.o
 obj-$(CONFIG_KEXEC)            += machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC)            += relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP)       += crash_dump_$(BITS).o
-obj-$(CONFIG_KPROBES)          += kprobes.o
-obj-$(CONFIG_OPTPROBES)                += kprobes-opt.o
+obj-y                          += kprobes/
 obj-$(CONFIG_MODULES)          += module.o
 obj-$(CONFIG_DOUBLEFAULT)      += doublefault_32.o
 obj-$(CONFIG_KGDB)             += kgdb.o
index afdc3f756dea0d0a1e4f460e1c3fad7eb782577b..c9876efecafb53870def4d713292b715d0c9e4ef 100644 (file)
@@ -240,7 +240,7 @@ static int apbt_cpuhp_notify(struct notifier_block *n,
                dw_apb_clockevent_pause(adev->timer);
                if (system_state == SYSTEM_RUNNING) {
                        pr_debug("skipping APBT CPU %lu offline\n", cpu);
-               } else if (adev) {
+               } else {
                        pr_debug("APBT clockevent for cpu %lu offline\n", cpu);
                        dw_apb_clockevent_stop(adev->timer);
                }
@@ -311,7 +311,6 @@ void __init apbt_time_init(void)
 #ifdef CONFIG_SMP
        int i;
        struct sfi_timer_table_entry *p_mtmr;
-       unsigned int percpu_timer;
        struct apbt_dev *adev;
 #endif
 
@@ -346,13 +345,10 @@ void __init apbt_time_init(void)
                return;
        }
        pr_debug("%s: %d CPUs online\n", __func__, num_online_cpus());
-       if (num_possible_cpus() <= sfi_mtimer_num) {
-               percpu_timer = 1;
+       if (num_possible_cpus() <= sfi_mtimer_num)
                apbt_num_timers_used = num_possible_cpus();
-       } else {
-               percpu_timer = 0;
+       else
                apbt_num_timers_used = 1;
-       }
        pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used);
 
        /* here we set up per CPU timer data structure */
index b994cc84aa7e0f0654775f82b286c77d28d9d621..a5b4dce1b7acb74552908e05d77b9354f108739b 100644 (file)
@@ -1477,8 +1477,7 @@ void __init bsp_end_local_APIC_setup(void)
         * Now that local APIC setup is completed for BP, configure the fault
         * handling for interrupt remapping.
         */
-       if (irq_remapping_enabled)
-               irq_remap_enable_fault_handling();
+       irq_remap_enable_fault_handling();
 
 }
 
@@ -2251,8 +2250,7 @@ static int lapic_suspend(void)
        local_irq_save(flags);
        disable_local_APIC();
 
-       if (irq_remapping_enabled)
-               irq_remapping_disable();
+       irq_remapping_disable();
 
        local_irq_restore(flags);
        return 0;
@@ -2268,16 +2266,15 @@ static void lapic_resume(void)
                return;
 
        local_irq_save(flags);
-       if (irq_remapping_enabled) {
-               /*
-                * IO-APIC and PIC have their own resume routines.
-                * We just mask them here to make sure the interrupt
-                * subsystem is completely quiet while we enable x2apic
-                * and interrupt-remapping.
-                */
-               mask_ioapic_entries();
-               legacy_pic->mask_all();
-       }
+
+       /*
+        * IO-APIC and PIC have their own resume routines.
+        * We just mask them here to make sure the interrupt
+        * subsystem is completely quiet while we enable x2apic
+        * and interrupt-remapping.
+        */
+       mask_ioapic_entries();
+       legacy_pic->mask_all();
 
        if (x2apic_mode)
                enable_x2apic();
@@ -2320,8 +2317,7 @@ static void lapic_resume(void)
        apic_write(APIC_ESR, 0);
        apic_read(APIC_ESR);
 
-       if (irq_remapping_enabled)
-               irq_remapping_reenable(x2apic_mode);
+       irq_remapping_reenable(x2apic_mode);
 
        local_irq_restore(flags);
 }
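The irq_remapping_enabled checks are dropped in the hunks above (and in the io_apic.c changes that follow) because the remapping entry points are expected to be safe to call unconditionally; a hedged sketch of that pattern (assumed shape of the helpers, not code in these hunks):

extern bool irq_remapping_enabled;

/* With guards like these inside the remapping layer itself, callers such
 * as lapic_suspend()/lapic_resume() and check_timer() no longer need
 * their own irq_remapping_enabled tests. */
static inline void panic_if_irq_remap(const char *msg)
{
	if (irq_remapping_enabled)
		panic("%s", msg);
}

static inline void irq_remapping_disable_sketch(void)
{
	if (!irq_remapping_enabled)
		return;
	/* quiesce / tear down the remapping hardware here */
}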
index b739d398bb29bc6951e777827cd56c982cedb452..9ed796ccc32ccda58fe9e1d914aa2c7828845d0c 100644 (file)
 #define for_each_irq_pin(entry, head) \
        for (entry = head; entry; entry = entry->next)
 
-#ifdef CONFIG_IRQ_REMAP
-static void irq_remap_modify_chip_defaults(struct irq_chip *chip);
-static inline bool irq_remapped(struct irq_cfg *cfg)
-{
-       return cfg->irq_2_iommu.iommu != NULL;
-}
-#else
-static inline bool irq_remapped(struct irq_cfg *cfg)
-{
-       return false;
-}
-static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip)
-{
-}
-#endif
-
 /*
  *      Is the SiS APIC rmw bug present ?
  *      -1 = don't know, 0 = no, 1 = yes
@@ -300,9 +284,9 @@ static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
        return cfg;
 }
 
-static int alloc_irq_from(unsigned int from, int node)
+static int alloc_irqs_from(unsigned int from, unsigned int count, int node)
 {
-       return irq_alloc_desc_from(from, node);
+       return irq_alloc_descs_from(from, count, node);
 }
 
 static void free_irq_at(unsigned int at, struct irq_cfg *cfg)
@@ -326,7 +310,7 @@ static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
                + (mpc_ioapic_addr(idx) & ~PAGE_MASK);
 }
 
-static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
+void io_apic_eoi(unsigned int apic, unsigned int vector)
 {
        struct io_apic __iomem *io_apic = io_apic_base(apic);
        writel(vector, &io_apic->eoi);
@@ -573,19 +557,10 @@ static void unmask_ioapic_irq(struct irq_data *data)
  * Otherwise, we simulate the EOI message manually by changing the trigger
  * mode to edge and then back to level, with RTE being masked during this.
  */
-static void __eoi_ioapic_pin(int apic, int pin, int vector, struct irq_cfg *cfg)
+void native_eoi_ioapic_pin(int apic, int pin, int vector)
 {
        if (mpc_ioapic_ver(apic) >= 0x20) {
-               /*
-                * Intr-remapping uses pin number as the virtual vector
-                * in the RTE. Actual vector is programmed in
-                * intr-remapping table entry. Hence for the io-apic
-                * EOI we use the pin number.
-                */
-               if (cfg && irq_remapped(cfg))
-                       io_apic_eoi(apic, pin);
-               else
-                       io_apic_eoi(apic, vector);
+               io_apic_eoi(apic, vector);
        } else {
                struct IO_APIC_route_entry entry, entry1;
 
@@ -606,14 +581,15 @@ static void __eoi_ioapic_pin(int apic, int pin, int vector, struct irq_cfg *cfg)
        }
 }
 
-static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
+void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
 {
        struct irq_pin_list *entry;
        unsigned long flags;
 
        raw_spin_lock_irqsave(&ioapic_lock, flags);
        for_each_irq_pin(entry, cfg->irq_2_pin)
-               __eoi_ioapic_pin(entry->apic, entry->pin, cfg->vector, cfg);
+               x86_io_apic_ops.eoi_ioapic_pin(entry->apic, entry->pin,
+                                              cfg->vector);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
@@ -650,7 +626,7 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
                }
 
                raw_spin_lock_irqsave(&ioapic_lock, flags);
-               __eoi_ioapic_pin(apic, pin, entry.vector, NULL);
+               x86_io_apic_ops.eoi_ioapic_pin(apic, pin, entry.vector);
                raw_spin_unlock_irqrestore(&ioapic_lock, flags);
        }
 
@@ -1304,25 +1280,18 @@ static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg,
                fasteoi = false;
        }
 
-       if (irq_remapped(cfg)) {
-               irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
-               irq_remap_modify_chip_defaults(chip);
+       if (setup_remapped_irq(irq, cfg, chip))
                fasteoi = trigger != 0;
-       }
 
        hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq;
        irq_set_chip_and_handler_name(irq, chip, hdl,
                                      fasteoi ? "fasteoi" : "edge");
 }
 
-static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
-                              unsigned int destination, int vector,
-                              struct io_apic_irq_attr *attr)
+int native_setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
+                             unsigned int destination, int vector,
+                             struct io_apic_irq_attr *attr)
 {
-       if (irq_remapping_enabled)
-               return setup_ioapic_remapped_entry(irq, entry, destination,
-                                                  vector, attr);
-
        memset(entry, 0, sizeof(*entry));
 
        entry->delivery_mode = apic->irq_delivery_mode;
@@ -1370,8 +1339,8 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
                    attr->ioapic, mpc_ioapic_id(attr->ioapic), attr->ioapic_pin,
                    cfg->vector, irq, attr->trigger, attr->polarity, dest);
 
-       if (setup_ioapic_entry(irq, &entry, dest, cfg->vector, attr)) {
-               pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n",
+       if (x86_io_apic_ops.setup_entry(irq, &entry, dest, cfg->vector, attr)) {
+               pr_warn("Failed to setup ioapic entry for ioapic  %d, pin %d\n",
                        mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
                __clear_irq_vector(irq, cfg);
 
@@ -1479,9 +1448,6 @@ static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
        struct IO_APIC_route_entry entry;
        unsigned int dest;
 
-       if (irq_remapping_enabled)
-               return;
-
        memset(&entry, 0, sizeof(entry));
 
        /*
@@ -1513,9 +1479,63 @@ static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
        ioapic_write_entry(ioapic_idx, pin, entry);
 }
 
-__apicdebuginit(void) print_IO_APIC(int ioapic_idx)
+void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries)
 {
        int i;
+
+       pr_debug(" NR Dst Mask Trig IRR Pol Stat Dmod Deli Vect:\n");
+
+       for (i = 0; i <= nr_entries; i++) {
+               struct IO_APIC_route_entry entry;
+
+               entry = ioapic_read_entry(apic, i);
+
+               pr_debug(" %02x %02X  ", i, entry.dest);
+               pr_cont("%1d    %1d    %1d   %1d   %1d    "
+                       "%1d    %1d    %02X\n",
+                       entry.mask,
+                       entry.trigger,
+                       entry.irr,
+                       entry.polarity,
+                       entry.delivery_status,
+                       entry.dest_mode,
+                       entry.delivery_mode,
+                       entry.vector);
+       }
+}
+
+void intel_ir_io_apic_print_entries(unsigned int apic,
+                                   unsigned int nr_entries)
+{
+       int i;
+
+       pr_debug(" NR Indx Fmt Mask Trig IRR Pol Stat Indx2 Zero Vect:\n");
+
+       for (i = 0; i <= nr_entries; i++) {
+               struct IR_IO_APIC_route_entry *ir_entry;
+               struct IO_APIC_route_entry entry;
+
+               entry = ioapic_read_entry(apic, i);
+
+               ir_entry = (struct IR_IO_APIC_route_entry *)&entry;
+
+               pr_debug(" %02x %04X ", i, ir_entry->index);
+               pr_cont("%1d   %1d    %1d    %1d   %1d   "
+                       "%1d    %1d     %X    %02X\n",
+                       ir_entry->format,
+                       ir_entry->mask,
+                       ir_entry->trigger,
+                       ir_entry->irr,
+                       ir_entry->polarity,
+                       ir_entry->delivery_status,
+                       ir_entry->index2,
+                       ir_entry->zero,
+                       ir_entry->vector);
+       }
+}
+
+__apicdebuginit(void) print_IO_APIC(int ioapic_idx)
+{
        union IO_APIC_reg_00 reg_00;
        union IO_APIC_reg_01 reg_01;
        union IO_APIC_reg_02 reg_02;
@@ -1568,58 +1588,7 @@ __apicdebuginit(void) print_IO_APIC(int ioapic_idx)
 
        printk(KERN_DEBUG ".... IRQ redirection table:\n");
 
-       if (irq_remapping_enabled) {
-               printk(KERN_DEBUG " NR Indx Fmt Mask Trig IRR"
-                       " Pol Stat Indx2 Zero Vect:\n");
-       } else {
-               printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
-                       " Stat Dmod Deli Vect:\n");
-       }
-
-       for (i = 0; i <= reg_01.bits.entries; i++) {
-               if (irq_remapping_enabled) {
-                       struct IO_APIC_route_entry entry;
-                       struct IR_IO_APIC_route_entry *ir_entry;
-
-                       entry = ioapic_read_entry(ioapic_idx, i);
-                       ir_entry = (struct IR_IO_APIC_route_entry *) &entry;
-                       printk(KERN_DEBUG " %02x %04X ",
-                               i,
-                               ir_entry->index
-                       );
-                       pr_cont("%1d   %1d    %1d    %1d   %1d   "
-                               "%1d    %1d     %X    %02X\n",
-                               ir_entry->format,
-                               ir_entry->mask,
-                               ir_entry->trigger,
-                               ir_entry->irr,
-                               ir_entry->polarity,
-                               ir_entry->delivery_status,
-                               ir_entry->index2,
-                               ir_entry->zero,
-                               ir_entry->vector
-                       );
-               } else {
-                       struct IO_APIC_route_entry entry;
-
-                       entry = ioapic_read_entry(ioapic_idx, i);
-                       printk(KERN_DEBUG " %02x %02X  ",
-                               i,
-                               entry.dest
-                       );
-                       pr_cont("%1d    %1d    %1d   %1d   %1d    "
-                               "%1d    %1d    %02X\n",
-                               entry.mask,
-                               entry.trigger,
-                               entry.irr,
-                               entry.polarity,
-                               entry.delivery_status,
-                               entry.dest_mode,
-                               entry.delivery_mode,
-                               entry.vector
-                       );
-               }
-       }
+       x86_io_apic_ops.print_entries(ioapic_idx, reg_01.bits.entries);
 }
 
 __apicdebuginit(void) print_IO_APICs(void)
@@ -1921,30 +1890,14 @@ void __init enable_IO_APIC(void)
        clear_IO_APIC();
 }
 
-/*
- * Not an __init, needed by the reboot code
- */
-void disable_IO_APIC(void)
+void native_disable_io_apic(void)
 {
-       /*
-        * Clear the IO-APIC before rebooting:
-        */
-       clear_IO_APIC();
-
-       if (!legacy_pic->nr_legacy_irqs)
-               return;
-
        /*
         * If the i8259 is routed through an IOAPIC
         * Put that IOAPIC in virtual wire mode
         * so legacy interrupts can be delivered.
-        *
-        * With interrupt-remapping, for now we will use virtual wire A mode,
-        * as virtual wire B is little complex (need to configure both
-        * IOAPIC RTE as well as interrupt-remapping table entry).
-        * As this gets called during crash dump, keep this simple for now.
         */
-       if (ioapic_i8259.pin != -1 && !irq_remapping_enabled) {
+       if (ioapic_i8259.pin != -1) {
                struct IO_APIC_route_entry entry;
 
                memset(&entry, 0, sizeof(entry));
@@ -1964,12 +1917,25 @@ void disable_IO_APIC(void)
                ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
        }
 
+       if (cpu_has_apic || apic_from_smp_config())
+               disconnect_bsp_APIC(ioapic_i8259.pin != -1);
+
+}
+
+/*
+ * Not an __init, needed by the reboot code
+ */
+void disable_IO_APIC(void)
+{
        /*
-        * Use virtual wire A mode when interrupt remapping is enabled.
+        * Clear the IO-APIC before rebooting:
         */
-       if (cpu_has_apic || apic_from_smp_config())
-               disconnect_bsp_APIC(!irq_remapping_enabled &&
-                               ioapic_i8259.pin != -1);
+       clear_IO_APIC();
+
+       if (!legacy_pic->nr_legacy_irqs)
+               return;
+
+       x86_io_apic_ops.disable();
 }
 
 #ifdef CONFIG_X86_32
@@ -2322,12 +2288,8 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
 
                apic = entry->apic;
                pin = entry->pin;
-               /*
-                * With interrupt-remapping, destination information comes
-                * from interrupt-remapping table entry.
-                */
-               if (!irq_remapped(cfg))
-                       io_apic_write(apic, 0x11 + pin*2, dest);
+
+               io_apic_write(apic, 0x11 + pin*2, dest);
                reg = io_apic_read(apic, 0x10 + pin*2);
                reg &= ~IO_APIC_REDIR_VECTOR_MASK;
                reg |= vector;
@@ -2369,9 +2331,10 @@ int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
        return 0;
 }
 
-static int
-ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
-                   bool force)
+
+int native_ioapic_set_affinity(struct irq_data *data,
+                              const struct cpumask *mask,
+                              bool force)
 {
        unsigned int dest, irq = data->irq;
        unsigned long flags;
@@ -2548,33 +2511,6 @@ static void ack_apic_level(struct irq_data *data)
        ioapic_irqd_unmask(data, cfg, masked);
 }
 
-#ifdef CONFIG_IRQ_REMAP
-static void ir_ack_apic_edge(struct irq_data *data)
-{
-       ack_APIC_irq();
-}
-
-static void ir_ack_apic_level(struct irq_data *data)
-{
-       ack_APIC_irq();
-       eoi_ioapic_irq(data->irq, data->chip_data);
-}
-
-static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
-{
-       seq_printf(p, " IR-%s", data->chip->name);
-}
-
-static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
-{
-       chip->irq_print_chip = ir_print_prefix;
-       chip->irq_ack = ir_ack_apic_edge;
-       chip->irq_eoi = ir_ack_apic_level;
-
-       chip->irq_set_affinity = set_remapped_irq_affinity;
-}
-#endif /* CONFIG_IRQ_REMAP */
-
 static struct irq_chip ioapic_chip __read_mostly = {
        .name                   = "IO-APIC",
        .irq_startup            = startup_ioapic_irq,
@@ -2582,7 +2518,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
        .irq_unmask             = unmask_ioapic_irq,
        .irq_ack                = ack_apic_edge,
        .irq_eoi                = ack_apic_level,
-       .irq_set_affinity       = ioapic_set_affinity,
+       .irq_set_affinity       = native_ioapic_set_affinity,
        .irq_retrigger          = ioapic_retrigger_irq,
 };
 
@@ -2781,8 +2717,7 @@ static inline void __init check_timer(void)
         * 8259A.
         */
        if (pin1 == -1) {
-               if (irq_remapping_enabled)
-                       panic("BIOS bug: timer not connected to IO-APIC");
+               panic_if_irq_remap("BIOS bug: timer not connected to IO-APIC");
                pin1 = pin2;
                apic1 = apic2;
                no_pin1 = 1;
@@ -2814,8 +2749,7 @@ static inline void __init check_timer(void)
                                clear_IO_APIC_pin(0, pin1);
                        goto out;
                }
-               if (irq_remapping_enabled)
-                       panic("timer doesn't work through Interrupt-remapped IO-APIC");
+               panic_if_irq_remap("timer doesn't work through Interrupt-remapped IO-APIC");
                local_irq_disable();
                clear_IO_APIC_pin(apic1, pin1);
                if (!no_pin1)
@@ -2982,37 +2916,58 @@ device_initcall(ioapic_init_ops);
 /*
  * Dynamic irq allocate and deallocation
  */
-unsigned int create_irq_nr(unsigned int from, int node)
+unsigned int __create_irqs(unsigned int from, unsigned int count, int node)
 {
-       struct irq_cfg *cfg;
+       struct irq_cfg **cfg;
        unsigned long flags;
-       unsigned int ret = 0;
-       int irq;
+       int irq, i;
 
        if (from < nr_irqs_gsi)
                from = nr_irqs_gsi;
 
-       irq = alloc_irq_from(from, node);
-       if (irq < 0)
-               return 0;
-       cfg = alloc_irq_cfg(irq, node);
-       if (!cfg) {
-               free_irq_at(irq, NULL);
+       cfg = kzalloc_node(count * sizeof(cfg[0]), GFP_KERNEL, node);
+       if (!cfg)
                return 0;
+
+       irq = alloc_irqs_from(from, count, node);
+       if (irq < 0)
+               goto out_cfgs;
+
+       for (i = 0; i < count; i++) {
+               cfg[i] = alloc_irq_cfg(irq + i, node);
+               if (!cfg[i])
+                       goto out_irqs;
        }
 
        raw_spin_lock_irqsave(&vector_lock, flags);
-       if (!__assign_irq_vector(irq, cfg, apic->target_cpus()))
-               ret = irq;
+       for (i = 0; i < count; i++)
+               if (__assign_irq_vector(irq + i, cfg[i], apic->target_cpus()))
+                       goto out_vecs;
        raw_spin_unlock_irqrestore(&vector_lock, flags);
 
-       if (ret) {
-               irq_set_chip_data(irq, cfg);
-               irq_clear_status_flags(irq, IRQ_NOREQUEST);
-       } else {
-               free_irq_at(irq, cfg);
+       for (i = 0; i < count; i++) {
+               irq_set_chip_data(irq + i, cfg[i]);
+               irq_clear_status_flags(irq + i, IRQ_NOREQUEST);
        }
-       return ret;
+
+       kfree(cfg);
+       return irq;
+
+out_vecs:
+       for (i--; i >= 0; i--)
+               __clear_irq_vector(irq + i, cfg[i]);
+       raw_spin_unlock_irqrestore(&vector_lock, flags);
+out_irqs:
+       for (i = 0; i < count; i++)
+               free_irq_at(irq + i, cfg[i]);
+out_cfgs:
+       kfree(cfg);
+       return 0;
+}
+
+unsigned int create_irq_nr(unsigned int from, int node)
+{
+       return __create_irqs(from, 1, node);
 }
 
 int create_irq(void)
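
The __create_irqs() hunk above allocates one irq_cfg per requested interrupt and, on any failure, unwinds everything acquired so far (out_vecs/out_irqs/out_cfgs). A minimal user-space sketch of that allocate-then-unwind pattern, with purely illustrative names rather than kernel APIs:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for per-interrupt state such as struct irq_cfg. */
    static int *alloc_slot(int i)
    {
            int *p = malloc(sizeof(*p));

            if (p)
                    *p = i;
            return p;
    }

    /* Allocate 'count' slots, releasing everything on the first failure. */
    static int create_slots(int **slots, int count)
    {
            int i;

            for (i = 0; i < count; i++) {
                    slots[i] = alloc_slot(i);
                    if (!slots[i])
                            goto out_unwind;
            }
            return 0;

    out_unwind:
            for (i--; i >= 0; i--) {        /* unwind in reverse, as out_vecs does */
                    free(slots[i]);
                    slots[i] = NULL;
            }
            return -1;
    }

    int main(void)
    {
            int *slots[4];

            if (create_slots(slots, 4) == 0)
                    printf("allocated %d slots\n", 4);
            return 0;
    }
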
@@ -3037,48 +2992,35 @@ void destroy_irq(unsigned int irq)
 
        irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE);
 
-       if (irq_remapped(cfg))
-               free_remapped_irq(irq);
+       free_remapped_irq(irq);
+
        raw_spin_lock_irqsave(&vector_lock, flags);
        __clear_irq_vector(irq, cfg);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        free_irq_at(irq, cfg);
 }
 
+void destroy_irqs(unsigned int irq, unsigned int count)
+{
+       unsigned int i;
+
+       for (i = 0; i < count; i++)
+               destroy_irq(irq + i);
+}
+
 /*
  * MSI message composition
  */
-#ifdef CONFIG_PCI_MSI
-static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
-                          struct msi_msg *msg, u8 hpet_id)
+void native_compose_msi_msg(struct pci_dev *pdev,
+                           unsigned int irq, unsigned int dest,
+                           struct msi_msg *msg, u8 hpet_id)
 {
-       struct irq_cfg *cfg;
-       int err;
-       unsigned dest;
-
-       if (disable_apic)
-               return -ENXIO;
-
-       cfg = irq_cfg(irq);
-       err = assign_irq_vector(irq, cfg, apic->target_cpus());
-       if (err)
-               return err;
+       struct irq_cfg *cfg = irq_cfg(irq);
 
-       err = apic->cpu_mask_to_apicid_and(cfg->domain,
-                                          apic->target_cpus(), &dest);
-       if (err)
-               return err;
-
-       if (irq_remapped(cfg)) {
-               compose_remapped_msi_msg(pdev, irq, dest, msg, hpet_id);
-               return err;
-       }
+       msg->address_hi = MSI_ADDR_BASE_HI;
 
        if (x2apic_enabled())
-               msg->address_hi = MSI_ADDR_BASE_HI |
-                                 MSI_ADDR_EXT_DEST_ID(dest);
-       else
-               msg->address_hi = MSI_ADDR_BASE_HI;
+               msg->address_hi |= MSI_ADDR_EXT_DEST_ID(dest);
 
        msg->address_lo =
                MSI_ADDR_BASE_LO |
@@ -3097,8 +3039,32 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
                        MSI_DATA_DELIVERY_FIXED:
                        MSI_DATA_DELIVERY_LOWPRI) |
                MSI_DATA_VECTOR(cfg->vector);
+}
 
-       return err;
+#ifdef CONFIG_PCI_MSI
+static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
+                          struct msi_msg *msg, u8 hpet_id)
+{
+       struct irq_cfg *cfg;
+       int err;
+       unsigned dest;
+
+       if (disable_apic)
+               return -ENXIO;
+
+       cfg = irq_cfg(irq);
+       err = assign_irq_vector(irq, cfg, apic->target_cpus());
+       if (err)
+               return err;
+
+       err = apic->cpu_mask_to_apicid_and(cfg->domain,
+                                          apic->target_cpus(), &dest);
+       if (err)
+               return err;
+
+       x86_msi.compose_msi_msg(pdev, irq, dest, msg, hpet_id);
+
+       return 0;
 }
 
 static int
@@ -3136,23 +3102,28 @@ static struct irq_chip msi_chip = {
        .irq_retrigger          = ioapic_retrigger_irq,
 };
 
-static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
+int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+                 unsigned int irq_base, unsigned int irq_offset)
 {
        struct irq_chip *chip = &msi_chip;
        struct msi_msg msg;
+       unsigned int irq = irq_base + irq_offset;
        int ret;
 
        ret = msi_compose_msg(dev, irq, &msg, -1);
        if (ret < 0)
                return ret;
 
-       irq_set_msi_desc(irq, msidesc);
-       write_msi_msg(irq, &msg);
+       irq_set_msi_desc_off(irq_base, irq_offset, msidesc);
 
-       if (irq_remapped(irq_get_chip_data(irq))) {
-               irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
-               irq_remap_modify_chip_defaults(chip);
-       }
+       /*
+        * An MSI-X message is written per IRQ, so the offset is always 0.
+        * An MSI message covers a contiguous group of IRQs and is written
+        * only for the 0th IRQ of the group.
+        */
+       if (!irq_offset)
+               write_msi_msg(irq, &msg);
+
+       setup_remapped_irq(irq, irq_get_chip_data(irq), chip);
 
        irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
 
@@ -3163,46 +3134,26 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
 
 int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 {
-       int node, ret, sub_handle, index = 0;
        unsigned int irq, irq_want;
        struct msi_desc *msidesc;
+       int node, ret;
 
-       /* x86 doesn't support multiple MSI yet */
+       /* Multiple MSI vectors only supported with interrupt remapping */
        if (type == PCI_CAP_ID_MSI && nvec > 1)
                return 1;
 
        node = dev_to_node(&dev->dev);
        irq_want = nr_irqs_gsi;
-       sub_handle = 0;
        list_for_each_entry(msidesc, &dev->msi_list, list) {
                irq = create_irq_nr(irq_want, node);
                if (irq == 0)
-                       return -1;
+                       return -ENOSPC;
+
                irq_want = irq + 1;
-               if (!irq_remapping_enabled)
-                       goto no_ir;
 
-               if (!sub_handle) {
-                       /*
-                        * allocate the consecutive block of IRTE's
-                        * for 'nvec'
-                        */
-                       index = msi_alloc_remapped_irq(dev, irq, nvec);
-                       if (index < 0) {
-                               ret = index;
-                               goto error;
-                       }
-               } else {
-                       ret = msi_setup_remapped_irq(dev, irq, index,
-                                                    sub_handle);
-                       if (ret < 0)
-                               goto error;
-               }
-no_ir:
-               ret = setup_msi_irq(dev, msidesc, irq);
+               ret = setup_msi_irq(dev, msidesc, irq, 0);
                if (ret < 0)
                        goto error;
-               sub_handle++;
        }
        return 0;
 
@@ -3298,26 +3249,19 @@ static struct irq_chip hpet_msi_type = {
        .irq_retrigger = ioapic_retrigger_irq,
 };
 
-int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
+int default_setup_hpet_msi(unsigned int irq, unsigned int id)
 {
        struct irq_chip *chip = &hpet_msi_type;
        struct msi_msg msg;
        int ret;
 
-       if (irq_remapping_enabled) {
-               ret = setup_hpet_msi_remapped(irq, id);
-               if (ret)
-                       return ret;
-       }
-
        ret = msi_compose_msg(NULL, irq, &msg, id);
        if (ret < 0)
                return ret;
 
        hpet_msi_write(irq_get_handler_data(irq), &msg);
        irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
-       if (irq_remapped(irq_get_chip_data(irq)))
-               irq_remap_modify_chip_defaults(chip);
+       setup_remapped_irq(irq, irq_get_chip_data(irq), chip);
 
        irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
        return 0;
@@ -3683,10 +3627,7 @@ void __init setup_ioapic_dest(void)
                else
                        mask = apic->target_cpus();
 
-               if (irq_remapping_enabled)
-                       set_remapped_irq_affinity(idata, mask, false);
-               else
-                       ioapic_set_affinity(idata, mask, false);
+               x86_io_apic_ops.set_affinity(idata, mask, false);
        }
 
 }
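
The reworked MSI path above splits message composition (native_compose_msi_msg) from vector and destination assignment. The message itself follows the standard x86 MSI layout: the address selects the 0xFEExxxxx window with the destination APIC ID in bits 19:12, and the data word carries the vector in bits 7:0 plus delivery and trigger mode bits. A minimal stand-alone sketch of that packing, using open-coded bit positions instead of the kernel's MSI_* macros:

    #include <stdio.h>
    #include <stdint.h>

    /*
     * Build an MSI address/data pair for a fixed-delivery, edge-triggered,
     * physically addressed interrupt aimed at a single APIC ID.
     */
    static void compose_msi(uint8_t apic_id, uint8_t vector,
                            uint32_t *addr_lo, uint32_t *data)
    {
            *addr_lo = 0xfee00000u                  /* MSI address window      */
                     | ((uint32_t)apic_id << 12)    /* destination ID, 19:12   */
                     | (0u << 3)                    /* redirection hint: off   */
                     | (0u << 2);                   /* physical dest. mode     */

            *data = (0u << 15)                      /* edge triggered          */
                  | (0u << 8)                       /* fixed delivery mode     */
                  | vector;                         /* vector, bits 7:0        */
    }

    int main(void)
    {
            uint32_t addr_lo, data;

            compose_msi(0x03, 0x41, &addr_lo, &data);
            printf("address_lo=0x%08x data=0x%04x\n", addr_lo, data);
            return 0;
    }
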
index cce91bf26676cddee4b5c42fa21016a801411079..7434d8556d091612f688e2325dc6e2c8abfcde77 100644 (file)
@@ -106,7 +106,7 @@ void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
        unsigned long mask = cpumask_bits(cpumask)[0];
        unsigned long flags;
 
-       if (WARN_ONCE(!mask, "empty IPI mask"))
+       if (!mask)
                return;
 
        local_irq_save(flags);
index e03a1e180e81789bb334918f7f92c2886142d491..562a76d433c8e286a45ecfb28bf5d88cd7ccd50a 100644 (file)
@@ -20,18 +20,19 @@ static int set_x2apic_phys_mode(char *arg)
 }
 early_param("x2apic_phys", set_x2apic_phys_mode);
 
-static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+static bool x2apic_fadt_phys(void)
 {
-       if (x2apic_phys)
-               return x2apic_enabled();
-       else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
-               (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) &&
-               x2apic_enabled()) {
+       if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
+               (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
                printk(KERN_DEBUG "System requires x2apic physical mode\n");
-               return 1;
+               return true;
        }
-       else
-               return 0;
+       return false;
+}
+
+static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+       return x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys());
 }
 
 static void
@@ -82,7 +83,7 @@ static void init_x2apic_ldr(void)
 
 static int x2apic_phys_probe(void)
 {
-       if (x2apic_mode && x2apic_phys)
+       if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys()))
                return 1;
 
        return apic == &apic_x2apic_phys;
index 8cfade9510a41b10a03a8964a6ff9ed7c914cb2f..794f6eb54cd3bf6f55093819a938c7dc91448b0e 100644 (file)
@@ -5,7 +5,7 @@
  *
  * SGI UV APIC functions (note: not an Intel compatible APIC)
  *
- * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2013 Silicon Graphics, Inc. All rights reserved.
  */
 #include <linux/cpumask.h>
 #include <linux/hardirq.h>
@@ -91,10 +91,16 @@ static int __init early_get_pnodeid(void)
        m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR);
        uv_min_hub_revision_id = node_id.s.revision;
 
-       if (node_id.s.part_number == UV2_HUB_PART_NUMBER)
-               uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
-       if (node_id.s.part_number == UV2_HUB_PART_NUMBER_X)
+       switch (node_id.s.part_number) {
+       case UV2_HUB_PART_NUMBER:
+       case UV2_HUB_PART_NUMBER_X:
                uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
+               break;
+       case UV3_HUB_PART_NUMBER:
+       case UV3_HUB_PART_NUMBER_X:
+               uv_min_hub_revision_id += UV3_HUB_REVISION_BASE - 1;
+               break;
+       }
 
        uv_hub_info->hub_revision = uv_min_hub_revision_id;
        pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
@@ -130,13 +136,16 @@ static void __init uv_set_apicid_hibit(void)
 
 static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
-       int pnodeid, is_uv1, is_uv2;
+       int pnodeid, is_uv1, is_uv2, is_uv3;
 
        is_uv1 = !strcmp(oem_id, "SGI");
        is_uv2 = !strcmp(oem_id, "SGI2");
-       if (is_uv1 || is_uv2) {
+       is_uv3 = !strncmp(oem_id, "SGI3", 4);   /* there are varieties of UV3 */
+       if (is_uv1 || is_uv2 || is_uv3) {
                uv_hub_info->hub_revision =
-                       is_uv1 ? UV1_HUB_REVISION_BASE : UV2_HUB_REVISION_BASE;
+                       (is_uv1 ? UV1_HUB_REVISION_BASE :
+                       (is_uv2 ? UV2_HUB_REVISION_BASE :
+                                 UV3_HUB_REVISION_BASE));
                pnodeid = early_get_pnodeid();
                early_get_apic_pnode_shift();
                x86_platform.is_untracked_pat_range =  uv_is_untracked_pat_range;
@@ -450,14 +459,17 @@ static __init void map_high(char *id, unsigned long base, int pshift,
 
        paddr = base << pshift;
        bytes = (1UL << bshift) * (max_pnode + 1);
-       printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
-                                               paddr + bytes);
+       if (!paddr) {
+               pr_info("UV: Map %s_HI base address NULL\n", id);
+               return;
+       }
+       pr_info("UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, paddr + bytes);
        if (map_type == map_uc)
                init_extra_mapping_uc(paddr, bytes);
        else
                init_extra_mapping_wb(paddr, bytes);
-
 }
+
 static __init void map_gru_high(int max_pnode)
 {
        union uvh_rh_gam_gru_overlay_config_mmr_u gru;
@@ -468,7 +480,8 @@ static __init void map_gru_high(int max_pnode)
                map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
                gru_start_paddr = ((u64)gru.s.base << shift);
                gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
-
+       } else {
+               pr_info("UV: GRU disabled\n");
        }
 }
 
@@ -480,23 +493,146 @@ static __init void map_mmr_high(int max_pnode)
        mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
        if (mmr.s.enable)
                map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
+       else
+               pr_info("UV: MMR disabled\n");
+}
+
+/*
+ * This commonality works because both 0 & 1 versions of the MMIOH OVERLAY
+ * and REDIRECT MMR regs are exactly the same on UV3.
+ */
+struct mmioh_config {
+       unsigned long overlay;
+       unsigned long redirect;
+       char *id;
+};
+
+static __initdata struct mmioh_config mmiohs[] = {
+       {
+               UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR,
+               UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR,
+               "MMIOH0"
+       },
+       {
+               UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR,
+               UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR,
+               "MMIOH1"
+       },
+};
+
+static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
+{
+       union uv3h_rh_gam_mmioh_overlay_config0_mmr_u overlay;
+       unsigned long mmr;
+       unsigned long base;
+       int i, n, shift, m_io, max_io;
+       int nasid, lnasid, fi, li;
+       char *id;
+
+       id = mmiohs[index].id;
+       overlay.v = uv_read_local_mmr(mmiohs[index].overlay);
+       pr_info("UV: %s overlay 0x%lx base:0x%x m_io:%d\n",
+               id, overlay.v, overlay.s3.base, overlay.s3.m_io);
+       if (!overlay.s3.enable) {
+               pr_info("UV: %s disabled\n", id);
+               return;
+       }
+
+       shift = UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT;
+       base = (unsigned long)overlay.s3.base;
+       m_io = overlay.s3.m_io;
+       mmr = mmiohs[index].redirect;
+       n = UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH;
+       min_pnode *= 2;                         /* convert to NASID */
+       max_pnode *= 2;
+       max_io = lnasid = fi = li = -1;
+
+       for (i = 0; i < n; i++) {
+               union uv3h_rh_gam_mmioh_redirect_config0_mmr_u redirect;
+
+               redirect.v = uv_read_local_mmr(mmr + i * 8);
+               nasid = redirect.s3.nasid;
+               if (nasid < min_pnode || max_pnode < nasid)
+                       nasid = -1;             /* invalid NASID */
+
+               if (nasid == lnasid) {
+                       li = i;
+                       if (i != n-1)           /* last entry check */
+                               continue;
+               }
+
+               /* check if we have a cached (or last) redirect to print */
+               if (lnasid != -1 || (i == n-1 && nasid != -1))  {
+                       unsigned long addr1, addr2;
+                       int f, l;
+
+                       if (lnasid == -1) {
+                               f = l = i;
+                               lnasid = nasid;
+                       } else {
+                               f = fi;
+                               l = li;
+                       }
+                       addr1 = (base << shift) +
+                               f * (unsigned long)(1 << m_io);
+                       addr2 = (base << shift) +
+                               (l + 1) * (unsigned long)(1 << m_io);
+                       pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n",
+                               id, fi, li, lnasid, addr1, addr2);
+                       if (max_io < l)
+                               max_io = l;
+               }
+               fi = li = i;
+               lnasid = nasid;
+       }
+
+       pr_info("UV: %s base:0x%lx shift:%d M_IO:%d MAX_IO:%d\n",
+               id, base, shift, m_io, max_io);
+
+       if (max_io >= 0)
+               map_high(id, base, shift, m_io, max_io, map_uc);
 }
 
-static __init void map_mmioh_high(int max_pnode)
+static __init void map_mmioh_high(int min_pnode, int max_pnode)
 {
        union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
-       int shift;
+       unsigned long mmr, base;
+       int shift, enable, m_io, n_io;
 
-       mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
-       if (is_uv1_hub() && mmioh.s1.enable) {
-               shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
-               map_high("MMIOH", mmioh.s1.base, shift, mmioh.s1.m_io,
-                       max_pnode, map_uc);
+       if (is_uv3_hub()) {
+               /* Map both MMIOH Regions */
+               map_mmioh_high_uv3(0, min_pnode, max_pnode);
+               map_mmioh_high_uv3(1, min_pnode, max_pnode);
+               return;
        }
-       if (is_uv2_hub() && mmioh.s2.enable) {
+
+       if (is_uv1_hub()) {
+               mmr = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
+               shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
+               mmioh.v = uv_read_local_mmr(mmr);
+               enable = !!mmioh.s1.enable;
+               base = mmioh.s1.base;
+               m_io = mmioh.s1.m_io;
+               n_io = mmioh.s1.n_io;
+       } else if (is_uv2_hub()) {
+               mmr = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
                shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
-               map_high("MMIOH", mmioh.s2.base, shift, mmioh.s2.m_io,
-                       max_pnode, map_uc);
+               mmioh.v = uv_read_local_mmr(mmr);
+               enable = !!mmioh.s2.enable;
+               base = mmioh.s2.base;
+               m_io = mmioh.s2.m_io;
+               n_io = mmioh.s2.n_io;
+       } else
+               return;
+
+       if (enable) {
+               max_pnode &= (1 << n_io) - 1;
+               pr_info(
+                   "UV: base:0x%lx shift:%d N_IO:%d M_IO:%d max_pnode:0x%x\n",
+                       base, shift, n_io, m_io, max_pnode);
+               map_high("MMIOH", base, shift, m_io, max_pnode, map_uc);
+       } else {
+               pr_info("UV: MMIOH disabled\n");
        }
 }
 
@@ -724,42 +860,41 @@ void uv_nmi_init(void)
 void __init uv_system_init(void)
 {
        union uvh_rh_gam_config_mmr_u  m_n_config;
-       union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
        union uvh_node_id_u node_id;
        unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
-       int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val, n_io;
-       int gnode_extra, max_pnode = 0;
+       int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
+       int gnode_extra, min_pnode = 999999, max_pnode = -1;
        unsigned long mmr_base, present, paddr;
-       unsigned short pnode_mask, pnode_io_mask;
+       unsigned short pnode_mask;
+       char *hub = (is_uv1_hub() ? "UV1" :
+                   (is_uv2_hub() ? "UV2" :
+                                   "UV3"));
 
-       printk(KERN_INFO "UV: Found %s hub\n", is_uv1_hub() ? "UV1" : "UV2");
+       pr_info("UV: Found %s hub\n", hub);
        map_low_mmrs();
 
        m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
        m_val = m_n_config.s.m_skt;
        n_val = m_n_config.s.n_skt;
-       mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
-       n_io = is_uv1_hub() ? mmioh.s1.n_io : mmioh.s2.n_io;
+       pnode_mask = (1 << n_val) - 1;
        mmr_base =
            uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
            ~UV_MMR_ENABLE;
-       pnode_mask = (1 << n_val) - 1;
-       pnode_io_mask = (1 << n_io) - 1;
 
        node_id.v = uv_read_local_mmr(UVH_NODE_ID);
        gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1;
        gnode_upper = ((unsigned long)gnode_extra  << m_val);
-       printk(KERN_INFO "UV: N %d, M %d, N_IO: %d, gnode_upper 0x%lx, gnode_extra 0x%x, pnode_mask 0x%x, pnode_io_mask 0x%x\n",
-                       n_val, m_val, n_io, gnode_upper, gnode_extra, pnode_mask, pnode_io_mask);
+       pr_info("UV: N:%d M:%d pnode_mask:0x%x gnode_upper/extra:0x%lx/0x%x\n",
+                       n_val, m_val, pnode_mask, gnode_upper, gnode_extra);
 
-       printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);
+       pr_info("UV: global MMR base 0x%lx\n", mmr_base);
 
        for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
                uv_possible_blades +=
                  hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8));
 
        /* uv_num_possible_blades() is really the hub count */
-       printk(KERN_INFO "UV: Found %d blades, %d hubs\n",
+       pr_info("UV: Found %d blades, %d hubs\n",
                        is_uv1_hub() ? uv_num_possible_blades() :
                        (uv_num_possible_blades() + 1) / 2,
                        uv_num_possible_blades());
@@ -794,6 +929,7 @@ void __init uv_system_init(void)
                        uv_blade_info[blade].nr_possible_cpus = 0;
                        uv_blade_info[blade].nr_online_cpus = 0;
                        spin_lock_init(&uv_blade_info[blade].nmi_lock);
+                       min_pnode = min(pnode, min_pnode);
                        max_pnode = max(pnode, max_pnode);
                        blade++;
                }
@@ -856,7 +992,7 @@ void __init uv_system_init(void)
 
        map_gru_high(max_pnode);
        map_mmr_high(max_pnode);
-       map_mmioh_high(max_pnode & pnode_io_mask);
+       map_mmioh_high(min_pnode, max_pnode);
 
        uv_cpu_init();
        uv_scir_register_cpu_notifier();
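
The UV3 MMIOH handling above coalesces runs of redirect entries that target the same NASID and reports each run as one physical window; the window arithmetic is simply (base << shift) plus the entry index scaled by 2^m_io bytes. A small stand-alone sketch of that calculation; the register values here are made up for illustration and are not real UV3 settings:

    #include <stdio.h>

    /*
     * Address window covered by MMIOH redirect entries f..l, mirroring the
     * arithmetic in map_mmioh_high_uv3(): each entry spans 2^m_io bytes,
     * starting at (base << shift). All values below are illustrative only.
     */
    int main(void)
    {
            unsigned long long base = 0x980;   /* overlay base field (example)     */
            int shift = 26;                    /* overlay base shift (example)     */
            int m_io = 26;                     /* log2 of per-entry size (example) */
            int f = 4, l = 7;                  /* first/last entry in the run      */

            unsigned long long addr1 = (base << shift) + f * (1ULL << m_io);
            unsigned long long addr2 = (base << shift) + (l + 1) * (1ULL << m_io);

            printf("entries %d..%d map 0x%016llx - 0x%016llx\n", f, l, addr1, addr2);
            return 0;
    }
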
index d65464e4350343c9d379aa6d44a1ed03a1ca1f2c..8d7012b7f4020ffe3fc7cb7da0ed15cdfd05e5e7 100644 (file)
@@ -899,6 +899,7 @@ static void apm_cpu_idle(void)
        static int use_apm_idle; /* = 0 */
        static unsigned int last_jiffies; /* = 0 */
        static unsigned int last_stime; /* = 0 */
+       cputime_t stime;
 
        int apm_idle_done = 0;
        unsigned int jiffies_since_last_check = jiffies - last_jiffies;
@@ -906,23 +907,23 @@ static void apm_cpu_idle(void)
 
        WARN_ONCE(1, "deprecated apm_cpu_idle will be deleted in 2012");
 recalc:
+       task_cputime(current, NULL, &stime);
        if (jiffies_since_last_check > IDLE_CALC_LIMIT) {
                use_apm_idle = 0;
-               last_jiffies = jiffies;
-               last_stime = current->stime;
        } else if (jiffies_since_last_check > idle_period) {
                unsigned int idle_percentage;
 
-               idle_percentage = current->stime - last_stime;
+               idle_percentage = stime - last_stime;
                idle_percentage *= 100;
                idle_percentage /= jiffies_since_last_check;
                use_apm_idle = (idle_percentage > idle_threshold);
                if (apm_info.forbid_idle)
                        use_apm_idle = 0;
-               last_jiffies = jiffies;
-               last_stime = current->stime;
        }
 
+       last_jiffies = jiffies;
+       last_stime = stime;
+
        bucket = IDLE_LEAKY_MAX;
 
        while (!need_resched()) {
index 15239fffd6fee747913a1f0e493c512885239379..782c456eaa01ae02460ddd2efabace01a507b780 100644 (file)
@@ -364,9 +364,9 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
 #endif
 }
 
-int amd_get_nb_id(int cpu)
+u16 amd_get_nb_id(int cpu)
 {
-       int id = 0;
+       u16 id = 0;
 #ifdef CONFIG_SMP
        id = per_cpu(cpu_llc_id, cpu);
 #endif
index a8f8fa9769d680869d1f9e533f79608baa181ca7..1e7e84a02ebaf4924702019c18cc5d1101609c30 100644 (file)
@@ -79,3 +79,10 @@ void __init init_hypervisor_platform(void)
        if (x86_hyper->init_platform)
                x86_hyper->init_platform();
 }
+
+bool __init hypervisor_x2apic_available(void)
+{
+       return x86_hyper                   &&
+              x86_hyper->x2apic_available &&
+              x86_hyper->x2apic_available();
+}
index fe9edec6698a3c0c15846c0e56c1a502cea05c08..7c6f7d548c0f94d76c094a950f7ef29fc1356b32 100644 (file)
@@ -298,8 +298,7 @@ struct _cache_attr {
                         unsigned int);
 };
 
-#ifdef CONFIG_AMD_NB
-
+#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
 /*
  * L3 cache descriptors
  */
@@ -524,9 +523,9 @@ store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
 static struct _cache_attr subcaches =
        __ATTR(subcaches, 0644, show_subcaches, store_subcaches);
 
-#else  /* CONFIG_AMD_NB */
+#else
 #define amd_init_l3_cache(x, y)
-#endif /* CONFIG_AMD_NB */
+#endif  /* CONFIG_AMD_NB && CONFIG_SYSFS */
 
 static int
 __cpuinit cpuid4_cache_lookup_regs(int index,
@@ -1227,7 +1226,7 @@ static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
        .notifier_call = cacheinfo_cpu_callback,
 };
 
-static int __cpuinit cache_sysfs_init(void)
+static int __init cache_sysfs_init(void)
 {
        int i;
 
index 80dbda84f1c3c2b215242d5e5381848a4ec0c372..fc7608a89d932b0b5138ab2e2acf936dd68b174b 100644 (file)
@@ -512,11 +512,8 @@ int mce_available(struct cpuinfo_x86 *c)
 
 static void mce_schedule_work(void)
 {
-       if (!mce_ring_empty()) {
-               struct work_struct *work = &__get_cpu_var(mce_work);
-               if (!work_pending(work))
-                       schedule_work(work);
-       }
+       if (!mce_ring_empty())
+               schedule_work(&__get_cpu_var(mce_work));
 }
 
 DEFINE_PER_CPU(struct irq_work, mce_irq_work);
@@ -1351,12 +1348,7 @@ int mce_notify_irq(void)
                /* wake processes polling /dev/mcelog */
                wake_up_interruptible(&mce_chrdev_wait);
 
-               /*
-                * There is no risk of missing notifications because
-                * work_pending is always cleared before the function is
-                * executed.
-                */
-               if (mce_helper[0] && !work_pending(&mce_trigger_work))
+               if (mce_helper[0])
                        schedule_work(&mce_trigger_work);
 
                if (__ratelimit(&ratelimit))
index 0a630dd4b62061ae713803ba29684fbe35a96312..a7d26d83fb700b254eca03d9ff5e8f6d56f7a0c5 100644 (file)
 #include <linux/time.h>
 #include <linux/clocksource.h>
 #include <linux/module.h>
+#include <linux/hardirq.h>
+#include <linux/interrupt.h>
 #include <asm/processor.h>
 #include <asm/hypervisor.h>
 #include <asm/hyperv.h>
 #include <asm/mshyperv.h>
+#include <asm/desc.h>
+#include <asm/idle.h>
+#include <asm/irq_regs.h>
 
 struct ms_hyperv_info ms_hyperv;
 EXPORT_SYMBOL_GPL(ms_hyperv);
@@ -30,6 +35,13 @@ static bool __init ms_hyperv_platform(void)
        if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return false;
 
+       /*
+        * Xen emulates Hyper-V to support enlightened Windows.
+        * Check to see first if we are on a Xen Hypervisor.
+        */
+       if (xen_cpuid_base())
+               return false;
+
        cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
              &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);
 
@@ -68,7 +80,14 @@ static void __init ms_hyperv_init_platform(void)
        printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
               ms_hyperv.features, ms_hyperv.hints);
 
-       clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
+       if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
+               clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
+#if IS_ENABLED(CONFIG_HYPERV)
+       /*
+        * Setup the IDT for hypervisor callback.
+        */
+       alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
+#endif
 }
 
 const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
@@ -77,3 +96,36 @@ const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
        .init_platform          = ms_hyperv_init_platform,
 };
 EXPORT_SYMBOL(x86_hyper_ms_hyperv);
+
+#if IS_ENABLED(CONFIG_HYPERV)
+static int vmbus_irq = -1;
+static irq_handler_t vmbus_isr;
+
+void hv_register_vmbus_handler(int irq, irq_handler_t handler)
+{
+       vmbus_irq = irq;
+       vmbus_isr = handler;
+}
+
+void hyperv_vector_handler(struct pt_regs *regs)
+{
+       struct pt_regs *old_regs = set_irq_regs(regs);
+       struct irq_desc *desc;
+
+       irq_enter();
+       exit_idle();
+
+       desc = irq_to_desc(vmbus_irq);
+
+       if (desc)
+               generic_handle_irq_desc(vmbus_irq, desc);
+
+       irq_exit();
+       set_irq_regs(old_regs);
+}
+#else
+void hv_register_vmbus_handler(int irq, irq_handler_t handler)
+{
+}
+#endif
+EXPORT_SYMBOL_GPL(hv_register_vmbus_handler);
index 4428fd178bce07a3a8fd75b81c61b83fc63d9258..bf0f01aea99449e53a87f092a84b3263263da911 100644 (file)
@@ -340,9 +340,6 @@ int x86_setup_perfctr(struct perf_event *event)
                /* BTS is currently only allowed for user-mode. */
                if (!attr->exclude_kernel)
                        return -EOPNOTSUPP;
-
-               if (!attr->exclude_guest)
-                       return -EOPNOTSUPP;
        }
 
        hwc->config |= config;
@@ -385,9 +382,6 @@ int x86_pmu_hw_config(struct perf_event *event)
        if (event->attr.precise_ip) {
                int precise = 0;
 
-               if (!event->attr.exclude_guest)
-                       return -EOPNOTSUPP;
-
                /* Support for constant skid */
                if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
                        precise++;
@@ -835,7 +829,7 @@ static inline void x86_assign_hw_event(struct perf_event *event,
        } else {
                hwc->config_base = x86_pmu_config_addr(hwc->idx);
                hwc->event_base  = x86_pmu_event_addr(hwc->idx);
-               hwc->event_base_rdpmc = hwc->idx;
+               hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
        }
 }
 
@@ -1316,11 +1310,6 @@ static struct attribute_group x86_pmu_format_group = {
        .attrs = NULL,
 };
 
-struct perf_pmu_events_attr {
-       struct device_attribute attr;
-       u64 id;
-};
-
 /*
  * Remove all undefined events (x86_pmu.event_map(id) == 0)
  * out of events_attr attributes.
@@ -1354,11 +1343,9 @@ static ssize_t events_sysfs_show(struct device *dev, struct device_attribute *at
 #define EVENT_VAR(_id)  event_attr_##_id
 #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
 
-#define EVENT_ATTR(_name, _id)                                 \
-static struct perf_pmu_events_attr EVENT_VAR(_id) = {          \
-       .attr = __ATTR(_name, 0444, events_sysfs_show, NULL),   \
-       .id   =  PERF_COUNT_HW_##_id,                           \
-};
+#define EVENT_ATTR(_name, _id)                                         \
+       PMU_EVENT_ATTR(_name, EVENT_VAR(_id), PERF_COUNT_HW_##_id,      \
+                       events_sysfs_show)
 
 EVENT_ATTR(cpu-cycles,                 CPU_CYCLES              );
 EVENT_ATTR(instructions,               INSTRUCTIONS            );
index 115c1ea977469b0fda5b43f76e8595575effb24b..7f5c75c2afdd9552653ffd7c9babb8c871669b05 100644 (file)
@@ -325,6 +325,8 @@ struct x86_pmu {
        int             (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
        unsigned        eventsel;
        unsigned        perfctr;
+       int             (*addr_offset)(int index, bool eventsel);
+       int             (*rdpmc_index)(int index);
        u64             (*event_map)(int);
        int             max_events;
        int             num_counters;
@@ -446,28 +448,21 @@ extern u64 __read_mostly hw_cache_extra_regs
 
 u64 x86_perf_event_update(struct perf_event *event);
 
-static inline int x86_pmu_addr_offset(int index)
+static inline unsigned int x86_pmu_config_addr(int index)
 {
-       int offset;
-
-       /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
-       alternative_io(ASM_NOP2,
-                      "shll $1, %%eax",
-                      X86_FEATURE_PERFCTR_CORE,
-                      "=a" (offset),
-                      "a"  (index));
-
-       return offset;
+       return x86_pmu.eventsel + (x86_pmu.addr_offset ?
+                                  x86_pmu.addr_offset(index, true) : index);
 }
 
-static inline unsigned int x86_pmu_config_addr(int index)
+static inline unsigned int x86_pmu_event_addr(int index)
 {
-       return x86_pmu.eventsel + x86_pmu_addr_offset(index);
+       return x86_pmu.perfctr + (x86_pmu.addr_offset ?
+                                 x86_pmu.addr_offset(index, false) : index);
 }
 
-static inline unsigned int x86_pmu_event_addr(int index)
+static inline int x86_pmu_rdpmc_index(int index)
 {
-       return x86_pmu.perfctr + x86_pmu_addr_offset(index);
+       return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
 }
 
 int x86_setup_perfctr(struct perf_event *event);
index c93bc4e813a0c10303984db79cafc1d6ee5a6caf..dfdab42aed27372503d5fe33cd29a4ef97ae580b 100644 (file)
@@ -132,21 +132,102 @@ static u64 amd_pmu_event_map(int hw_event)
        return amd_perfmon_event_map[hw_event];
 }
 
-static int amd_pmu_hw_config(struct perf_event *event)
+static struct event_constraint *amd_nb_event_constraint;
+
+/*
+ * Previously calculated offsets
+ */
+static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
+static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;
+static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly;
+
+/*
+ * Legacy CPUs:
+ *   4 counters starting at 0xc0010000 each offset by 1
+ *
+ * CPUs with core performance counter extensions:
+ *   6 counters starting at 0xc0010200 each offset by 2
+ *
+ * CPUs with north bridge performance counter extensions:
+ *   4 additional counters starting at 0xc0010240 each offset by 2
+ *   (indexed right above either one of the above core counters)
+ */
+static inline int amd_pmu_addr_offset(int index, bool eventsel)
 {
-       int ret;
+       int offset, first, base;
 
-       /* pass precise event sampling to ibs: */
-       if (event->attr.precise_ip && get_ibs_caps())
-               return -ENOENT;
+       if (!index)
+               return index;
+
+       if (eventsel)
+               offset = event_offsets[index];
+       else
+               offset = count_offsets[index];
+
+       if (offset)
+               return offset;
+
+       if (amd_nb_event_constraint &&
+           test_bit(index, amd_nb_event_constraint->idxmsk)) {
+               /*
+                * calculate the offset of NB counters with respect to
+                * base eventsel or perfctr
+                */
+
+               first = find_first_bit(amd_nb_event_constraint->idxmsk,
+                                      X86_PMC_IDX_MAX);
+
+               if (eventsel)
+                       base = MSR_F15H_NB_PERF_CTL - x86_pmu.eventsel;
+               else
+                       base = MSR_F15H_NB_PERF_CTR - x86_pmu.perfctr;
+
+               offset = base + ((index - first) << 1);
+       } else if (!cpu_has_perfctr_core)
+               offset = index;
+       else
+               offset = index << 1;
+
+       if (eventsel)
+               event_offsets[index] = offset;
+       else
+               count_offsets[index] = offset;
+
+       return offset;
+}
+
+static inline int amd_pmu_rdpmc_index(int index)
+{
+       int ret, first;
+
+       if (!index)
+               return index;
+
+       ret = rdpmc_indexes[index];
 
-       ret = x86_pmu_hw_config(event);
        if (ret)
                return ret;
 
-       if (has_branch_stack(event))
-               return -EOPNOTSUPP;
+       if (amd_nb_event_constraint &&
+           test_bit(index, amd_nb_event_constraint->idxmsk)) {
+               /*
+                * according to the manual, the ECX value of the NB counters is
+                * the index of the NB counter (0, 1, 2 or 3) plus 6
+                */
+
+               first = find_first_bit(amd_nb_event_constraint->idxmsk,
+                                      X86_PMC_IDX_MAX);
+               ret = index - first + 6;
+       } else
+               ret = index;
+
+       rdpmc_indexes[index] = ret;
+
+       return ret;
+}
 
+static int amd_core_hw_config(struct perf_event *event)
+{
        if (event->attr.exclude_host && event->attr.exclude_guest)
                /*
                 * When HO == GO == 1 the hardware treats that as GO == HO == 0
@@ -156,14 +237,37 @@ static int amd_pmu_hw_config(struct perf_event *event)
                event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
                                      ARCH_PERFMON_EVENTSEL_OS);
        else if (event->attr.exclude_host)
-               event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY;
+               event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
        else if (event->attr.exclude_guest)
-               event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY;
+               event->hw.config |= AMD64_EVENTSEL_HOSTONLY;
+
+       return 0;
+}
+
+/*
+ * NB counters do not support the following event select bits:
+ *   Host/Guest only
+ *   Counter mask
+ *   Invert counter mask
+ *   Edge detect
+ *   OS/User mode
+ */
+static int amd_nb_hw_config(struct perf_event *event)
+{
+       /* for NB, we only allow system wide counting mode */
+       if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+               return -EINVAL;
+
+       if (event->attr.exclude_user || event->attr.exclude_kernel ||
+           event->attr.exclude_host || event->attr.exclude_guest)
+               return -EINVAL;
 
-       if (event->attr.type != PERF_TYPE_RAW)
-               return 0;
+       event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
+                             ARCH_PERFMON_EVENTSEL_OS);
 
-       event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
+       if (event->hw.config & ~(AMD64_RAW_EVENT_MASK_NB |
+                                ARCH_PERFMON_EVENTSEL_INT))
+               return -EINVAL;
 
        return 0;
 }
@@ -181,6 +285,11 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
        return (hwc->config & 0xe0) == 0xe0;
 }
 
+static inline int amd_is_perfctr_nb_event(struct hw_perf_event *hwc)
+{
+       return amd_nb_event_constraint && amd_is_nb_event(hwc);
+}
+
 static inline int amd_has_nb(struct cpu_hw_events *cpuc)
 {
        struct amd_nb *nb = cpuc->amd_nb;
@@ -188,19 +297,36 @@ static inline int amd_has_nb(struct cpu_hw_events *cpuc)
        return nb && nb->nb_id != -1;
 }
 
-static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
-                                     struct perf_event *event)
+static int amd_pmu_hw_config(struct perf_event *event)
+{
+       int ret;
+
+       /* pass precise event sampling to ibs: */
+       if (event->attr.precise_ip && get_ibs_caps())
+               return -ENOENT;
+
+       if (has_branch_stack(event))
+               return -EOPNOTSUPP;
+
+       ret = x86_pmu_hw_config(event);
+       if (ret)
+               return ret;
+
+       if (event->attr.type == PERF_TYPE_RAW)
+               event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
+
+       if (amd_is_perfctr_nb_event(&event->hw))
+               return amd_nb_hw_config(event);
+
+       return amd_core_hw_config(event);
+}
+
+static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
+                                          struct perf_event *event)
 {
-       struct hw_perf_event *hwc = &event->hw;
        struct amd_nb *nb = cpuc->amd_nb;
        int i;
 
-       /*
-        * only care about NB events
-        */
-       if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
-               return;
-
        /*
         * need to scan whole list because event may not have
         * been assigned during scheduling
@@ -215,6 +341,19 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
        }
 }
 
+static void amd_nb_interrupt_hw_config(struct hw_perf_event *hwc)
+{
+       int core_id = cpu_data(smp_processor_id()).cpu_core_id;
+
+       /* deliver interrupts only to this core */
+       if (hwc->config & ARCH_PERFMON_EVENTSEL_INT) {
+               hwc->config |= AMD64_EVENTSEL_INT_CORE_ENABLE;
+               hwc->config &= ~AMD64_EVENTSEL_INT_CORE_SEL_MASK;
+               hwc->config |= (u64)(core_id) <<
+                       AMD64_EVENTSEL_INT_CORE_SEL_SHIFT;
+       }
+}
+
  /*
   * AMD64 NorthBridge events need special treatment because
   * counter access needs to be synchronized across all cores
@@ -247,24 +386,24 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
   *
   * Given that resources are allocated (cmpxchg), they must be
   * eventually freed for others to use. This is accomplished by
-  * calling amd_put_event_constraints().
+  * calling __amd_put_nb_event_constraints()
   *
   * Non NB events are not impacted by this restriction.
   */
 static struct event_constraint *
-amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
+                              struct event_constraint *c)
 {
        struct hw_perf_event *hwc = &event->hw;
        struct amd_nb *nb = cpuc->amd_nb;
-       struct perf_event *old = NULL;
-       int max = x86_pmu.num_counters;
-       int i, j, k = -1;
+       struct perf_event *old;
+       int idx, new = -1;
 
-       /*
-        * if not NB event or no NB, then no constraints
-        */
-       if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
-               return &unconstrained;
+       if (!c)
+               c = &unconstrained;
+
+       if (cpuc->is_fake)
+               return c;
 
        /*
         * detect if already present, if so reuse
@@ -276,48 +415,36 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
         * because of successive calls to x86_schedule_events() from
         * hw_perf_group_sched_in() without hw_perf_enable()
         */
-       for (i = 0; i < max; i++) {
-               /*
-                * keep track of first free slot
-                */
-               if (k == -1 && !nb->owners[i])
-                       k = i;
+       for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
+               if (new == -1 || hwc->idx == idx)
+                       /* assign free slot, prefer hwc->idx */
+                       old = cmpxchg(nb->owners + idx, NULL, event);
+               else if (nb->owners[idx] == event)
+                       /* event already present */
+                       old = event;
+               else
+                       continue;
+
+               if (old && old != event)
+                       continue;
+
+               /* reassign to this slot */
+               if (new != -1)
+                       cmpxchg(nb->owners + new, event, NULL);
+               new = idx;
 
                /* already present, reuse */
-               if (nb->owners[i] == event)
-                       goto done;
-       }
-       /*
-        * not present, so grab a new slot
-        * starting either at:
-        */
-       if (hwc->idx != -1) {
-               /* previous assignment */
-               i = hwc->idx;
-       } else if (k != -1) {
-               /* start from free slot found */
-               i = k;
-       } else {
-               /*
-                * event not found, no slot found in
-                * first pass, try again from the
-                * beginning
-                */
-               i = 0;
-       }
-       j = i;
-       do {
-               old = cmpxchg(nb->owners+i, NULL, event);
-               if (!old)
+               if (old == event)
                        break;
-               if (++i == max)
-                       i = 0;
-       } while (i != j);
-done:
-       if (!old)
-               return &nb->event_constraints[i];
-
-       return &emptyconstraint;
+       }
+
+       if (new == -1)
+               return &emptyconstraint;
+
+       if (amd_is_perfctr_nb_event(hwc))
+               amd_nb_interrupt_hw_config(hwc);
+
+       return &nb->event_constraints[new];
 }
 
 static struct amd_nb *amd_alloc_nb(int cpu)
@@ -364,7 +491,7 @@ static void amd_pmu_cpu_starting(int cpu)
        struct amd_nb *nb;
        int i, nb_id;
 
-       cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+       cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
 
        if (boot_cpu_data.x86_max_cores < 2)
                return;
@@ -407,6 +534,26 @@ static void amd_pmu_cpu_dead(int cpu)
        }
 }
 
+static struct event_constraint *
+amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+{
+       /*
+        * if not NB event or no NB, then no constraints
+        */
+       if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
+               return &unconstrained;
+
+       return __amd_get_nb_event_constraints(cpuc, event,
+                                             amd_nb_event_constraint);
+}
+
+static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
+                                     struct perf_event *event)
+{
+       if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
+               __amd_put_nb_event_constraints(cpuc, event);
+}
+
 PMU_FORMAT_ATTR(event, "config:0-7,32-35");
 PMU_FORMAT_ATTR(umask, "config:8-15"   );
 PMU_FORMAT_ATTR(edge,  "config:18"     );
@@ -496,6 +643,9 @@ static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09,
 static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
 static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
 
+static struct event_constraint amd_NBPMC96 = EVENT_CONSTRAINT(0, 0x3C0, 0);
+static struct event_constraint amd_NBPMC74 = EVENT_CONSTRAINT(0, 0xF0, 0);
+
 static struct event_constraint *
 amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
 {
@@ -561,8 +711,8 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev
                        return &amd_f15_PMC20;
                }
        case AMD_EVENT_NB:
-               /* not yet implemented */
-               return &emptyconstraint;
+               return __amd_get_nb_event_constraints(cpuc, event,
+                                                     amd_nb_event_constraint);
        default:
                return &emptyconstraint;
        }
@@ -587,6 +737,8 @@ static __initconst const struct x86_pmu amd_pmu = {
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_K7_EVNTSEL0,
        .perfctr                = MSR_K7_PERFCTR0,
+       .addr_offset            = amd_pmu_addr_offset,
+       .rdpmc_index            = amd_pmu_rdpmc_index,
        .event_map              = amd_pmu_event_map,
        .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
        .num_counters           = AMD64_NUM_COUNTERS,
@@ -608,7 +760,7 @@ static __initconst const struct x86_pmu amd_pmu = {
 
 static int setup_event_constraints(void)
 {
-       if (boot_cpu_data.x86 >= 0x15)
+       if (boot_cpu_data.x86 == 0x15)
                x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
        return 0;
 }
@@ -638,6 +790,23 @@ static int setup_perfctr_core(void)
        return 0;
 }
 
+static int setup_perfctr_nb(void)
+{
+       if (!cpu_has_perfctr_nb)
+               return -ENODEV;
+
+       x86_pmu.num_counters += AMD64_NUM_COUNTERS_NB;
+
+       if (cpu_has_perfctr_core)
+               amd_nb_event_constraint = &amd_NBPMC96;
+       else
+               amd_nb_event_constraint = &amd_NBPMC74;
+
+       printk(KERN_INFO "perf: AMD northbridge performance counters detected\n");
+
+       return 0;
+}
+
 __init int amd_pmu_init(void)
 {
        /* Performance-monitoring supported from K7 and later: */
@@ -648,6 +817,7 @@ __init int amd_pmu_init(void)
 
        setup_event_constraints();
        setup_perfctr_core();
+       setup_perfctr_nb();
 
        /* Events are common for all AMDs */
        memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
@@ -678,7 +848,7 @@ void amd_pmu_disable_virt(void)
         * SVM is disabled the Guest-only bits still gets set and the counter
         * will not count anything.
         */
-       cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+       cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
 
        /* Reload all events */
        x86_pmu_disable_all();
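
The comment block introducing amd_pmu_addr_offset() above describes three MSR layouts: the legacy K7 counters at 0xc0010000 with a stride of 1, the core performance counter extensions at 0xc0010200 with a stride of 2, and the north bridge counters at 0xc0010240 with a stride of 2. A short sketch of the resulting event-select MSR addresses; it illustrates the offset rule only and assumes the NB counters are indexed directly after the core ones, as the amd_NBPMC96 constraint mask (0x3C0) suggests:

    #include <stdio.h>
    #include <stdint.h>

    /*
     * Event-select MSR address for counter 'index', following the layout in
     * the comment above amd_pmu_addr_offset():
     *   legacy:             4 counters at 0xc0010000, stride 1
     *   core extensions:    6 counters at 0xc0010200, stride 2
     *   north bridge ext.:  4 counters at 0xc0010240, stride 2
     */
    static uint32_t amd_eventsel_addr(int index, int has_perfctr_core,
                                      int nb_first_index)
    {
            if (nb_first_index >= 0 && index >= nb_first_index)
                    return 0xc0010240 + ((index - nb_first_index) << 1);
            if (!has_perfctr_core)
                    return 0xc0010000 + index;
            return 0xc0010200 + (index << 1);
    }

    int main(void)
    {
            int i;

            /* Family 15h style layout: 6 core counters then 4 NB counters. */
            for (i = 0; i < 10; i++)
                    printf("counter %d -> eventsel MSR 0x%x\n",
                           i, amd_eventsel_addr(i, 1, 6));
            return 0;
    }
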
index 93b9e1181f830ff91b9ad432e0d148a38b0e93c3..4914e94ad6e86a9565420dcac8d014ff679ab6fa 100644 (file)
@@ -2019,7 +2019,10 @@ __init int intel_pmu_init(void)
                break;
 
        case 28: /* Atom */
-       case 54: /* Cedariew */
+       case 38: /* Lincroft */
+       case 39: /* Penwell */
+       case 53: /* Cloverview */
+       case 54: /* Cedarview */
                memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
@@ -2084,6 +2087,7 @@ __init int intel_pmu_init(void)
                pr_cont("SandyBridge events, ");
                break;
        case 58: /* IvyBridge */
+       case 62: /* IvyBridge EP */
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
index f2af39f5dc3d96916f8665350445a3e8155f290b..4820c232a0b91421d1ee77b194a8881a13143753 100644 (file)
@@ -19,7 +19,7 @@ static const u64 p6_perfmon_event_map[] =
 
 };
 
-static __initconst u64 p6_hw_cache_event_ids
+static u64 p6_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
index d22d0c4edcfd0ab0ef81322fbfe0ffd7e81a5d32..03a36321ec54428b098a3a874f7db83e4daafcfd 100644 (file)
@@ -33,6 +33,9 @@
 
 #define VMWARE_PORT_CMD_GETVERSION     10
 #define VMWARE_PORT_CMD_GETHZ          45
+#define VMWARE_PORT_CMD_GETVCPU_INFO   68
+#define VMWARE_PORT_CMD_LEGACY_X2APIC  3
+#define VMWARE_PORT_CMD_VCPU_RESERVED  31
 
 #define VMWARE_PORT(cmd, eax, ebx, ecx, edx)                           \
        __asm__("inl (%%dx)" :                                          \
@@ -125,10 +128,20 @@ static void __cpuinit vmware_set_cpu_features(struct cpuinfo_x86 *c)
        set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
 }
 
+/* Checks if hypervisor supports x2apic without VT-D interrupt remapping. */
+static bool __init vmware_legacy_x2apic_available(void)
+{
+       uint32_t eax, ebx, ecx, edx;
+       VMWARE_PORT(GETVCPU_INFO, eax, ebx, ecx, edx);
+       return (eax & (1 << VMWARE_PORT_CMD_VCPU_RESERVED)) == 0 &&
+              (eax & (1 << VMWARE_PORT_CMD_LEGACY_X2APIC)) != 0;
+}
+
 const __refconst struct hypervisor_x86 x86_hyper_vmware = {
        .name                   = "VMware",
        .detect                 = vmware_platform,
        .set_cpu_features       = vmware_set_cpu_features,
        .init_platform          = vmware_platform_setup,
+       .x2apic_available       = vmware_legacy_x2apic_available,
 };
 EXPORT_SYMBOL(x86_hyper_vmware);
index ff84d5469d772fd8ae8f5e0599e8072333eeb0c1..8831176aa5ef09d9ffd99976be4777cbf67bf505 100644 (file)
@@ -1065,7 +1065,6 @@ ENTRY(xen_failsafe_callback)
        lea 16(%esp),%esp
        CFI_ADJUST_CFA_OFFSET -16
        jz 5f
-       addl $16,%esp
        jmp iret_exc
 5:     pushl_cfi $-1 /* orig_ax = -1 => not a system call */
        SAVE_ALL
@@ -1092,11 +1091,18 @@ ENTRY(xen_failsafe_callback)
        _ASM_EXTABLE(4b,9b)
 ENDPROC(xen_failsafe_callback)
 
-BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
+BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
                xen_evtchn_do_upcall)
 
 #endif /* CONFIG_XEN */
 
+#if IS_ENABLED(CONFIG_HYPERV)
+
+BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
+       hyperv_vector_handler)
+
+#endif /* CONFIG_HYPERV */
+
 #ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 
index 07a7a04529bc5d7849ffc21b79819edd7b23ffd9..048f2240f8e62d222221d1e49b9215c5c512a553 100644 (file)
@@ -1454,11 +1454,16 @@ ENTRY(xen_failsafe_callback)
        CFI_ENDPROC
 END(xen_failsafe_callback)
 
-apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
+apicinterrupt HYPERVISOR_CALLBACK_VECTOR \
        xen_hvm_callback_vector xen_evtchn_do_upcall
 
 #endif /* CONFIG_XEN */
 
+#if IS_ENABLED(CONFIG_HYPERV)
+apicinterrupt HYPERVISOR_CALLBACK_VECTOR \
+       hyperv_callback_vector hyperv_vector_handler
+#endif /* CONFIG_HYPERV */
+
 /*
  * Some functions should be protected against kprobes
  */
@@ -1781,6 +1786,7 @@ first_nmi:
         * Leave room for the "copied" frame
         */
        subq $(5*8), %rsp
+       CFI_ADJUST_CFA_OFFSET 5*8
 
        /* Copy the stack frame to the Saved frame */
        .rept 5
@@ -1863,10 +1869,8 @@ end_repeat_nmi:
 nmi_swapgs:
        SWAPGS_UNSAFE_STACK
 nmi_restore:
-       RESTORE_ALL 8
-
-       /* Pop the extra iret frame */
-       addq $(5*8), %rsp
+       /* Pop the extra iret frame at once */
+       RESTORE_ALL 6*8
 
        /* Clear the NMI executing stack variable */
        movq $0, 5*8(%rsp)
index c18f59d10101cefc82f6638bcca758a480b70cc3..6773c918b8ccb641a02d54d3b11e3433b4b72915 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/io_apic.h>
 #include <asm/bios_ebda.h>
 #include <asm/tlbflush.h>
+#include <asm/bootparam_utils.h>
 
 static void __init i386_default_early_setup(void)
 {
@@ -30,6 +31,8 @@ static void __init i386_default_early_setup(void)
 
 void __init i386_start_kernel(void)
 {
+       sanitize_boot_params(&boot_params);
+
        memblock_reserve(__pa_symbol(&_text),
                         __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
 
index 037df57a99ac34d5ba1a5cab5abaa4554e0689d0..849fc9e63c2ffd875ba23d7bb2ea879ac756c30f 100644 (file)
@@ -25,6 +25,7 @@
 #include <asm/kdebug.h>
 #include <asm/e820.h>
 #include <asm/bios_ebda.h>
+#include <asm/bootparam_utils.h>
 
 static void __init zap_identity_mappings(void)
 {
@@ -46,6 +47,7 @@ static void __init copy_bootdata(char *real_mode_data)
        char * command_line;
 
        memcpy(&boot_params, real_mode_data, sizeof boot_params);
+       sanitize_boot_params(&boot_params);
        if (boot_params.hdr.cmd_line_ptr) {
                command_line = __va(boot_params.hdr.cmd_line_ptr);
                memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
index 8e7f6556028f7ff50d90c73eac405d3cdcb7b881..3c3f58a0808fb544987f70372d3ac202d2c75086 100644 (file)
@@ -300,37 +300,52 @@ ENTRY(startup_32_smp)
        leal -__PAGE_OFFSET(%ecx),%esp
 
 default_entry:
+#define CR0_STATE      (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
+                        X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
+                        X86_CR0_PG)
+       movl $(CR0_STATE & ~X86_CR0_PG),%eax
+       movl %eax,%cr0
+
+/*
+ * We want to start out with EFLAGS unambiguously cleared. Some BIOSes leave
+ * bits like NT set. This would confuse the debugger if this code is traced. So
+ * initialize them properly now before switching to protected mode. That means
+ * DF in particular (even though we have cleared it earlier after copying the
+ * command line) because GCC expects it to be clear.
+ */
+       pushl $0
+       popfl
+
 /*
- *     New page tables may be in 4Mbyte page mode and may
- *     be using the global pages. 
+ * New page tables may be in 4Mbyte page mode and may be using the global pages.
  *
- *     NOTE! If we are on a 486 we may have no cr4 at all!
- *     Specifically, cr4 exists if and only if CPUID exists
- *     and has flags other than the FPU flag set.
+ * NOTE! If we are on a 486 we may have no cr4 at all! Specifically, cr4 exists
+ * if and only if CPUID exists and has flags other than the FPU flag set.
  */
+       movl $-1,pa(X86_CPUID)          # preset CPUID level
        movl $X86_EFLAGS_ID,%ecx
        pushl %ecx
-       popfl
+       popfl                           # set EFLAGS=ID
        pushfl
-       popl %eax
-       pushl $0
-       popfl
-       pushfl
-       popl %edx
-       xorl %edx,%eax
-       testl %ecx,%eax
-       jz 6f                   # No ID flag = no CPUID = no CR4
+       popl %eax                       # get EFLAGS
+       testl $X86_EFLAGS_ID,%eax       # did EFLAGS.ID remain set?
+       jz enable_paging                # hw disallowed setting of ID bit
+                                       # which means no CPUID and no CR4
+
+       xorl %eax,%eax
+       cpuid
+       movl %eax,pa(X86_CPUID)         # save largest std CPUID function
 
        movl $1,%eax
        cpuid
-       andl $~1,%edx           # Ignore CPUID.FPU
-       jz 6f                   # No flags or only CPUID.FPU = no CR4
+       andl $~1,%edx                   # Ignore CPUID.FPU
+       jz enable_paging                # No flags or only CPUID.FPU = no CR4
 
        movl pa(mmu_cr4_features),%eax
        movl %eax,%cr4
 
        testb $X86_CR4_PAE, %al         # check if PAE is enabled
-       jz 6f
+       jz enable_paging
 
        /* Check if extended functions are implemented */
        movl $0x80000000, %eax
@@ -338,7 +353,7 @@ default_entry:
        /* Value must be in the range 0x80000001 to 0x8000ffff */
        subl $0x80000001, %eax
        cmpl $(0x8000ffff-0x80000001), %eax
-       ja 6f
+       ja enable_paging
 
        /* Clear bogus XD_DISABLE bits */
        call verify_cpu
@@ -347,7 +362,7 @@ default_entry:
        cpuid
        /* Execute Disable bit supported? */
        btl $(X86_FEATURE_NX & 31), %edx
-       jnc 6f
+       jnc enable_paging
 
        /* Setup EFER (Extended Feature Enable Register) */
        movl $MSR_EFER, %ecx
@@ -357,29 +372,20 @@ default_entry:
        /* Make changes effective */
        wrmsr
 
-6:
+enable_paging:
 
 /*
  * Enable paging
  */
        movl $pa(initial_page_table), %eax
        movl %eax,%cr3          /* set the page table pointer.. */
-       movl %cr0,%eax
-       orl  $X86_CR0_PG,%eax
+       movl $CR0_STATE,%eax
        movl %eax,%cr0          /* ..and set paging (PG) bit */
        ljmp $__BOOT_CS,$1f     /* Clear prefetch and normalize %eip */
 1:
        /* Shift the stack pointer to a virtual address */
        addl $__PAGE_OFFSET, %esp
 
-/*
- * Initialize eflags.  Some BIOS's leave bits like NT set.  This would
- * confuse the debugger if this code is traced.
- * XXX - best to initialize before switching to protected mode.
- */
-       pushl $0
-       popfl
-
 /*
  * start system 32-bit setup. We need to re-do some of the things done
  * in 16-bit mode for the "real" operations.
@@ -389,31 +395,11 @@ default_entry:
        jz 1f                           # Did we do this already?
        call *%eax
 1:
-       
-/* check if it is 486 or 386. */
+
 /*
- * XXX - this does a lot of unnecessary setup.  Alignment checks don't
- * apply at our cpl of 0 and the stack ought to be aligned already, and
- * we don't need to preserve eflags.
+ * Check if it is 486
  */
-       movl $-1,X86_CPUID      # -1 for no CPUID initially
-       movb $3,X86             # at least 386
-       pushfl                  # push EFLAGS
-       popl %eax               # get EFLAGS
-       movl %eax,%ecx          # save original EFLAGS
-       xorl $0x240000,%eax     # flip AC and ID bits in EFLAGS
-       pushl %eax              # copy to EFLAGS
-       popfl                   # set EFLAGS
-       pushfl                  # get new EFLAGS
-       popl %eax               # put it in eax
-       xorl %ecx,%eax          # change in flags
-       pushl %ecx              # restore original EFLAGS
-       popfl
-       testl $0x40000,%eax     # check if AC bit changed
-       je is386
-
-       movb $4,X86             # at least 486
-       testl $0x200000,%eax    # check if ID bit changed
+       cmpl $-1,X86_CPUID
        je is486
 
        /* get vendor info */
@@ -439,11 +425,10 @@ default_entry:
        movb %cl,X86_MASK
        movl %edx,X86_CAPABILITY
 
-is486: movl $0x50022,%ecx      # set AM, WP, NE and MP
-       jmp 2f
-
-is386: movl $2,%ecx            # set MP
-2:     movl %cr0,%eax
+is486:
+       movb $4,X86
+       movl $0x50022,%ecx      # set AM, WP, NE and MP
+       movl %cr0,%eax
        andl $0x80000011,%eax   # Save PG,PE,ET
        orl %ecx,%eax
        movl %eax,%cr0
@@ -468,7 +453,6 @@ is386:      movl $2,%ecx            # set MP
        xorl %eax,%eax                  # Clear LDT
        lldt %ax
 
-       cld                     # gcc2 wants the direction flag cleared at all times
        pushl $0                # fake return address for unwinder
        jmp *(initial_code)
 
index e28670f9a589ad30d2654a9ed5983941a2c82cc9..da85a8e830a12d65e9d2f8a7f1db39b5477e9e85 100644 (file)
@@ -478,7 +478,7 @@ static int hpet_msi_next_event(unsigned long delta,
 
 static int hpet_setup_msi_irq(unsigned int irq)
 {
-       if (arch_setup_hpet_msi(irq, hpet_blockid)) {
+       if (x86_msi.setup_hpet_msi(irq, hpet_blockid)) {
                destroy_irq(irq);
                return -EINVAL;
        }
diff --git a/arch/x86/kernel/kprobes/Makefile b/arch/x86/kernel/kprobes/Makefile
new file mode 100644 (file)
index 0000000..0d33169
--- /dev/null
@@ -0,0 +1,7 @@
+#
+# Makefile for kernel probes
+#
+
+obj-$(CONFIG_KPROBES)          += core.o
+obj-$(CONFIG_OPTPROBES)                += opt.o
+obj-$(CONFIG_KPROBES_ON_FTRACE)        += ftrace.o
similarity index 90%
rename from arch/x86/kernel/kprobes-common.h
rename to arch/x86/kernel/kprobes/common.h
index 3230b68ef29aee96487bffa686649aed0cffcaf1..2e9d4b5af036abf5a86868cb706e479df3911d23 100644 (file)
@@ -99,4 +99,15 @@ static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsig
        return addr;
 }
 #endif
+
+#ifdef CONFIG_KPROBES_ON_FTRACE
+extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+                          struct kprobe_ctlblk *kcb);
+#else
+static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+                                 struct kprobe_ctlblk *kcb)
+{
+       return 0;
+}
+#endif
 #endif
similarity index 94%
rename from arch/x86/kernel/kprobes.c
rename to arch/x86/kernel/kprobes/core.c
index 57916c0d3cf6c21ff622a57077e56dec52469d9e..e124554598eeebdece1f360d7c4aaea38bf070fd 100644 (file)
@@ -58,7 +58,7 @@
 #include <asm/insn.h>
 #include <asm/debugreg.h>
 
-#include "kprobes-common.h"
+#include "common.h"
 
 void jprobe_return_end(void);
 
@@ -78,7 +78,7 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
         * Groups, and some special opcodes can not boost.
         * This is non-const and volatile to keep gcc from statically
         * optimizing it out, as variable_test_bit makes gcc think only
-        * *(unsigned long*) is used. 
+        * *(unsigned long*) is used.
         */
 static volatile u32 twobyte_is_boostable[256 / 32] = {
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
@@ -117,7 +117,7 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
        struct __arch_relative_insn {
                u8 op;
                s32 raddr;
-       } __attribute__((packed)) *insn;
+       } __packed *insn;
 
        insn = (struct __arch_relative_insn *)from;
        insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
@@ -541,23 +541,6 @@ reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb
        return 1;
 }
 
-#ifdef KPROBES_CAN_USE_FTRACE
-static void __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-                                     struct kprobe_ctlblk *kcb)
-{
-       /*
-        * Emulate singlestep (and also recover regs->ip)
-        * as if there is a 5byte nop
-        */
-       regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
-       if (unlikely(p->post_handler)) {
-               kcb->kprobe_status = KPROBE_HIT_SSDONE;
-               p->post_handler(p, regs, 0);
-       }
-       __this_cpu_write(current_kprobe, NULL);
-}
-#endif
-
 /*
  * Interrupts are disabled on entry as trap3 is an interrupt gate and they
  * remain disabled throughout this function.
@@ -616,13 +599,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
        } else if (kprobe_running()) {
                p = __this_cpu_read(current_kprobe);
                if (p->break_handler && p->break_handler(p, regs)) {
-#ifdef KPROBES_CAN_USE_FTRACE
-                       if (kprobe_ftrace(p)) {
-                               skip_singlestep(p, regs, kcb);
-                               return 1;
-                       }
-#endif
-                       setup_singlestep(p, regs, kcb, 0);
+                       if (!skip_singlestep(p, regs, kcb))
+                               setup_singlestep(p, regs, kcb, 0);
                        return 1;
                }
        } /* else: not a kprobe fault; let the kernel handle it */
@@ -1075,50 +1053,6 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
        return 0;
 }
 
-#ifdef KPROBES_CAN_USE_FTRACE
-/* Ftrace callback handler for kprobes */
-void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
-                                    struct ftrace_ops *ops, struct pt_regs *regs)
-{
-       struct kprobe *p;
-       struct kprobe_ctlblk *kcb;
-       unsigned long flags;
-
-       /* Disable irq for emulating a breakpoint and avoiding preempt */
-       local_irq_save(flags);
-
-       p = get_kprobe((kprobe_opcode_t *)ip);
-       if (unlikely(!p) || kprobe_disabled(p))
-               goto end;
-
-       kcb = get_kprobe_ctlblk();
-       if (kprobe_running()) {
-               kprobes_inc_nmissed_count(p);
-       } else {
-               /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
-               regs->ip = ip + sizeof(kprobe_opcode_t);
-
-               __this_cpu_write(current_kprobe, p);
-               kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-               if (!p->pre_handler || !p->pre_handler(p, regs))
-                       skip_singlestep(p, regs, kcb);
-               /*
-                * If pre_handler returns !0, it sets regs->ip and
-                * resets current kprobe.
-                */
-       }
-end:
-       local_irq_restore(flags);
-}
-
-int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
-{
-       p->ainsn.insn = NULL;
-       p->ainsn.boostable = -1;
-       return 0;
-}
-#endif
-
 int __init arch_init_kprobes(void)
 {
        return arch_init_optprobes();
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
new file mode 100644 (file)
index 0000000..23ef5c5
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * Dynamic Ftrace based Kprobes Optimization
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) Hitachi Ltd., 2012
+ */
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+#include <linux/hardirq.h>
+#include <linux/preempt.h>
+#include <linux/ftrace.h>
+
+#include "common.h"
+
+static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+                            struct kprobe_ctlblk *kcb)
+{
+       /*
+        * Emulate singlestep (and also recover regs->ip)
+        * as if there is a 5byte nop
+        */
+       regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
+       if (unlikely(p->post_handler)) {
+               kcb->kprobe_status = KPROBE_HIT_SSDONE;
+               p->post_handler(p, regs, 0);
+       }
+       __this_cpu_write(current_kprobe, NULL);
+       return 1;
+}
+
+int __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+                             struct kprobe_ctlblk *kcb)
+{
+       if (kprobe_ftrace(p))
+               return __skip_singlestep(p, regs, kcb);
+       else
+               return 0;
+}
+
+/* Ftrace callback handler for kprobes */
+void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+                                    struct ftrace_ops *ops, struct pt_regs *regs)
+{
+       struct kprobe *p;
+       struct kprobe_ctlblk *kcb;
+       unsigned long flags;
+
+       /* Disable irq for emulating a breakpoint and avoiding preempt */
+       local_irq_save(flags);
+
+       p = get_kprobe((kprobe_opcode_t *)ip);
+       if (unlikely(!p) || kprobe_disabled(p))
+               goto end;
+
+       kcb = get_kprobe_ctlblk();
+       if (kprobe_running()) {
+               kprobes_inc_nmissed_count(p);
+       } else {
+               /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
+               regs->ip = ip + sizeof(kprobe_opcode_t);
+
+               __this_cpu_write(current_kprobe, p);
+               kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+               if (!p->pre_handler || !p->pre_handler(p, regs))
+                       __skip_singlestep(p, regs, kcb);
+               /*
+                * If pre_handler returns !0, it sets regs->ip and
+                * resets current kprobe.
+                */
+       }
+end:
+       local_irq_restore(flags);
+}
+
+int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+       p->ainsn.insn = NULL;
+       p->ainsn.boostable = -1;
+       return 0;
+}
similarity index 99%
rename from arch/x86/kernel/kprobes-opt.c
rename to arch/x86/kernel/kprobes/opt.c
index c5e410eed4039464faf98767dfb41b9793ee061f..76dc6f09572466426268955fbb11fdc845dca040 100644 (file)
@@ -37,7 +37,7 @@
 #include <asm/insn.h>
 #include <asm/debugreg.h>
 
-#include "kprobes-common.h"
+#include "common.h"
 
 unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
 {
index 08b973f64032ba31f5e91a0558e2dc003904531a..2b44ea5f269dbb3b22b649b5461163195653da80 100644 (file)
@@ -43,6 +43,7 @@
 #include <asm/apicdef.h>
 #include <asm/hypervisor.h>
 #include <asm/kvm_guest.h>
+#include <asm/context_tracking.h>
 
 static int kvmapf = 1;
 
@@ -121,6 +122,8 @@ void kvm_async_pf_task_wait(u32 token)
        struct kvm_task_sleep_node n, *e;
        DEFINE_WAIT(wait);
 
+       rcu_irq_enter();
+
        spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
@@ -128,6 +131,8 @@ void kvm_async_pf_task_wait(u32 token)
                hlist_del(&e->link);
                kfree(e);
                spin_unlock(&b->lock);
+
+               rcu_irq_exit();
                return;
        }
 
@@ -152,13 +157,16 @@ void kvm_async_pf_task_wait(u32 token)
                        /*
                         * We cannot reschedule. So halt.
                         */
+                       rcu_irq_exit();
                        native_safe_halt();
+                       rcu_irq_enter();
                        local_irq_disable();
                }
        }
        if (!n.halted)
                finish_wait(&n.wq, &wait);
 
+       rcu_irq_exit();
        return;
 }
 EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
@@ -252,10 +260,10 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* page is swapped out by the host. */
-               rcu_irq_enter();
+               exception_enter(regs);
                exit_idle();
                kvm_async_pf_task_wait((u32)read_cr2());
-               rcu_irq_exit();
+               exception_exit(regs);
                break;
        case KVM_PV_REASON_PAGE_READY:
                rcu_irq_enter();
@@ -497,6 +505,7 @@ static bool __init kvm_detect(void)
 const struct hypervisor_x86 x86_hyper_kvm __refconst = {
        .name                   = "KVM",
        .detect                 = kvm_detect,
+       .x2apic_available       = kvm_para_available,
 };
 EXPORT_SYMBOL_GPL(x86_hyper_kvm);
 
index a7c5661f84962a2f6eb80509c1791392352a6fc8..4929502c1372db979d7e1b176b22a9981e8ec96e 100644 (file)
@@ -174,6 +174,9 @@ static int msr_open(struct inode *inode, struct file *file)
        unsigned int cpu;
        struct cpuinfo_x86 *c;
 
+       if (!capable(CAP_SYS_RAWIO))
+               return -EPERM;
+
        cpu = iminor(file->f_path.dentry->d_inode);
        if (cpu >= nr_cpu_ids || !cpu_online(cpu))
                return -ENXIO;  /* No such CPU */
index 0f5dec5c80e0fccbba37474463e724970503faf2..872079a67e4d262151dfdbe74f537033be5ebcc0 100644 (file)
@@ -56,7 +56,7 @@ struct device x86_dma_fallback_dev = {
 EXPORT_SYMBOL(x86_dma_fallback_dev);
 
 /* Number of entries preallocated for DMA-API debugging */
-#define PREALLOC_DMA_DEBUG_ENTRIES       32768
+#define PREALLOC_DMA_DEBUG_ENTRIES       65536
 
 int dma_set_mask(struct device *dev, u64 mask)
 {
index b629bbe0d9bdeee5a416a8a6390167e595cf6036..29a8120e6fe88a88012490a51130be8b89ea2193 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/perf_event.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/rcupdate.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/context_tracking.h>
 
 #include <asm/uaccess.h>
index 4e8ba39eaf0fd93cbca557eaecf3efcd954ee05b..76fa1e9a2b39399b1944e4a13c68a054c9a5c386 100644 (file)
@@ -584,7 +584,7 @@ static void native_machine_emergency_restart(void)
                        break;
 
                case BOOT_EFI:
-                       if (efi_enabled)
+                       if (efi_enabled(EFI_RUNTIME_SERVICES))
                                efi.reset_system(reboot_mode ?
                                                 EFI_RESET_WARM :
                                                 EFI_RESET_COLD,
index 801602b5d745adeb1eb892294588bbcb8de1a88d..2e8f3d3b56410a0eb10bbc0fe1d18877a46ff2fc 100644 (file)
@@ -149,7 +149,6 @@ unsigned long mach_get_cmos_time(void)
        if (century) {
                century = bcd2bin(century);
                year += century * 100;
-               printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
        } else
                year += CMOS_YEARS_OFFS;
 
index 23ddd558fbd52d1376576b200122448d94165708..8b24289cc10c0f2239623918e587810b81c43c9b 100644 (file)
@@ -610,6 +610,83 @@ static __init void reserve_ibft_region(void)
 
 static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
 
+static bool __init snb_gfx_workaround_needed(void)
+{
+#ifdef CONFIG_PCI
+       int i;
+       u16 vendor, devid;
+       static const __initconst u16 snb_ids[] = {
+               0x0102,
+               0x0112,
+               0x0122,
+               0x0106,
+               0x0116,
+               0x0126,
+               0x010a,
+       };
+
+       /* Assume no if something weird is going on with PCI */
+       if (!early_pci_allowed())
+               return false;
+
+       vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
+       if (vendor != 0x8086)
+               return false;
+
+       devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
+       for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
+               if (devid == snb_ids[i])
+                       return true;
+#endif
+
+       return false;
+}
+
+/*
+ * Sandy Bridge graphics has trouble with certain ranges, exclude
+ * them from allocation.
+ */
+static void __init trim_snb_memory(void)
+{
+       static const __initconst unsigned long bad_pages[] = {
+               0x20050000,
+               0x20110000,
+               0x20130000,
+               0x20138000,
+               0x40004000,
+       };
+       int i;
+
+       if (!snb_gfx_workaround_needed())
+               return;
+
+       printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");
+
+       /*
+        * Reserve all memory below the 1 MB mark that has not
+        * already been reserved.
+        */
+       memblock_reserve(0, 1<<20);
+
+       for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
+               if (memblock_reserve(bad_pages[i], PAGE_SIZE))
+                       printk(KERN_WARNING "failed to reserve 0x%08lx\n",
+                              bad_pages[i]);
+       }
+}
+
+/*
+ * Here we put platform-specific memory range workarounds, i.e.
+ * memory known to be corrupt or that otherwise needs to be reserved on
+ * specific platforms.
+ *
+ * If this gets used more widely it could use a real dispatch mechanism.
+ */
+static void __init trim_platform_memory_ranges(void)
+{
+       trim_snb_memory();
+}
+
 static void __init trim_bios_range(void)
 {
        /*
@@ -630,6 +707,7 @@ static void __init trim_bios_range(void)
         * take them out.
         */
        e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
+
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 }
 
@@ -729,15 +807,15 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_EFI
        if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
                     "EL32", 4)) {
-               efi_enabled = 1;
-               efi_64bit = false;
+               set_bit(EFI_BOOT, &x86_efi_facility);
        } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
                     "EL64", 4)) {
-               efi_enabled = 1;
-               efi_64bit = true;
+               set_bit(EFI_BOOT, &x86_efi_facility);
+               set_bit(EFI_64BIT, &x86_efi_facility);
        }
-       if (efi_enabled && efi_memblock_x86_reserve_range())
-               efi_enabled = 0;
+
+       if (efi_enabled(EFI_BOOT))
+               efi_memblock_x86_reserve_range();
 #endif
 
        x86_init.oem.arch_setup();
@@ -810,7 +888,7 @@ void __init setup_arch(char **cmdline_p)
 
        finish_e820_parsing();
 
-       if (efi_enabled)
+       if (efi_enabled(EFI_BOOT))
                efi_init();
 
        dmi_scan_machine();
@@ -893,7 +971,7 @@ void __init setup_arch(char **cmdline_p)
         * The EFI specification says that boot service code won't be called
         * after ExitBootServices(). This is, in fact, a lie.
         */
-       if (efi_enabled)
+       if (efi_enabled(EFI_MEMMAP))
                efi_reserve_boot_services();
 
        /* preallocate 4k for mptable mpc */
@@ -908,6 +986,8 @@ void __init setup_arch(char **cmdline_p)
 
        setup_real_mode();
 
+       trim_platform_memory_ranges();
+
        init_gbpages();
 
        /* max_pfn_mapped is updated here */
@@ -1034,7 +1114,7 @@ void __init setup_arch(char **cmdline_p)
 
 #ifdef CONFIG_VT
 #if defined(CONFIG_VGA_CONSOLE)
-       if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
+       if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
                conswitchp = &vga_con;
 #elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
@@ -1051,14 +1131,14 @@ void __init setup_arch(char **cmdline_p)
        register_refined_jiffies(CLOCK_TICK_RATE);
 
 #ifdef CONFIG_EFI
-       /* Once setup is done above, disable efi_enabled on mismatched
-        * firmware/kernel archtectures since there is no support for
-        * runtime services.
+       /* Once setup is done above, unmap the EFI memory map on
+        * mismatched firmware/kernel architectures since there is no
+        * support for runtime services.
         */
-       if (efi_enabled && IS_ENABLED(CONFIG_X86_64) != efi_64bit) {
+       if (efi_enabled(EFI_BOOT) &&
+           IS_ENABLED(CONFIG_X86_64) != efi_enabled(EFI_64BIT)) {
                pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
                efi_unmap_memmap();
-               efi_enabled = 0;
        }
 #endif
 }
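
The setup.c hunks above add snb_gfx_workaround_needed(), a vendor check followed by a linear scan of a static device-ID table, to decide whether the Sandy Bridge graphics memory reservation workaround is required. The same lookup pattern as a standalone userspace sketch, with the IDs taken from the patch and the early-PCI config reads replaced by plain parameters:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Sandy Bridge integrated-graphics device IDs listed in the patch. */
static const uint16_t snb_ids[] = {
	0x0102, 0x0112, 0x0122, 0x0106, 0x0116, 0x0126, 0x010a,
};

/* Mirrors the vendor check plus table scan in snb_gfx_workaround_needed(). */
static bool snb_gfx_workaround_needed(uint16_t vendor, uint16_t devid)
{
	size_t i;

	if (vendor != 0x8086)	/* Intel */
		return false;

	for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
		if (devid == snb_ids[i])
			return true;

	return false;
}

int main(void)
{
	/* Hypothetical values; the kernel reads them from PCI config space. */
	printf("%d\n", snb_gfx_workaround_needed(0x8086, 0x0116));	/* 1 */
	printf("%d\n", snb_gfx_workaround_needed(0x8086, 0x0042));	/* 0 */
	return 0;
}
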
index cd3b2438a9800b8568b0d79788ef3fdb438aa1ca..9b4d51d0c0d013274f7ba46c2e58319f0d1d9145 100644 (file)
@@ -165,10 +165,11 @@ void set_task_blockstep(struct task_struct *task, bool on)
         * Ensure irq/preemption can't change debugctl in between.
         * Note also that both TIF_BLOCKSTEP and debugctl should
         * be changed atomically wrt preemption.
-        * FIXME: this means that set/clear TIF_BLOCKSTEP is simply
-        * wrong if task != current, SIGKILL can wakeup the stopped
-        * tracee and set/clear can play with the running task, this
-        * can confuse the next __switch_to_xtra().
+        *
+        * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
+        * task is current or it can't be running, otherwise we can race
+        * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but
+        * PTRACE_KILL is not safe.
         */
        local_irq_disable();
        debugctl = get_debugctlmsr();
index 97ef74b88e0f8f6213308c6168303779302adec0..dbded5aedb818d511a46967ccb1d26d77233110f 100644 (file)
@@ -157,7 +157,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        if (flags & MAP_FIXED)
                return addr;
 
-       /* for MAP_32BIT mappings we force the legact mmap base */
+       /* for MAP_32BIT mappings we force the legacy mmap base */
        if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
                goto bottomup;
 
index 06ccb5073a3f7280f1f71b20cbf7edc68d7d278c..4b9ea101fe3b2923744b5b0b5b61c15ff1c3f296 100644 (file)
@@ -623,7 +623,8 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
        ns_now = __cycles_2_ns(tsc_now);
 
        if (cpu_khz) {
-               *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;
+               *scale = ((NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) +
+                               cpu_khz / 2) / cpu_khz;
                *offset = ns_now - mult_frac(tsc_now, *scale,
                                             (1UL << CYC2NS_SCALE_FACTOR));
        }
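
The tsc.c hunk above replaces a truncating division with a round-to-nearest one by adding half the divisor before dividing, so the cycles-to-nanoseconds scale factor is no longer systematically biased low. A minimal userspace sketch of the idiom, with an assumed cpu_khz and the kernel's CYC2NS_SCALE_FACTOR of 10:

#include <stdint.h>
#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10		/* matches arch/x86/include/asm/timer.h */
#define NSEC_PER_MSEC       1000000ULL

int main(void)
{
	uint64_t cpu_khz = 2800000;	/* hypothetical 2.8 GHz part */

	uint64_t truncated = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) / cpu_khz;
	uint64_t rounded   = ((NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) +
			      cpu_khz / 2) / cpu_khz;

	/* Prints 365 vs 366: rounding wins when the fractional part is >= 0.5 */
	printf("truncated=%llu rounded=%llu\n",
	       (unsigned long long)truncated, (unsigned long long)rounded);
	return 0;
}
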
index c71025b674623133219377a70b1e27f380a95ffa..0ba4cfb4f412dcc960e26107083535bf1ba10181 100644 (file)
@@ -680,8 +680,10 @@ static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
                if (auprobe->insn[i] == 0x66)
                        continue;
 
-               if (auprobe->insn[i] == 0x90)
+               if (auprobe->insn[i] == 0x90) {
+                       regs->ip += i + 1;
                        return true;
+               }
 
                break;
        }
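
The __skip_sstep() change above makes the NOP emulation advance regs->ip past the whole instruction: the loop tolerates any number of 0x66 operand-size prefixes and, once it sees the 0x90 opcode, skips i + 1 bytes. Below is a userspace sketch of the same scan; the 16-byte bound is an assumption standing in for the kernel's instruction-length limit.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_INSN_BYTES 16	/* assumed limit, mirroring the kernel's */

/*
 * Skip 0x66 operand-size prefixes, then accept a 0x90 NOP.  Returns the
 * number of bytes the instruction pointer should advance by, or 0 if the
 * bytes are not a (possibly prefixed) NOP.
 */
static size_t prefixed_nop_len(const uint8_t *insn, size_t len)
{
	size_t i;

	for (i = 0; i < len && i < MAX_INSN_BYTES; i++) {
		if (insn[i] == 0x66)	/* operand-size prefix */
			continue;
		if (insn[i] == 0x90)	/* NOP */
			return i + 1;
		break;
	}
	return 0;
}

int main(void)
{
	const uint8_t osp_nop[] = { 0x66, 0x90 };	/* "66 90" two-byte NOP */
	const uint8_t not_nop[] = { 0x89, 0xc0 };	/* mov %eax,%eax */

	printf("%zu %zu\n", prefixed_nop_len(osp_nop, sizeof(osp_nop)),
	       prefixed_nop_len(not_nop, sizeof(not_nop)));
	return 0;
}
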
index 7a3d075a814a9c83a603df93b5961f247049e624..d065d67c267230fcb386250fd1261e95c521cae8 100644 (file)
@@ -19,6 +19,7 @@
 #include <asm/time.h>
 #include <asm/irq.h>
 #include <asm/io_apic.h>
+#include <asm/hpet.h>
 #include <asm/pat.h>
 #include <asm/tsc.h>
 #include <asm/iommu.h>
@@ -111,15 +112,22 @@ struct x86_platform_ops x86_platform = {
 
 EXPORT_SYMBOL_GPL(x86_platform);
 struct x86_msi_ops x86_msi = {
-       .setup_msi_irqs = native_setup_msi_irqs,
-       .teardown_msi_irq = native_teardown_msi_irq,
-       .teardown_msi_irqs = default_teardown_msi_irqs,
-       .restore_msi_irqs = default_restore_msi_irqs,
+       .setup_msi_irqs         = native_setup_msi_irqs,
+       .compose_msi_msg        = native_compose_msi_msg,
+       .teardown_msi_irq       = native_teardown_msi_irq,
+       .teardown_msi_irqs      = default_teardown_msi_irqs,
+       .restore_msi_irqs       = default_restore_msi_irqs,
+       .setup_hpet_msi         = default_setup_hpet_msi,
 };
 
 struct x86_io_apic_ops x86_io_apic_ops = {
-       .init   = native_io_apic_init_mappings,
-       .read   = native_io_apic_read,
-       .write  = native_io_apic_write,
-       .modify = native_io_apic_modify,
+       .init                   = native_io_apic_init_mappings,
+       .read                   = native_io_apic_read,
+       .write                  = native_io_apic_write,
+       .modify                 = native_io_apic_modify,
+       .disable                = native_disable_io_apic,
+       .print_entries          = native_io_apic_print_entries,
+       .set_affinity           = native_ioapic_set_affinity,
+       .setup_entry            = native_setup_ioapic_entry,
+       .eoi_ioapic_pin         = native_eoi_ioapic_pin,
 };
index 76f54461f7cb83a13a0ec8e76add8968532a1eae..c243b81e3c74b56bb69d7d26e692ac4181d73320 100644 (file)
@@ -120,7 +120,7 @@ struct kvm_shared_msrs {
 };
 
 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
-static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
+static struct kvm_shared_msrs __percpu *shared_msrs;
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "pf_fixed", VCPU_STAT(pf_fixed) },
@@ -191,10 +191,10 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
 
 static void shared_msr_update(unsigned slot, u32 msr)
 {
-       struct kvm_shared_msrs *smsr;
        u64 value;
+       unsigned int cpu = smp_processor_id();
+       struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
 
-       smsr = &__get_cpu_var(shared_msrs);
        /* only read, and nobody should modify it at this time,
         * so don't need lock */
        if (slot >= shared_msrs_global.nr) {
@@ -226,7 +226,8 @@ static void kvm_shared_msr_cpu_online(void)
 
 void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
 {
-       struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
+       unsigned int cpu = smp_processor_id();
+       struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
 
        if (((value ^ smsr->values[slot].curr) & mask) == 0)
                return;
@@ -242,7 +243,8 @@ EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
 
 static void drop_user_return_notifiers(void *ignore)
 {
-       struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
+       unsigned int cpu = smp_processor_id();
+       struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
 
        if (smsr->registered)
                kvm_on_user_return(&smsr->urn);
@@ -5233,9 +5235,16 @@ int kvm_arch_init(void *opaque)
                goto out;
        }
 
+       r = -ENOMEM;
+       shared_msrs = alloc_percpu(struct kvm_shared_msrs);
+       if (!shared_msrs) {
+               printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
+               goto out;
+       }
+
        r = kvm_mmu_module_init();
        if (r)
-               goto out;
+               goto out_free_percpu;
 
        kvm_set_mmio_spte_mask();
        kvm_init_msr_list();
@@ -5258,6 +5267,8 @@ int kvm_arch_init(void *opaque)
 
        return 0;
 
+out_free_percpu:
+       free_percpu(shared_msrs);
 out:
        return r;
 }
@@ -5275,6 +5286,7 @@ void kvm_arch_exit(void)
 #endif
        kvm_x86_ops = NULL;
        kvm_mmu_module_exit();
+       free_percpu(shared_msrs);
 }
 
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
index 027088f2f7dd9b6b836abfe410088579106a7324..fb674fd3fc2258f75400564acea0a835430f5988 100644 (file)
@@ -748,13 +748,15 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
                                return;
                }
 #endif
+               /* Kernel addresses are always protection faults: */
+               if (address >= TASK_SIZE)
+                       error_code |= PF_PROT;
 
-               if (unlikely(show_unhandled_signals))
+               if (likely(show_unhandled_signals))
                        show_signal_msg(regs, error_code, address, tsk);
 
-               /* Kernel addresses are always protection faults: */
                tsk->thread.cr2         = address;
-               tsk->thread.error_code  = error_code | (address >= TASK_SIZE);
+               tsk->thread.error_code  = error_code;
                tsk->thread.trap_nr     = X86_TRAP_PF;
 
                force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
index 2ead3c8a4c8419da92a61fbba35eb695e5205dde..d6eeead437584c0afcdcf1e8e3c8963b20a77192 100644 (file)
@@ -605,7 +605,7 @@ kernel_physical_mapping_init(unsigned long start,
        }
 
        if (pgd_changed)
-               sync_global_pgds(addr, end);
+               sync_global_pgds(addr, end - 1);
 
        __flush_tlb_all();
 
@@ -831,6 +831,9 @@ int kern_addr_valid(unsigned long addr)
        if (pud_none(*pud))
                return 0;
 
+       if (pud_large(*pud))
+               return pfn_valid(pud_pfn(*pud));
+
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;
@@ -981,7 +984,7 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
                }
 
        }
-       sync_global_pgds((unsigned long)start_page, end);
+       sync_global_pgds((unsigned long)start_page, end - 1);
        return 0;
 }
 
index c80b9fb95734fd07430fde185794e4182ec7a6e8..8dabbed409ee69d58e9752be88b5ddaa38a02093 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/memblock.h>
 
 static u64 patterns[] __initdata = {
+       /* The first entry has to be 0 to leave memtest with zeroed memory */
        0,
        0xffffffffffffffffULL,
        0x5555555555555555ULL,
@@ -110,15 +111,8 @@ void __init early_memtest(unsigned long start, unsigned long end)
                return;
 
        printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern);
-       for (i = 0; i < memtest_pattern; i++) {
+       for (i = memtest_pattern-1; i < UINT_MAX; --i) {
                idx = i % ARRAY_SIZE(patterns);
                do_one_pass(patterns[idx], start, end);
        }
-
-       if (idx > 0) {
-               printk(KERN_INFO "early_memtest: wipe out "
-                      "test pattern from memory\n");
-               /* additional test with pattern 0 will do this */
-               do_one_pass(0, start, end);
-       }
 }
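
The rewritten loop above walks the patterns in descending index order and relies on unsigned wrap-around to stop: decrementing i past 0 wraps it to UINT_MAX, which fails the i < UINT_MAX test. Since patterns[0] is 0, the last pass always writes zeroes, which is why the separate wipe-out pass could be dropped. A sketch of the resulting iteration order, using a trimmed copy of the pattern table:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Trimmed subset of the kernel's pattern table; entry 0 must stay 0. */
static const uint64_t patterns[] = {
	0,
	0xffffffffffffffffULL,
	0x5555555555555555ULL,
};

int main(void)
{
	unsigned int memtest_pattern = 4;	/* e.g. memtest=4 on the command line */
	unsigned int i, idx;

	/* Same shape as the loop in early_memtest(): ends after i == 0 wraps. */
	for (i = memtest_pattern - 1; i < UINT_MAX; --i) {
		idx = i % ARRAY_SIZE(patterns);
		printf("pass %u uses pattern 0x%016llx\n",
		       i, (unsigned long long)patterns[idx]);
	}
	return 0;
}
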
index 4ddf497ca65beae876d17867d4cd7d32cd37f39b..cdd0da9dd530b643c5c8f8673f7ae76c5fb8602c 100644 (file)
@@ -149,39 +149,40 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
        int node, pxm;
 
        if (srat_disabled())
-               return -1;
-       if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
-               bad_srat();
-               return -1;
-       }
+               goto out_err;
+       if (ma->header.length != sizeof(struct acpi_srat_mem_affinity))
+               goto out_err_bad_srat;
        if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
-               return -1;
-
+               goto out_err;
        if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
-               return -1;
+               goto out_err;
+
        start = ma->base_address;
        end = start + ma->length;
        pxm = ma->proximity_domain;
        if (acpi_srat_revision <= 1)
                pxm &= 0xff;
+
        node = setup_node(pxm);
        if (node < 0) {
                printk(KERN_ERR "SRAT: Too many proximity domains.\n");
-               bad_srat();
-               return -1;
+               goto out_err_bad_srat;
        }
 
-       if (numa_add_memblk(node, start, end) < 0) {
-               bad_srat();
-               return -1;
-       }
+       if (numa_add_memblk(node, start, end) < 0)
+               goto out_err_bad_srat;
 
        node_set(node, numa_nodes_parsed);
 
        printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
               node, pxm,
               (unsigned long long) start, (unsigned long long) end - 1);
+
        return 0;
+out_err_bad_srat:
+       bad_srat();
+out_err:
+       return -1;
 }
 
 void __init acpi_numa_arch_fixup(void) {}
index 13a6b29e2e5d27041bc5657dd3263302d75b9547..282375f13c7edddf30cba2f57202790cc8cd60a4 100644 (file)
@@ -335,7 +335,7 @@ static const struct file_operations fops_tlbflush = {
        .llseek = default_llseek,
 };
 
-static int __cpuinit create_tlb_flushall_shift(void)
+static int __init create_tlb_flushall_shift(void)
 {
        debugfs_create_file("tlb_flushall_shift", S_IRUSR | S_IWUSR,
                            arch_debugfs_dir, NULL, &fops_tlbflush);
index fb29968a7cd54c164286a09f9f8721144180f3d0..082e88129712b4eb9e2027852c890a02ff31a1c7 100644 (file)
@@ -548,8 +548,7 @@ static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
        if (cfg->address < 0xFFFFFFFF)
                return 0;
 
-       if (!strcmp(mcfg->header.oem_id, "SGI") ||
-                       !strcmp(mcfg->header.oem_id, "SGI2"))
+       if (!strncmp(mcfg->header.oem_id, "SGI", 3))
                return 0;
 
        if (mcfg->header.revision >= 1) {
index 8d874396cb29044b15ccf8634259e239cc9f957b..01e0231a113e9375e0e4f6c44ca17aac83d3eda4 100644 (file)
@@ -2,10 +2,12 @@
 obj-y  += ce4100/
 obj-y  += efi/
 obj-y  += geode/
+obj-y  += goldfish/
 obj-y  += iris/
 obj-y  += mrst/
 obj-y  += olpc/
 obj-y  += scx200/
 obj-y  += sfi/
+obj-y  += ts5500/
 obj-y  += visws/
 obj-y  += uv/
index d9c1b95af17c1ead797783eed225c94c9aa7fe5f..7145ec63c5205f710a9f28bc676008e899878d76 100644 (file)
  * published by the Free Software Foundation.
  */
 #include <linux/kernel.h>
+#include <linux/init.h>
 #include <linux/acpi.h>
 #include <linux/efi.h>
 #include <linux/efi-bgrt.h>
 
 struct acpi_table_bgrt *bgrt_tab;
-void *bgrt_image;
-size_t bgrt_image_size;
+void *__initdata bgrt_image;
+size_t __initdata bgrt_image_size;
 
 struct bmp_header {
        u16 id;
        u32 size;
 } __packed;
 
-void efi_bgrt_init(void)
+void __init efi_bgrt_init(void)
 {
        acpi_status status;
        void __iomem *image;
index ad4439145f858314dfe518cf7cd9336c5fe9c96d..928bf837040a273f673e6bcb9b7f017284985a7b 100644 (file)
@@ -51,9 +51,6 @@
 
 #define EFI_DEBUG      1
 
-int efi_enabled;
-EXPORT_SYMBOL(efi_enabled);
-
 struct efi __read_mostly efi = {
        .mps        = EFI_INVALID_TABLE_ADDR,
        .acpi       = EFI_INVALID_TABLE_ADDR,
@@ -69,19 +66,28 @@ EXPORT_SYMBOL(efi);
 
 struct efi_memory_map memmap;
 
-bool efi_64bit;
-
 static struct efi efi_phys __initdata;
 static efi_system_table_t efi_systab __initdata;
 
 static inline bool efi_is_native(void)
 {
-       return IS_ENABLED(CONFIG_X86_64) == efi_64bit;
+       return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
+}
+
+unsigned long x86_efi_facility;
+
+/*
+ * Returns 1 if 'facility' is enabled, 0 otherwise.
+ */
+int efi_enabled(int facility)
+{
+       return test_bit(facility, &x86_efi_facility) != 0;
 }
+EXPORT_SYMBOL(efi_enabled);
 
 static int __init setup_noefi(char *arg)
 {
-       efi_enabled = 0;
+       clear_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
        return 0;
 }
 early_param("noefi", setup_noefi);
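
The efi.c changes above replace the single efi_enabled flag with a per-facility bitmask, x86_efi_facility, queried through a small efi_enabled(facility) accessor. A userspace approximation of the pattern is sketched below; it uses plain bit operations in place of the kernel's atomic set_bit/clear_bit/test_bit, and the bit numbering is illustrative rather than the kernel's actual EFI_* values.

#include <stdio.h>

/* Illustrative facility bits; the real EFI_* definitions live in <linux/efi.h>. */
enum {
	EFI_BOOT,
	EFI_SYSTEM_TABLES,
	EFI_CONFIG_TABLES,
	EFI_RUNTIME_SERVICES,
	EFI_MEMMAP,
	EFI_64BIT,
};

static unsigned long facility_mask;

/* Non-atomic stand-ins for set_bit(), clear_bit() and test_bit(). */
static void set_facility(int facility)   { facility_mask |=  1UL << facility; }
static void clear_facility(int facility) { facility_mask &= ~(1UL << facility); }
static int  efi_enabled(int facility)    { return (facility_mask >> facility) & 1; }

int main(void)
{
	set_facility(EFI_BOOT);
	set_facility(EFI_64BIT);
	clear_facility(EFI_RUNTIME_SERVICES);	/* what "noefi" now does */

	printf("boot=%d runtime=%d 64bit=%d\n",
	       efi_enabled(EFI_BOOT),
	       efi_enabled(EFI_RUNTIME_SERVICES),
	       efi_enabled(EFI_64BIT));
	return 0;
}
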
@@ -426,6 +432,7 @@ void __init efi_reserve_boot_services(void)
 
 void __init efi_unmap_memmap(void)
 {
+       clear_bit(EFI_MEMMAP, &x86_efi_facility);
        if (memmap.map) {
                early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
                memmap.map = NULL;
@@ -460,7 +467,7 @@ void __init efi_free_boot_services(void)
 
 static int __init efi_systab_init(void *phys)
 {
-       if (efi_64bit) {
+       if (efi_enabled(EFI_64BIT)) {
                efi_system_table_64_t *systab64;
                u64 tmp = 0;
 
@@ -552,7 +559,7 @@ static int __init efi_config_init(u64 tables, int nr_tables)
        void *config_tables, *tablep;
        int i, sz;
 
-       if (efi_64bit)
+       if (efi_enabled(EFI_64BIT))
                sz = sizeof(efi_config_table_64_t);
        else
                sz = sizeof(efi_config_table_32_t);
@@ -572,7 +579,7 @@ static int __init efi_config_init(u64 tables, int nr_tables)
                efi_guid_t guid;
                unsigned long table;
 
-               if (efi_64bit) {
+               if (efi_enabled(EFI_64BIT)) {
                        u64 table64;
                        guid = ((efi_config_table_64_t *)tablep)->guid;
                        table64 = ((efi_config_table_64_t *)tablep)->table;
@@ -684,7 +691,6 @@ void __init efi_init(void)
        if (boot_params.efi_info.efi_systab_hi ||
            boot_params.efi_info.efi_memmap_hi) {
                pr_info("Table located above 4GB, disabling EFI.\n");
-               efi_enabled = 0;
                return;
        }
        efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
@@ -694,10 +700,10 @@ void __init efi_init(void)
                          ((__u64)boot_params.efi_info.efi_systab_hi<<32));
 #endif
 
-       if (efi_systab_init(efi_phys.systab)) {
-               efi_enabled = 0;
+       if (efi_systab_init(efi_phys.systab))
                return;
-       }
+
+       set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
 
        /*
         * Show what we know for posterity
@@ -715,10 +721,10 @@ void __init efi_init(void)
                efi.systab->hdr.revision >> 16,
                efi.systab->hdr.revision & 0xffff, vendor);
 
-       if (efi_config_init(efi.systab->tables, efi.systab->nr_tables)) {
-               efi_enabled = 0;
+       if (efi_config_init(efi.systab->tables, efi.systab->nr_tables))
                return;
-       }
+
+       set_bit(EFI_CONFIG_TABLES, &x86_efi_facility);
 
        /*
         * Note: We currently don't support runtime services on an EFI
@@ -727,15 +733,17 @@ void __init efi_init(void)
 
        if (!efi_is_native())
                pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
-       else if (efi_runtime_init()) {
-               efi_enabled = 0;
-               return;
+       else {
+               if (efi_runtime_init())
+                       return;
+               set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
        }
 
-       if (efi_memmap_init()) {
-               efi_enabled = 0;
+       if (efi_memmap_init())
                return;
-       }
+
+       set_bit(EFI_MEMMAP, &x86_efi_facility);
+
 #ifdef CONFIG_X86_32
        if (efi_is_native()) {
                x86_platform.get_wallclock = efi_get_time;
@@ -941,7 +949,7 @@ void __init efi_enter_virtual_mode(void)
         *
         * Call EFI services through wrapper functions.
         */
-       efi.runtime_version = efi_systab.fw_revision;
+       efi.runtime_version = efi_systab.hdr.revision;
        efi.get_time = virt_efi_get_time;
        efi.set_time = virt_efi_set_time;
        efi.get_wakeup_time = virt_efi_get_wakeup_time;
@@ -969,6 +977,9 @@ u32 efi_mem_type(unsigned long phys_addr)
        efi_memory_desc_t *md;
        void *p;
 
+       if (!efi_enabled(EFI_MEMMAP))
+               return 0;
+
        for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                md = p;
                if ((md->phys_addr <= phys_addr) &&
index 95fd505dfeb6e43dd37b0c41954f0b0db6535d1c..2b2003860615fbc5e24ef12e1c1fa3d855a1f09d 100644 (file)
@@ -38,7 +38,7 @@
 #include <asm/cacheflush.h>
 #include <asm/fixmap.h>
 
-static pgd_t save_pgd __initdata;
+static pgd_t *save_pgd __initdata;
 static unsigned long efi_flags __initdata;
 
 static void __init early_code_mapping_set_exec(int executable)
@@ -61,12 +61,20 @@ static void __init early_code_mapping_set_exec(int executable)
 void __init efi_call_phys_prelog(void)
 {
        unsigned long vaddress;
+       int pgd;
+       int n_pgds;
 
        early_code_mapping_set_exec(1);
        local_irq_save(efi_flags);
-       vaddress = (unsigned long)__va(0x0UL);
-       save_pgd = *pgd_offset_k(0x0UL);
-       set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress));
+
+       n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
+       save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);
+
+       for (pgd = 0; pgd < n_pgds; pgd++) {
+               save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
+               vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
+               set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
+       }
        __flush_tlb_all();
 }
 
@@ -75,7 +83,11 @@ void __init efi_call_phys_epilog(void)
        /*
         * After the lock is released, the original page table is restored.
         */
-       set_pgd(pgd_offset_k(0x0UL), save_pgd);
+       int pgd;
+       int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
+       for (pgd = 0; pgd < n_pgds; pgd++)
+               set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
+       kfree(save_pgd);
        __flush_tlb_all();
        local_irq_restore(efi_flags);
        early_code_mapping_set_exec(0);
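
efi_call_phys_prelog() now saves and remaps one kernel pgd entry per PGDIR_SIZE-sized chunk of physical memory rather than the single entry covering virtual address 0, and DIV_ROUND_UP() gives the number of chunks needed to cover max_pfn << PAGE_SHIFT. A small arithmetic-only sketch, with hypothetical sizes standing in for the kernel constants:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))	/* as in <linux/kernel.h> */

int main(void)
{
	/* Hypothetical box: 8 GiB of RAM, 512 GiB covered per pgd entry. */
	uint64_t page_shift = 12;
	uint64_t max_pfn    = (8ULL << 30) >> page_shift;
	uint64_t pgdir_size = 512ULL << 30;

	uint64_t n_pgds = DIV_ROUND_UP(max_pfn << page_shift, pgdir_size);

	/* Rounds up, so even a sub-PGDIR_SIZE machine saves one entry. */
	printf("n_pgds = %llu\n", (unsigned long long)n_pgds);
	return 0;
}
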
diff --git a/arch/x86/platform/goldfish/Makefile b/arch/x86/platform/goldfish/Makefile
new file mode 100644 (file)
index 0000000..f030b53
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_GOLDFISH) += goldfish.o
diff --git a/arch/x86/platform/goldfish/goldfish.c b/arch/x86/platform/goldfish/goldfish.c
new file mode 100644 (file)
index 0000000..1693107
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (C) 2011 Intel, Inc.
+ * Copyright (C) 2013 Intel, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+
+/*
+ * Where the IO devices (timers, system controllers and so on) live in
+ * virtual device memory.
+ */
+
+#define GOLDFISH_PDEV_BUS_BASE (0xff001000)
+#define GOLDFISH_PDEV_BUS_END  (0xff7fffff)
+#define GOLDFISH_PDEV_BUS_IRQ  (4)
+
+#define GOLDFISH_TTY_BASE      (0x2000)
+
+static struct resource goldfish_pdev_bus_resources[] = {
+       {
+               .start  = GOLDFISH_PDEV_BUS_BASE,
+               .end    = GOLDFISH_PDEV_BUS_END,
+               .flags  = IORESOURCE_MEM,
+       },
+       {
+               .start  = GOLDFISH_PDEV_BUS_IRQ,
+               .end    = GOLDFISH_PDEV_BUS_IRQ,
+               .flags  = IORESOURCE_IRQ,
+       }
+};
+
+static int __init goldfish_init(void)
+{
+       platform_device_register_simple("goldfish_pdev_bus", -1,
+                                               goldfish_pdev_bus_resources, 2);
+       return 0;
+}
+device_initcall(goldfish_init);
index 7785b72ecc3aeb619b2e2445dd6a7e76623fd48c..bcd1a703e3e60f0dcc9ebad4bcb1d8cf27d3fd5f 100644 (file)
@@ -35,7 +35,7 @@
 static unsigned long sfi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
 
 /* All CPUs enumerated by SFI must be present and enabled */
-static void __cpuinit mp_sfi_register_lapic(u8 id)
+static void __init mp_sfi_register_lapic(u8 id)
 {
        if (MAX_LOCAL_APIC - id <= 0) {
                pr_warning("Processor #%d invalid (max %d)\n",
diff --git a/arch/x86/platform/ts5500/Makefile b/arch/x86/platform/ts5500/Makefile
new file mode 100644 (file)
index 0000000..c54e348
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_TS5500)   += ts5500.o
diff --git a/arch/x86/platform/ts5500/ts5500.c b/arch/x86/platform/ts5500/ts5500.c
new file mode 100644 (file)
index 0000000..39febb2
--- /dev/null
@@ -0,0 +1,339 @@
+/*
+ * Technologic Systems TS-5500 Single Board Computer support
+ *
+ * Copyright (C) 2013 Savoir-faire Linux Inc.
+ *     Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ *
+ * This driver registers the Technologic Systems TS-5500 Single Board Computer
+ * (SBC) and its devices, and exposes information to userspace such as jumpers'
+ * state or available options. For further information about sysfs entries, see
+ * Documentation/ABI/testing/sysfs-platform-ts5500.
+ *
+ * This code actually supports the TS-5500 platform, but it may be extended to
+ * support similar Technologic Systems x86-based platforms, such as the TS-5600.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_data/gpio-ts5500.h>
+#include <linux/platform_data/max197.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Product code register */
+#define TS5500_PRODUCT_CODE_ADDR       0x74
+#define TS5500_PRODUCT_CODE            0x60    /* TS-5500 product code */
+
+/* SRAM/RS-485/ADC options, and RS-485 RTS/Automatic RS-485 flags register */
+#define TS5500_SRAM_RS485_ADC_ADDR     0x75
+#define TS5500_SRAM                    BIT(0)  /* SRAM option */
+#define TS5500_RS485                   BIT(1)  /* RS-485 option */
+#define TS5500_ADC                     BIT(2)  /* A/D converter option */
+#define TS5500_RS485_RTS               BIT(6)  /* RTS for RS-485 */
+#define TS5500_RS485_AUTO              BIT(7)  /* Automatic RS-485 */
+
+/* External Reset/Industrial Temperature Range options register */
+#define TS5500_ERESET_ITR_ADDR         0x76
+#define TS5500_ERESET                  BIT(0)  /* External Reset option */
+#define TS5500_ITR                     BIT(1)  /* Indust. Temp. Range option */
+
+/* LED/Jumpers register */
+#define TS5500_LED_JP_ADDR             0x77
+#define TS5500_LED                     BIT(0)  /* LED flag */
+#define TS5500_JP1                     BIT(1)  /* Automatic CMOS */
+#define TS5500_JP2                     BIT(2)  /* Enable Serial Console */
+#define TS5500_JP3                     BIT(3)  /* Write Enable Drive A */
+#define TS5500_JP4                     BIT(4)  /* Fast Console (115K baud) */
+#define TS5500_JP5                     BIT(5)  /* User Jumper */
+#define TS5500_JP6                     BIT(6)  /* Console on COM1 (req. JP2) */
+#define TS5500_JP7                     BIT(7)  /* Undocumented (Unused) */
+
+/* A/D Converter registers */
+#define TS5500_ADC_CONV_BUSY_ADDR      0x195   /* Conversion state register */
+#define TS5500_ADC_CONV_BUSY           BIT(0)
+#define TS5500_ADC_CONV_INIT_LSB_ADDR  0x196   /* Start conv. / LSB register */
+#define TS5500_ADC_CONV_MSB_ADDR       0x197   /* MSB register */
+#define TS5500_ADC_CONV_DELAY          12      /* usec */
+
+/**
+ * struct ts5500_sbc - TS-5500 board description
+ * @id:                Board product ID.
+ * @sram:      Flag for SRAM option.
+ * @rs485:     Flag for RS-485 option.
+ * @adc:       Flag for Analog/Digital converter option.
+ * @ereset:    Flag for External Reset option.
+ * @itr:       Flag for Industrial Temperature Range option.
+ * @jumpers:   Bitfield for jumpers' state.
+ */
+struct ts5500_sbc {
+       int     id;
+       bool    sram;
+       bool    rs485;
+       bool    adc;
+       bool    ereset;
+       bool    itr;
+       u8      jumpers;
+};
+
+/* Board signatures in BIOS shadow RAM */
+static const struct {
+       const char * const string;
+       const ssize_t offset;
+} ts5500_signatures[] __initdata = {
+       { "TS-5x00 AMD Elan", 0xb14 },
+};
+
+static int __init ts5500_check_signature(void)
+{
+       void __iomem *bios;
+       int i, ret = -ENODEV;
+
+       bios = ioremap(0xf0000, 0x10000);
+       if (!bios)
+               return -ENOMEM;
+
+       for (i = 0; i < ARRAY_SIZE(ts5500_signatures); i++) {
+               if (check_signature(bios + ts5500_signatures[i].offset,
+                                   ts5500_signatures[i].string,
+                                   strlen(ts5500_signatures[i].string))) {
+                       ret = 0;
+                       break;
+               }
+       }
+
+       iounmap(bios);
+       return ret;
+}
+
+static int __init ts5500_detect_config(struct ts5500_sbc *sbc)
+{
+       u8 tmp;
+       int ret = 0;
+
+       if (!request_region(TS5500_PRODUCT_CODE_ADDR, 4, "ts5500"))
+               return -EBUSY;
+
+       tmp = inb(TS5500_PRODUCT_CODE_ADDR);
+       if (tmp != TS5500_PRODUCT_CODE) {
+               pr_err("This platform is not a TS-5500 (found ID 0x%x)\n", tmp);
+               ret = -ENODEV;
+               goto cleanup;
+       }
+       sbc->id = tmp;
+
+       tmp = inb(TS5500_SRAM_RS485_ADC_ADDR);
+       sbc->sram = tmp & TS5500_SRAM;
+       sbc->rs485 = tmp & TS5500_RS485;
+       sbc->adc = tmp & TS5500_ADC;
+
+       tmp = inb(TS5500_ERESET_ITR_ADDR);
+       sbc->ereset = tmp & TS5500_ERESET;
+       sbc->itr = tmp & TS5500_ITR;
+
+       tmp = inb(TS5500_LED_JP_ADDR);
+       sbc->jumpers = tmp & ~TS5500_LED;
+
+cleanup:
+       release_region(TS5500_PRODUCT_CODE_ADDR, 4);
+       return ret;
+}
+
+static ssize_t ts5500_show_id(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       struct ts5500_sbc *sbc = dev_get_drvdata(dev);
+
+       return sprintf(buf, "0x%.2x\n", sbc->id);
+}
+
+static ssize_t ts5500_show_jumpers(struct device *dev,
+                                  struct device_attribute *attr,
+                                  char *buf)
+{
+       struct ts5500_sbc *sbc = dev_get_drvdata(dev);
+
+       return sprintf(buf, "0x%.2x\n", sbc->jumpers >> 1);
+}
+
+#define TS5500_SHOW(field)                                     \
+       static ssize_t ts5500_show_##field(struct device *dev,  \
+                       struct device_attribute *attr,          \
+                       char *buf)                              \
+       {                                                       \
+               struct ts5500_sbc *sbc = dev_get_drvdata(dev);  \
+               return sprintf(buf, "%d\n", sbc->field);        \
+       }
+
+TS5500_SHOW(sram)
+TS5500_SHOW(rs485)
+TS5500_SHOW(adc)
+TS5500_SHOW(ereset)
+TS5500_SHOW(itr)
+
+static DEVICE_ATTR(id, S_IRUGO, ts5500_show_id, NULL);
+static DEVICE_ATTR(jumpers, S_IRUGO, ts5500_show_jumpers, NULL);
+static DEVICE_ATTR(sram, S_IRUGO, ts5500_show_sram, NULL);
+static DEVICE_ATTR(rs485, S_IRUGO, ts5500_show_rs485, NULL);
+static DEVICE_ATTR(adc, S_IRUGO, ts5500_show_adc, NULL);
+static DEVICE_ATTR(ereset, S_IRUGO, ts5500_show_ereset, NULL);
+static DEVICE_ATTR(itr, S_IRUGO, ts5500_show_itr, NULL);
+
+static struct attribute *ts5500_attributes[] = {
+       &dev_attr_id.attr,
+       &dev_attr_jumpers.attr,
+       &dev_attr_sram.attr,
+       &dev_attr_rs485.attr,
+       &dev_attr_adc.attr,
+       &dev_attr_ereset.attr,
+       &dev_attr_itr.attr,
+       NULL
+};
+
+static const struct attribute_group ts5500_attr_group = {
+       .attrs = ts5500_attributes,
+};
+
+static struct resource ts5500_dio1_resource[] = {
+       DEFINE_RES_IRQ_NAMED(7, "DIO1 interrupt"),
+};
+
+static struct platform_device ts5500_dio1_pdev = {
+       .name = "ts5500-dio1",
+       .id = -1,
+       .resource = ts5500_dio1_resource,
+       .num_resources = 1,
+};
+
+static struct resource ts5500_dio2_resource[] = {
+       DEFINE_RES_IRQ_NAMED(6, "DIO2 interrupt"),
+};
+
+static struct platform_device ts5500_dio2_pdev = {
+       .name = "ts5500-dio2",
+       .id = -1,
+       .resource = ts5500_dio2_resource,
+       .num_resources = 1,
+};
+
+static void ts5500_led_set(struct led_classdev *led_cdev,
+                          enum led_brightness brightness)
+{
+       outb(!!brightness, TS5500_LED_JP_ADDR);
+}
+
+static enum led_brightness ts5500_led_get(struct led_classdev *led_cdev)
+{
+       return (inb(TS5500_LED_JP_ADDR) & TS5500_LED) ? LED_FULL : LED_OFF;
+}
+
+static struct led_classdev ts5500_led_cdev = {
+       .name = "ts5500:green:",
+       .brightness_set = ts5500_led_set,
+       .brightness_get = ts5500_led_get,
+};
+
+static int ts5500_adc_convert(u8 ctrl)
+{
+       u8 lsb, msb;
+
+       /* Start conversion (ensure the 3 MSB are set to 0) */
+       outb(ctrl & 0x1f, TS5500_ADC_CONV_INIT_LSB_ADDR);
+
+       /*
+        * The platform has CPLD logic driving the A/D converter.
+        * The conversion must complete within 11 microseconds,
+        * otherwise we have to re-initiate a conversion.
+        */
+       udelay(TS5500_ADC_CONV_DELAY);
+       if (inb(TS5500_ADC_CONV_BUSY_ADDR) & TS5500_ADC_CONV_BUSY)
+               return -EBUSY;
+
+       /* Read the raw data */
+       lsb = inb(TS5500_ADC_CONV_INIT_LSB_ADDR);
+       msb = inb(TS5500_ADC_CONV_MSB_ADDR);
+
+       return (msb << 8) | lsb;
+}
+
+static struct max197_platform_data ts5500_adc_pdata = {
+       .convert = ts5500_adc_convert,
+};
+
+static struct platform_device ts5500_adc_pdev = {
+       .name = "max197",
+       .id = -1,
+       .dev = {
+               .platform_data = &ts5500_adc_pdata,
+       },
+};
+
+static int __init ts5500_init(void)
+{
+       struct platform_device *pdev;
+       struct ts5500_sbc *sbc;
+       int err;
+
+       /*
+        * There is no DMI or PCI bridge subvendor info available;
+        * the BIOS only provides a 16-bit identification call.
+        * It is safer to find a signature in the BIOS shadow RAM.
+        */
+       err = ts5500_check_signature();
+       if (err)
+               return err;
+
+       pdev = platform_device_register_simple("ts5500", -1, NULL, 0);
+       if (IS_ERR(pdev))
+               return PTR_ERR(pdev);
+
+       sbc = devm_kzalloc(&pdev->dev, sizeof(struct ts5500_sbc), GFP_KERNEL);
+       if (!sbc) {
+               err = -ENOMEM;
+               goto error;
+       }
+
+       err = ts5500_detect_config(sbc);
+       if (err)
+               goto error;
+
+       platform_set_drvdata(pdev, sbc);
+
+       err = sysfs_create_group(&pdev->dev.kobj, &ts5500_attr_group);
+       if (err)
+               goto error;
+
+       ts5500_dio1_pdev.dev.parent = &pdev->dev;
+       if (platform_device_register(&ts5500_dio1_pdev))
+               dev_warn(&pdev->dev, "DIO1 block registration failed\n");
+       ts5500_dio2_pdev.dev.parent = &pdev->dev;
+       if (platform_device_register(&ts5500_dio2_pdev))
+               dev_warn(&pdev->dev, "DIO2 block registration failed\n");
+
+       if (led_classdev_register(&pdev->dev, &ts5500_led_cdev))
+               dev_warn(&pdev->dev, "LED registration failed\n");
+
+       if (sbc->adc) {
+               ts5500_adc_pdev.dev.parent = &pdev->dev;
+               if (platform_device_register(&ts5500_adc_pdev))
+                       dev_warn(&pdev->dev, "ADC registration failed\n");
+       }
+
+       return 0;
+error:
+       platform_device_unregister(pdev);
+       return err;
+}
+device_initcall(ts5500_init);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Savoir-faire Linux Inc. <kernel@savoirfairelinux.com>");
+MODULE_DESCRIPTION("Technologic Systems TS-5500 platform driver");
index b8b3a37c80cd75e96559e67876206ad603b53741..0f92173a12b6ee1a1d5512c867585cd1ee07fc12 100644 (file)
@@ -1034,7 +1034,8 @@ static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
  * globally purge translation cache of a virtual address or all TLB's
  * @cpumask: mask of all cpu's in which the address is to be removed
  * @mm: mm_struct containing virtual address range
- * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
+ * @start: start virtual address to be removed from TLB
+ * @end: end virtual address to be remove from TLB
  * @cpu: the current cpu
  *
  * This is the entry point for initiating any UV global TLB shootdown.
@@ -1056,7 +1057,7 @@ static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
  */
 const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
                                struct mm_struct *mm, unsigned long start,
-                               unsigned end, unsigned int cpu)
+                               unsigned long end, unsigned int cpu)
 {
        int locals = 0;
        int remotes = 0;
@@ -1113,7 +1114,10 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 
        record_send_statistics(stat, locals, hubs, remotes, bau_desc);
 
-       bau_desc->payload.address = start;
+       if (!end || (end - start) <= PAGE_SIZE)
+               bau_desc->payload.address = start;
+       else
+               bau_desc->payload.address = TLB_FLUSH_ALL;
        bau_desc->payload.sending_cpu = cpu;
        /*
         * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
@@ -1463,7 +1467,7 @@ static ssize_t ptc_proc_write(struct file *file, const char __user *user,
        }
 
        if (input_arg == 0) {
-               elements = sizeof(stat_description)/sizeof(*stat_description);
+               elements = ARRAY_SIZE(stat_description);
                printk(KERN_DEBUG "# cpu:      cpu number\n");
                printk(KERN_DEBUG "Sender statistics:\n");
                for (i = 0; i < elements; i++)
@@ -1504,7 +1508,7 @@ static int parse_tunables_write(struct bau_control *bcp, char *instr,
        char *q;
        int cnt = 0;
        int val;
-       int e = sizeof(tunables) / sizeof(*tunables);
+       int e = ARRAY_SIZE(tunables);
 
        p = instr + strspn(instr, WHITESPACE);
        q = p;
index 5032e0d19b8670b14cda20890a80dd0b91129b25..98718f604eb67e5a811f888084c27dff81f69c45 100644 (file)
@@ -15,7 +15,7 @@
  *  along with this program; if not, write to the Free Software
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  *
- *  Copyright (c) 2009 Silicon Graphics, Inc.  All Rights Reserved.
+ *  Copyright (c) 2009-2013 Silicon Graphics, Inc.  All Rights Reserved.
  *  Copyright (c) Dimitri Sivanich
  */
 #include <linux/clockchips.h>
@@ -102,9 +102,10 @@ static int uv_intr_pending(int pnode)
        if (is_uv1_hub())
                return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
                        UV1H_EVENT_OCCURRED0_RTC1_MASK;
-       else
-               return uv_read_global_mmr64(pnode, UV2H_EVENT_OCCURRED2) &
-                       UV2H_EVENT_OCCURRED2_RTC_1_MASK;
+       else if (is_uvx_hub())
+               return uv_read_global_mmr64(pnode, UVXH_EVENT_OCCURRED2) &
+                       UVXH_EVENT_OCCURRED2_RTC_1_MASK;
+       return 0;
 }
 
 /* Setup interrupt and return non-zero if early expiration occurred. */
@@ -122,8 +123,8 @@ static int uv_setup_intr(int cpu, u64 expires)
                uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
                                UV1H_EVENT_OCCURRED0_RTC1_MASK);
        else
-               uv_write_global_mmr64(pnode, UV2H_EVENT_OCCURRED2_ALIAS,
-                               UV2H_EVENT_OCCURRED2_RTC_1_MASK);
+               uv_write_global_mmr64(pnode, UVXH_EVENT_OCCURRED2_ALIAS,
+                               UVXH_EVENT_OCCURRED2_RTC_1_MASK);
 
        val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
                ((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
index cc2f8c1312862e96006a546ae355292391039958..872eb60e78064a90900e9133a29728a6c3774cee 100644 (file)
@@ -55,7 +55,7 @@ static FILE           *input_file;    /* Input file name */
 static void usage(const char *err)
 {
        if (err)
-               fprintf(stderr, "Error: %s\n\n", err);
+               fprintf(stderr, "%s: Error: %s\n\n", prog, err);
        fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog);
        fprintf(stderr, "\t-y   64bit mode\n");
        fprintf(stderr, "\t-n   32bit mode\n");
@@ -269,7 +269,13 @@ int main(int argc, char **argv)
                insns++;
        }
 
-       fprintf(stdout, "%s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", (errors) ? "Failure" : "Success", insns, (input_file) ? "given" : "random", errors, seed);
+       fprintf(stdout, "%s: %s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n",
+               prog,
+               (errors) ? "Failure" : "Success",
+               insns,
+               (input_file) ? "given" : "random",
+               errors,
+               seed);
 
        return errors ? 1 : 0;
 }
index 5a1847d619306e5f0ed5ed5570c53c69a8ce2315..79d67bd507fa6c5bd779defa312943b1d99a47f6 100644 (file)
@@ -814,12 +814,14 @@ int main(int argc, char **argv)
        read_relocs(fp);
        if (show_absolute_syms) {
                print_absolute_symbols();
-               return 0;
+               goto out;
        }
        if (show_absolute_relocs) {
                print_absolute_relocs();
-               return 0;
+               goto out;
        }
        emit_relocs(as_text, use_real_mode);
+out:
+       fclose(fp);
        return 0;
 }
index 8784ab30d91b5504932701ca31cdbbd0476135f6..84ac7f7b0257d6f1398cd41a7043fad25b73dc76 100644 (file)
@@ -20,7 +20,7 @@ int arch_fixup(unsigned long address, struct uml_pt_regs *regs)
        const struct exception_table_entry *fixup;
 
        fixup = search_exception_tables(address);
-       if (fixup != 0) {
+       if (fixup) {
                UPT_IP(regs) = fixup->fixup;
                return 1;
        }
index 205ad328aa52810975dd332771a16911f5e6935b..c74436e687bf8984b722efa1a657e1a6a8b47f3b 100644 (file)
@@ -60,7 +60,7 @@ notrace static cycle_t vread_tsc(void)
 
 static notrace cycle_t vread_hpet(void)
 {
-       return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
+       return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + HPET_COUNTER);
 }
 
 #ifdef CONFIG_PARAVIRT_CLOCK
index 138e5667409a3415492e860c8cfa708028d6d467..39928d16be3bf86e671426387504ee68b86d08cd 100644 (file)
@@ -1517,72 +1517,51 @@ asmlinkage void __init xen_start_kernel(void)
 #endif
 }
 
-#ifdef CONFIG_XEN_PVHVM
-#define HVM_SHARED_INFO_ADDR 0xFE700000UL
-static struct shared_info *xen_hvm_shared_info;
-static unsigned long xen_hvm_sip_phys;
-static int xen_major, xen_minor;
-
-static void xen_hvm_connect_shared_info(unsigned long pfn)
+void __ref xen_hvm_init_shared_info(void)
 {
+       int cpu;
        struct xen_add_to_physmap xatp;
+       static struct shared_info *shared_info_page = 0;
 
+       if (!shared_info_page)
+               shared_info_page = (struct shared_info *)
+                       extend_brk(PAGE_SIZE, PAGE_SIZE);
        xatp.domid = DOMID_SELF;
        xatp.idx = 0;
        xatp.space = XENMAPSPACE_shared_info;
-       xatp.gpfn = pfn;
+       xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
        if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
                BUG();
 
-}
-static void __init xen_hvm_set_shared_info(struct shared_info *sip)
-{
-       int cpu;
-
-       HYPERVISOR_shared_info = sip;
+       HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;
 
        /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
         * page, we use it in the event channel upcall and in some pvclock
         * related functions. We don't need the vcpu_info placement
         * optimizations because we don't use any pv_mmu or pv_irq op on
-        * HVM. */
-       for_each_online_cpu(cpu)
+        * HVM.
+        * When xen_hvm_init_shared_info is run at boot time, only vcpu 0 is
+        * online; it is also run at resume time, and in that case multiple
+        * vcpus might be online. */
+       for_each_online_cpu(cpu) {
                per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
-}
-
-/* Reconnect the shared_info pfn to a (new) mfn */
-void xen_hvm_resume_shared_info(void)
-{
-       xen_hvm_connect_shared_info(xen_hvm_sip_phys >> PAGE_SHIFT);
-}
-
-/* Xen tools prior to Xen 4 do not provide a E820_Reserved area for guest usage.
- * On these old tools the shared info page will be placed in E820_Ram.
- * Xen 4 provides a E820_Reserved area at 0xFC000000, and this code expects
- * that nothing is mapped up to HVM_SHARED_INFO_ADDR.
- * Xen 4.3+ provides an explicit 1MB area at HVM_SHARED_INFO_ADDR which is used
- * here for the shared info page. */
-static void __init xen_hvm_init_shared_info(void)
-{
-       if (xen_major < 4) {
-               xen_hvm_shared_info = extend_brk(PAGE_SIZE, PAGE_SIZE);
-               xen_hvm_sip_phys = __pa(xen_hvm_shared_info);
-       } else {
-               xen_hvm_sip_phys = HVM_SHARED_INFO_ADDR;
-               set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_hvm_sip_phys);
-               xen_hvm_shared_info =
-               (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
        }
-       xen_hvm_connect_shared_info(xen_hvm_sip_phys >> PAGE_SHIFT);
-       xen_hvm_set_shared_info(xen_hvm_shared_info);
 }
 
+#ifdef CONFIG_XEN_PVHVM
 static void __init init_hvm_pv_info(void)
 {
-       uint32_t ecx, edx, pages, msr, base;
+       int major, minor;
+       uint32_t eax, ebx, ecx, edx, pages, msr, base;
        u64 pfn;
 
        base = xen_cpuid_base();
+       cpuid(base + 1, &eax, &ebx, &ecx, &edx);
+
+       major = eax >> 16;
+       minor = eax & 0xffff;
+       printk(KERN_INFO "Xen version %d.%d.\n", major, minor);
+
        cpuid(base + 2, &pages, &msr, &ecx, &edx);
 
        pfn = __pa(hypercall_page);
@@ -1633,22 +1612,12 @@ static void __init xen_hvm_guest_init(void)
 
 static bool __init xen_hvm_platform(void)
 {
-       uint32_t eax, ebx, ecx, edx, base;
-
        if (xen_pv_domain())
                return false;
 
-       base = xen_cpuid_base();
-       if (!base)
+       if (!xen_cpuid_base())
                return false;
 
-       cpuid(base + 1, &eax, &ebx, &ecx, &edx);
-
-       xen_major = eax >> 16;
-       xen_minor = eax & 0xffff;
-
-       printk(KERN_INFO "Xen version %d.%d.\n", xen_major, xen_minor);
-
        return true;
 }
 
@@ -1668,6 +1637,7 @@ const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = {
        .name                   = "Xen HVM",
        .detect                 = xen_hvm_platform,
        .init_platform          = xen_hvm_guest_init,
+       .x2apic_available       = xen_x2apic_para_available,
 };
 EXPORT_SYMBOL(x86_hyper_xen_hvm);
 #endif
index 4f7d2599b484a67222cbfa68f7b929c35975732e..34bc4cee8887b2c2d974c99ca81b78577f719ff9 100644 (file)
@@ -432,13 +432,6 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
        play_dead_common();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
        cpu_bringup();
-       /*
-        * Balance out the preempt calls - as we are running in cpu_idle
-        * loop which has been called at bootup from cpu_bringup_and_idle.
-        * The cpucpu_bringup_and_idle called cpu_bringup which made a
-        * preempt_disable() So this preempt_enable will balance it out.
-        */
-       preempt_enable();
 }
 
 #else /* !CONFIG_HOTPLUG_CPU */
index ae8a00c39de4b0d02cfad0722715d0fbd159a1cb..45329c8c226e4c4070f16a791b300265a0bf472b 100644 (file)
@@ -30,7 +30,7 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled)
 {
 #ifdef CONFIG_XEN_PVHVM
        int cpu;
-       xen_hvm_resume_shared_info();
+       xen_hvm_init_shared_info();
        xen_callback_vector();
        xen_unplug_emulated_devices();
        if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
index f9643fc50de571636347a1e0510d45f916728967..33ca6e42a4caabb412350563a1f8349fd77b7789 100644 (file)
@@ -89,11 +89,11 @@ ENTRY(xen_iret)
         */
 #ifdef CONFIG_SMP
        GET_THREAD_INFO(%eax)
-       movl TI_cpu(%eax), %eax
-       movl __per_cpu_offset(,%eax,4), %eax
-       mov xen_vcpu(%eax), %eax
+       movl %ss:TI_cpu(%eax), %eax
+       movl %ss:__per_cpu_offset(,%eax,4), %eax
+       mov %ss:xen_vcpu(%eax), %eax
 #else
-       movl xen_vcpu, %eax
+       movl %ss:xen_vcpu, %eax
 #endif
 
        /* check IF state we're restoring */
@@ -106,11 +106,11 @@ ENTRY(xen_iret)
         * resuming the code, so we don't have to be worried about
         * being preempted to another CPU.
         */
-       setz XEN_vcpu_info_mask(%eax)
+       setz %ss:XEN_vcpu_info_mask(%eax)
 xen_iret_start_crit:
 
        /* check for unmasked and pending */
-       cmpw $0x0001, XEN_vcpu_info_pending(%eax)
+       cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax)
 
        /*
         * If there's something pending, mask events again so we can
@@ -118,7 +118,7 @@ xen_iret_start_crit:
         * touch XEN_vcpu_info_mask.
         */
        jne 1f
-       movb $1, XEN_vcpu_info_mask(%eax)
+       movb $1, %ss:XEN_vcpu_info_mask(%eax)
 
 1:     popl %eax
 
index d2e73d19d366d54820e453f44ff886cecea8d174..a95b41744ad0bafdcde069cb965cabb67ab0f61b 100644 (file)
@@ -40,7 +40,7 @@ void xen_enable_syscall(void);
 void xen_vcpu_restore(void);
 
 void xen_callback_vector(void);
-void xen_hvm_resume_shared_info(void);
+void xen_hvm_init_shared_info(void);
 void xen_unplug_emulated_devices(void);
 
 void __init xen_build_dynamic_phys_to_machine(void);
index 4acb5feba1fb7ae0bb830fed17b0391ae7a8483a..172a02a6ad146fea24ab966cf46a3612434a03da 100644 (file)
@@ -170,4 +170,19 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        consistent_sync(vaddr, size, direction);
 }
 
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+                                   struct vm_area_struct *vma, void *cpu_addr,
+                                   dma_addr_t dma_addr, size_t size)
+{
+       return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+                                 void *cpu_addr, dma_addr_t dma_addr,
+                                 size_t size)
+{
+       return -EINVAL;
+}
+
 #endif /* _XTENSA_DMA_MAPPING_H */
index 74638ec234c807ccaaed0a77ce0eceb71e2997c7..c88202f973d944f7e9c847b13d48c8ce24c87230 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/sched/sysctl.h>
 
 #include "blk.h"
 
index 9edba1b8323ed713f17f475511a370d2cdd8df47..603b2c178740afd4bcc047faed6e1314e30591a8 100644 (file)
@@ -100,14 +100,14 @@ static void elevator_put(struct elevator_type *e)
        module_put(e->elevator_owner);
 }
 
-static struct elevator_type *elevator_get(const char *name)
+static struct elevator_type *elevator_get(const char *name, bool try_loading)
 {
        struct elevator_type *e;
 
        spin_lock(&elv_list_lock);
 
        e = elevator_find(name);
-       if (!e) {
+       if (!e && try_loading) {
                spin_unlock(&elv_list_lock);
                request_module("%s-iosched", name);
                spin_lock(&elv_list_lock);
@@ -136,6 +136,22 @@ static int __init elevator_setup(char *str)
 
 __setup("elevator=", elevator_setup);
 
+/* called during boot to load the elevator chosen by the elevator param */
+void __init load_default_elevator_module(void)
+{
+       struct elevator_type *e;
+
+       if (!chosen_elevator[0])
+               return;
+
+       spin_lock(&elv_list_lock);
+       e = elevator_find(chosen_elevator);
+       spin_unlock(&elv_list_lock);
+
+       if (!e)
+               request_module("%s-iosched", chosen_elevator);
+}
+
 static struct kobj_type elv_ktype;
 
 static struct elevator_queue *elevator_alloc(struct request_queue *q,
@@ -191,25 +207,30 @@ int elevator_init(struct request_queue *q, char *name)
        q->boundary_rq = NULL;
 
        if (name) {
-               e = elevator_get(name);
+               e = elevator_get(name, true);
                if (!e)
                        return -EINVAL;
        }
 
+       /*
+        * Use the default elevator specified by config boot param or
+        * config option.  Don't try to load modules as we could be running
+        * off async and request_module() isn't allowed from async.
+        */
        if (!e && *chosen_elevator) {
-               e = elevator_get(chosen_elevator);
+               e = elevator_get(chosen_elevator, false);
                if (!e)
                        printk(KERN_ERR "I/O scheduler %s not found\n",
                                                        chosen_elevator);
        }
 
        if (!e) {
-               e = elevator_get(CONFIG_DEFAULT_IOSCHED);
+               e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
                if (!e) {
                        printk(KERN_ERR
                                "Default I/O scheduler not found. " \
                                "Using noop.\n");
-                       e = elevator_get("noop");
+                       e = elevator_get("noop", false);
                }
        }
 
@@ -951,7 +972,7 @@ int elevator_change(struct request_queue *q, const char *name)
                return -ENXIO;
 
        strlcpy(elevator_name, name, sizeof(elevator_name));
-       e = elevator_get(strstrip(elevator_name));
+       e = elevator_get(strstrip(elevator_name), true);
        if (!e) {
                printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
                return -EINVAL;
index 9a289d7c84bbedc6ca2e17cf74c201acc43fe7cd..3993ebf4135fb1adb8e25c9562dadd996d1c6be2 100644 (file)
@@ -35,6 +35,8 @@ static DEFINE_IDR(ext_devt_idr);
 
 static struct device_type disk_type;
 
+static void disk_check_events(struct disk_events *ev,
+                             unsigned int *clearing_ptr);
 static void disk_alloc_events(struct gendisk *disk);
 static void disk_add_events(struct gendisk *disk);
 static void disk_del_events(struct gendisk *disk);
@@ -1549,6 +1551,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
        const struct block_device_operations *bdops = disk->fops;
        struct disk_events *ev = disk->ev;
        unsigned int pending;
+       unsigned int clearing = mask;
 
        if (!ev) {
                /* for drivers still using the old ->media_changed method */
@@ -1558,34 +1561,53 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
                return 0;
        }
 
-       /* tell the workfn about the events being cleared */
+       disk_block_events(disk);
+
+       /*
+        * store the union of mask and ev->clearing on the stack so that the
+        * race with disk_flush_events does not cause ambiguity (ev->clearing
+        * can still be modified even if events are blocked).
+        */
        spin_lock_irq(&ev->lock);
-       ev->clearing |= mask;
+       clearing |= ev->clearing;
+       ev->clearing = 0;
        spin_unlock_irq(&ev->lock);
 
-       /* uncondtionally schedule event check and wait for it to finish */
-       disk_block_events(disk);
-       queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
-       flush_delayed_work(&ev->dwork);
-       __disk_unblock_events(disk, false);
+       disk_check_events(ev, &clearing);
+       /*
+        * if ev->clearing is not 0, the disk_flush_events got called in the
+        * middle of this function, so we want to run the workfn without delay.
+        * If ev->clearing is not 0, disk_flush_events() was called in the
+        * middle of this function, so we want to run the workfn without delay.
 
        /* then, fetch and clear pending events */
        spin_lock_irq(&ev->lock);
-       WARN_ON_ONCE(ev->clearing & mask);      /* cleared by workfn */
        pending = ev->pending & mask;
        ev->pending &= ~mask;
        spin_unlock_irq(&ev->lock);
+       WARN_ON_ONCE(clearing & mask);
 
        return pending;
 }
 
+/*
+ * Separate this part out so that a different pointer for clearing_ptr can be
+ * passed in for disk_clear_events.
+ */
 static void disk_events_workfn(struct work_struct *work)
 {
        struct delayed_work *dwork = to_delayed_work(work);
        struct disk_events *ev = container_of(dwork, struct disk_events, dwork);
+
+       disk_check_events(ev, &ev->clearing);
+}
+
+static void disk_check_events(struct disk_events *ev,
+                             unsigned int *clearing_ptr)
+{
        struct gendisk *disk = ev->disk;
        char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
-       unsigned int clearing = ev->clearing;
+       unsigned int clearing = *clearing_ptr;
        unsigned int events;
        unsigned long intv;
        int nr_events = 0, i;
@@ -1598,7 +1620,7 @@ static void disk_events_workfn(struct work_struct *work)
 
        events &= ~ev->pending;
        ev->pending |= events;
-       ev->clearing &= ~clearing;
+       *clearing_ptr &= ~clearing;
 
        intv = disk_events_poll_jiffies(disk);
        if (!ev->block && intv)
index 00a783661d0b5548b65882f312faa4a116ff5762..46f80e2c92f7da893ff28f5993d89fa01efc323a 100644 (file)
@@ -590,6 +590,9 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
        if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 &&
            *access_bit_width < 32)
                *access_bit_width = 32;
+       else if (bit_width == 64 && bit_offset == 0 && (*paddr & 0x07) == 0 &&
+           *access_bit_width < 64)
+               *access_bit_width = 64;
 
        if ((bit_width + bit_offset) > *access_bit_width) {
                pr_warning(FW_BUG APEI_PFX
index e6defd86b42454e98d2828abf2d99240d9e68b18..1e5d8a40101e274f5d6ce2bfe1a24a486a671af7 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/time.h>
 #include <linux/cper.h>
 #include <linux/acpi.h>
+#include <linux/pci.h>
 #include <linux/aer.h>
 
 /*
@@ -249,6 +250,10 @@ static const char *cper_pcie_port_type_strs[] = {
 static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
                            const struct acpi_hest_generic_data *gdata)
 {
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+       struct pci_dev *dev;
+#endif
+
        if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
                printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
                       pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ?
@@ -281,10 +286,18 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
        "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
        pfx, pcie->bridge.secondary_status, pcie->bridge.control);
 #ifdef CONFIG_ACPI_APEI_PCIEAER
-       if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) {
-               struct aer_capability_regs *aer_regs = (void *)pcie->aer_info;
-               cper_print_aer(pfx, gdata->error_severity, aer_regs);
+       dev = pci_get_domain_bus_and_slot(pcie->device_id.segment,
+                       pcie->device_id.bus, pcie->device_id.function);
+       if (!dev) {
+               pr_err("PCI AER Cannot get PCI device %04x:%02x:%02x.%d\n",
+                       pcie->device_id.segment, pcie->device_id.bus,
+                       pcie->device_id.slot, pcie->device_id.function);
+               return;
        }
+       if (pcie->validation_bits & CPER_PCIE_VALID_AER_INFO)
+               cper_print_aer(pfx, dev, gdata->error_severity,
+                               (struct aer_capability_regs *) pcie->aer_info);
+       pci_dev_put(dev);
 #endif
 }
 
index 95af6f674a6cc15b9e726d2cee7440ffa4b130fb..35da18113216c367eae553371e9164a726a70f44 100644 (file)
@@ -297,7 +297,7 @@ static int acpi_platform_notify(struct device *dev)
        if (!ret) {
                struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 
-               acpi_get_name(dev->acpi_handle, ACPI_FULL_PATHNAME, &buffer);
+               acpi_get_name(ACPI_HANDLE(dev), ACPI_FULL_PATHNAME, &buffer);
                DBG("Device %s -> %s\n", dev_name(dev), (char *)buffer.pointer);
                kfree(buffer.pointer);
        } else
index 3ff267861541f7570a5b110b7893e0cb012f5917..bd22f8667eed65e3889e61dcc357d85f5f3d1239 100644 (file)
@@ -250,7 +250,7 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
                return acpi_rsdp;
 #endif
 
-       if (efi_enabled) {
+       if (efi_enabled(EFI_CONFIG_TABLES)) {
                if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
                        return efi.acpi20;
                else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
index f1a5da44591dcd1b415b57e638165318bc13e793..ed9a1cc690be5fa8cd3844394128216350904a7e 100644 (file)
@@ -958,6 +958,9 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
                return -EINVAL;
        }
 
+       if (!dev)
+               return -EINVAL;
+
        dev->cpu = pr->id;
 
        if (max_cstate == 0)
@@ -1149,6 +1152,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
                }
 
                /* Populate Updated C-state information */
+               acpi_processor_get_power_info(pr);
                acpi_processor_setup_cpuidle_states(pr);
 
                /* Enable all cpuidle devices */
index 836bfe0690422855c80ecfe41895a4fe667bfd14..53e7ac9403a7dc21502e90a698ef1b27ad668a37 100644 (file)
@@ -340,6 +340,13 @@ static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
        if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
            || boot_cpu_data.x86 == 0x11) {
                rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
+               /*
+                * MSR C001_0064+:
+                * Bit 63: PstateEn. Read-write. If set, the P-state is valid.
+                */
+               if (!(hi & BIT(31)))
+                       return;
+
                fid = lo & 0x3f;
                did = (lo >> 6) & 7;
                if (boot_cpu_data.x86 == 0x10)
index 7862d17976b7532f48204cbf4210ffb6d4984f97..495aeed26779c96ad7555ad646f0d3ae03ca69dc 100644 (file)
@@ -53,6 +53,7 @@
 
 enum {
        AHCI_PCI_BAR_STA2X11    = 0,
+       AHCI_PCI_BAR_ENMOTUS    = 2,
        AHCI_PCI_BAR_STANDARD   = 5,
 };
 
@@ -410,6 +411,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci },   /* ASM1061 */
        { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci },   /* ASM1062 */
 
+       /* Enmotus */
+       { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
+
        /* Generic, PCI class code for AHCI */
        { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
          PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
@@ -1057,6 +1061,86 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
 {}
 #endif
 
+int ahci_init_interrupts(struct pci_dev *pdev, struct ahci_host_priv *hpriv)
+{
+       int rc;
+       unsigned int maxvec;
+
+       if (!(hpriv->flags & AHCI_HFLAG_NO_MSI)) {
+               rc = pci_enable_msi_block_auto(pdev, &maxvec);
+               if (rc > 0) {
+                       if ((rc == maxvec) || (rc == 1))
+                               return rc;
+                       /*
+                        * Assume that the advantage of multiple MSIs is negated,
+                        * so fall back to single MSI mode to save resources
+                        */
+                       pci_disable_msi(pdev);
+                       if (!pci_enable_msi(pdev))
+                               return 1;
+               }
+       }
+
+       pci_intx(pdev, 1);
+       return 0;
+}
+
+/**
+ *     ahci_host_activate - start AHCI host, request IRQs and register it
+ *     @host: target ATA host
+ *     @irq: base IRQ number to request
+ *     @n_msis: number of MSIs allocated for this host
+ *     @irq_handler: irq_handler used when requesting IRQs
+ *     @irq_flags: irq_flags used when requesting IRQs
+ *
+ *     Similar to ata_host_activate, but requests IRQs according to AHCI-1.1
+ *     when multiple MSIs were allocated. That is one MSI per port, starting
+ *     from @irq.
+ *
+ *     LOCKING:
+ *     Inherited from calling layer (may sleep).
+ *
+ *     RETURNS:
+ *     0 on success, -errno otherwise.
+ */
+int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis)
+{
+       int i, rc;
+
+       /* Sharing Last Message among several ports is not supported */
+       if (n_msis < host->n_ports)
+               return -EINVAL;
+
+       rc = ata_host_start(host);
+       if (rc)
+               return rc;
+
+       for (i = 0; i < host->n_ports; i++) {
+               rc = devm_request_threaded_irq(host->dev,
+                       irq + i, ahci_hw_interrupt, ahci_thread_fn, IRQF_SHARED,
+                       dev_driver_string(host->dev), host->ports[i]);
+               if (rc)
+                       goto out_free_irqs;
+       }
+
+       for (i = 0; i < host->n_ports; i++)
+               ata_port_desc(host->ports[i], "irq %d", irq + i);
+
+       rc = ata_host_register(host, &ahci_sht);
+       if (rc)
+               goto out_free_all_irqs;
+
+       return 0;
+
+out_free_all_irqs:
+       i = host->n_ports;
+out_free_irqs:
+       for (i--; i >= 0; i--)
+               devm_free_irq(host->dev, irq + i, host->ports[i]);
+
+       return rc;
+}
+
 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        unsigned int board_id = ent->driver_data;
@@ -1065,7 +1149,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct device *dev = &pdev->dev;
        struct ahci_host_priv *hpriv;
        struct ata_host *host;
-       int n_ports, i, rc;
+       int n_ports, n_msis, i, rc;
        int ahci_pci_bar = AHCI_PCI_BAR_STANDARD;
 
        VPRINTK("ENTER\n");
@@ -1098,9 +1182,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                dev_info(&pdev->dev,
                         "PDC42819 can only drive SATA devices with this driver\n");
 
-       /* The Connext uses non-standard BAR */
+       /* Both Connext and Enmotus devices use non-standard BARs */
        if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06)
                ahci_pci_bar = AHCI_PCI_BAR_STA2X11;
+       else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
+               ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
 
        /* acquire resources */
        rc = pcim_enable_device(pdev);
@@ -1150,11 +1236,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (ahci_sb600_enable_64bit(pdev))
                hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
 
-       if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
-               pci_intx(pdev, 1);
-
        hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
 
+       n_msis = ahci_init_interrupts(pdev, hpriv);
+       if (n_msis > 1)
+               hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
+
        /* save initial config */
        ahci_pci_save_initial_config(pdev, hpriv);
 
@@ -1250,6 +1337,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        ahci_pci_print_info(host);
 
        pci_set_master(pdev);
+
+       if (hpriv->flags & AHCI_HFLAG_MULTI_MSI)
+               return ahci_host_activate(host, pdev->irq, n_msis);
+
        return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
                                 &ahci_sht);
 }
index 9be471200a07657f1f655ea31d805e0d6223c24b..b830e6c9fe49f385b77405c188b100a7afcc0570 100644 (file)
@@ -231,6 +231,7 @@ enum {
        AHCI_HFLAG_DELAY_ENGINE         = (1 << 15), /* do not start engine on
                                                        port start (wait until
                                                        error-handling stage) */
+       AHCI_HFLAG_MULTI_MSI            = (1 << 16), /* multiple PCI MSIs */
 
        /* ap->flags bits */
 
@@ -297,6 +298,8 @@ struct ahci_port_priv {
        unsigned int            ncq_saw_d2h:1;
        unsigned int            ncq_saw_dmas:1;
        unsigned int            ncq_saw_sdb:1;
+       u32                     intr_status;    /* interrupts to handle */
+       spinlock_t              lock;           /* protects parent ata_port */
        u32                     intr_mask;      /* interrupts to enable */
        bool                    fbs_supported;  /* set iff FBS is supported */
        bool                    fbs_enabled;    /* set iff FBS is enabled */
@@ -359,7 +362,10 @@ void ahci_set_em_messages(struct ahci_host_priv *hpriv,
                          struct ata_port_info *pi);
 int ahci_reset_em(struct ata_host *host);
 irqreturn_t ahci_interrupt(int irq, void *dev_instance);
+irqreturn_t ahci_hw_interrupt(int irq, void *dev_instance);
+irqreturn_t ahci_thread_fn(int irq, void *dev_instance);
 void ahci_print_info(struct ata_host *host, const char *scc_s);
+int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis);
 
 static inline void __iomem *__ahci_port_base(struct ata_host *host,
                                             unsigned int port_no)
index 320712a7b9eaac3174f3a96de8693b63dee5f1af..34c82167b9625f41666125685148b68774036fd9 100644 (file)
@@ -1655,19 +1655,16 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
                ata_port_abort(ap);
 }
 
-static void ahci_port_intr(struct ata_port *ap)
+static void ahci_handle_port_interrupt(struct ata_port *ap,
+                                      void __iomem *port_mmio, u32 status)
 {
-       void __iomem *port_mmio = ahci_port_base(ap);
        struct ata_eh_info *ehi = &ap->link.eh_info;
        struct ahci_port_priv *pp = ap->private_data;
        struct ahci_host_priv *hpriv = ap->host->private_data;
        int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
-       u32 status, qc_active = 0;
+       u32 qc_active = 0;
        int rc;
 
-       status = readl(port_mmio + PORT_IRQ_STAT);
-       writel(status, port_mmio + PORT_IRQ_STAT);
-
        /* ignore BAD_PMP while resetting */
        if (unlikely(resetting))
                status &= ~PORT_IRQ_BAD_PMP;
@@ -1743,6 +1740,107 @@ static void ahci_port_intr(struct ata_port *ap)
        }
 }
 
+void ahci_port_intr(struct ata_port *ap)
+{
+       void __iomem *port_mmio = ahci_port_base(ap);
+       u32 status;
+
+       status = readl(port_mmio + PORT_IRQ_STAT);
+       writel(status, port_mmio + PORT_IRQ_STAT);
+
+       ahci_handle_port_interrupt(ap, port_mmio, status);
+}
+
+irqreturn_t ahci_thread_fn(int irq, void *dev_instance)
+{
+       struct ata_port *ap = dev_instance;
+       struct ahci_port_priv *pp = ap->private_data;
+       void __iomem *port_mmio = ahci_port_base(ap);
+       unsigned long flags;
+       u32 status;
+
+       spin_lock_irqsave(&ap->host->lock, flags);
+       status = pp->intr_status;
+       if (status)
+               pp->intr_status = 0;
+       spin_unlock_irqrestore(&ap->host->lock, flags);
+
+       spin_lock_bh(ap->lock);
+       ahci_handle_port_interrupt(ap, port_mmio, status);
+       spin_unlock_bh(ap->lock);
+
+       return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(ahci_thread_fn);
+
+void ahci_hw_port_interrupt(struct ata_port *ap)
+{
+       void __iomem *port_mmio = ahci_port_base(ap);
+       struct ahci_port_priv *pp = ap->private_data;
+       u32 status;
+
+       status = readl(port_mmio + PORT_IRQ_STAT);
+       writel(status, port_mmio + PORT_IRQ_STAT);
+
+       pp->intr_status |= status;
+}
+
+irqreturn_t ahci_hw_interrupt(int irq, void *dev_instance)
+{
+       struct ata_port *ap_this = dev_instance;
+       struct ahci_port_priv *pp = ap_this->private_data;
+       struct ata_host *host = ap_this->host;
+       struct ahci_host_priv *hpriv = host->private_data;
+       void __iomem *mmio = hpriv->mmio;
+       unsigned int i;
+       u32 irq_stat, irq_masked;
+
+       VPRINTK("ENTER\n");
+
+       spin_lock(&host->lock);
+
+       irq_stat = readl(mmio + HOST_IRQ_STAT);
+
+       if (!irq_stat) {
+               u32 status = pp->intr_status;
+
+               spin_unlock(&host->lock);
+
+               VPRINTK("EXIT\n");
+
+               return status ? IRQ_WAKE_THREAD : IRQ_NONE;
+       }
+
+       irq_masked = irq_stat & hpriv->port_map;
+
+       for (i = 0; i < host->n_ports; i++) {
+               struct ata_port *ap;
+
+               if (!(irq_masked & (1 << i)))
+                       continue;
+
+               ap = host->ports[i];
+               if (ap) {
+                       ahci_hw_port_interrupt(ap);
+                       VPRINTK("port %u\n", i);
+               } else {
+                       VPRINTK("port %u (no irq)\n", i);
+                       if (ata_ratelimit())
+                               dev_warn(host->dev,
+                                        "interrupt on disabled port %u\n", i);
+               }
+       }
+
+       writel(irq_stat, mmio + HOST_IRQ_STAT);
+
+       spin_unlock(&host->lock);
+
+       VPRINTK("EXIT\n");
+
+       return IRQ_WAKE_THREAD;
+}
+EXPORT_SYMBOL_GPL(ahci_hw_interrupt);
+
 irqreturn_t ahci_interrupt(int irq, void *dev_instance)
 {
        struct ata_host *host = dev_instance;
@@ -1951,13 +2049,13 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
        /* Use the nominal value 10 ms if the read MDAT is zero,
         * the nominal value of DETO is 20 ms.
         */
-       if (dev->sata_settings[ATA_LOG_DEVSLP_VALID] &
+       if (dev->devslp_timing[ATA_LOG_DEVSLP_VALID] &
            ATA_LOG_DEVSLP_VALID_MASK) {
-               mdat = dev->sata_settings[ATA_LOG_DEVSLP_MDAT] &
+               mdat = dev->devslp_timing[ATA_LOG_DEVSLP_MDAT] &
                       ATA_LOG_DEVSLP_MDAT_MASK;
                if (!mdat)
                        mdat = 10;
-               deto = dev->sata_settings[ATA_LOG_DEVSLP_DETO];
+               deto = dev->devslp_timing[ATA_LOG_DEVSLP_DETO];
                if (!deto)
                        deto = 20;
        } else {
@@ -2196,6 +2294,14 @@ static int ahci_port_start(struct ata_port *ap)
         */
        pp->intr_mask = DEF_PORT_IRQ;
 
+       /*
+        * Switch to per-port locking in case each port has its own MSI vector.
+        */
+       if ((hpriv->flags & AHCI_HFLAG_MULTI_MSI)) {
+               spin_lock_init(&pp->lock);
+               ap->lock = &pp->lock;
+       }
+
        ap->private_data = pp;
 
        /* engage engines, captain */
index 9e8b99af400dc53539ba4e8d45d36696e4818b65..46cd3f4c6aaaf8122c82ea8f60f7e6bcbaf435c6 100644 (file)
@@ -2325,24 +2325,28 @@ int ata_dev_configure(struct ata_device *dev)
                        }
                }
 
-               /* check and mark DevSlp capability */
-               if (ata_id_has_devslp(dev->id))
-                       dev->flags |= ATA_DFLAG_DEVSLP;
-
-               /* Obtain SATA Settings page from Identify Device Data Log,
-                * which contains DevSlp timing variables etc.
-                * Exclude old devices with ata_id_has_ncq()
+               /* Check and mark DevSlp capability. Get DevSlp timing variables
+                * from SATA Settings page of Identify Device Data Log.
                 */
-               if (ata_id_has_ncq(dev->id)) {
+               if (ata_id_has_devslp(dev->id)) {
+                       u8 sata_setting[ATA_SECT_SIZE];
+                       int i, j;
+
+                       dev->flags |= ATA_DFLAG_DEVSLP;
                        err_mask = ata_read_log_page(dev,
                                                     ATA_LOG_SATA_ID_DEV_DATA,
                                                     ATA_LOG_SATA_SETTINGS,
-                                                    dev->sata_settings,
+                                                    sata_setting,
                                                     1);
                        if (err_mask)
                                ata_dev_dbg(dev,
                                            "failed to get Identify Device Data, Emask 0x%x\n",
                                            err_mask);
+                       else
+                               for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
+                                       j = ATA_LOG_DEVSLP_OFFSET + i;
+                                       dev->devslp_timing[i] = sata_setting[j];
+                               }
                }
 
                dev->cdb_len = 16;
index bf039b0e97b79d7f9c9d2bae803ad84850b181b4..bcf4437214f50141e2d1bfe17a0ad02af6f4d81b 100644 (file)
@@ -2094,7 +2094,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev,
  */
 static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
 {
-       if (qc->flags & AC_ERR_MEDIA)
+       if (qc->err_mask & AC_ERR_MEDIA)
                return 0;       /* don't retry media errors */
        if (qc->flags & ATA_QCFLAG_IO)
                return 1;       /* otherwise retry anything from fs stack */
index 6a0955e6d4fc92231e59d36e0849dd46c9e0e60b..53ecac5a2161d729fc957b8b4379a526ca89e0d3 100644 (file)
@@ -636,82 +636,82 @@ struct rx_buf_desc {
 #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE  
 #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE  
 
-typedef volatile u_int  freg_t;
+typedef volatile u_int ffreg_t;
 typedef u_int   rreg_t;
 
 typedef struct _ffredn_t {
-        freg_t  idlehead_high;  /* Idle cell header (high)              */
-        freg_t  idlehead_low;   /* Idle cell header (low)               */
-        freg_t  maxrate;        /* Maximum rate                         */
-        freg_t  stparms;        /* Traffic Management Parameters        */
-        freg_t  abrubr_abr;     /* ABRUBR Priority Byte 1, TCR Byte 0   */
-        freg_t  rm_type;        /*                                      */
-        u_int   filler5[0x17 - 0x06];
-        freg_t  cmd_reg;        /* Command register                     */
-        u_int   filler18[0x20 - 0x18];
-        freg_t  cbr_base;       /* CBR Pointer Base                     */
-        freg_t  vbr_base;       /* VBR Pointer Base                     */
-        freg_t  abr_base;       /* ABR Pointer Base                     */
-        freg_t  ubr_base;       /* UBR Pointer Base                     */
-        u_int   filler24;
-        freg_t  vbrwq_base;     /* VBR Wait Queue Base                  */
-        freg_t  abrwq_base;     /* ABR Wait Queue Base                  */
-        freg_t  ubrwq_base;     /* UBR Wait Queue Base                  */
-        freg_t  vct_base;       /* Main VC Table Base                   */
-        freg_t  vcte_base;      /* Extended Main VC Table Base          */
-        u_int   filler2a[0x2C - 0x2A];
-        freg_t  cbr_tab_beg;    /* CBR Table Begin                      */
-        freg_t  cbr_tab_end;    /* CBR Table End                        */
-        freg_t  cbr_pointer;    /* CBR Pointer                          */
-        u_int   filler2f[0x30 - 0x2F];
-        freg_t  prq_st_adr;     /* Packet Ready Queue Start Address     */
-        freg_t  prq_ed_adr;     /* Packet Ready Queue End Address       */
-        freg_t  prq_rd_ptr;     /* Packet Ready Queue read pointer      */
-        freg_t  prq_wr_ptr;     /* Packet Ready Queue write pointer     */
-        freg_t  tcq_st_adr;     /* Transmit Complete Queue Start Address*/
-        freg_t  tcq_ed_adr;     /* Transmit Complete Queue End Address  */
-        freg_t  tcq_rd_ptr;     /* Transmit Complete Queue read pointer */
-        freg_t  tcq_wr_ptr;     /* Transmit Complete Queue write pointer*/
-        u_int   filler38[0x40 - 0x38];
-        freg_t  queue_base;     /* Base address for PRQ and TCQ         */
-        freg_t  desc_base;      /* Base address of descriptor table     */
-        u_int   filler42[0x45 - 0x42];
-        freg_t  mode_reg_0;     /* Mode register 0                      */
-        freg_t  mode_reg_1;     /* Mode register 1                      */
-        freg_t  intr_status_reg;/* Interrupt Status register            */
-        freg_t  mask_reg;       /* Mask Register                        */
-        freg_t  cell_ctr_high1; /* Total cell transfer count (high)     */
-        freg_t  cell_ctr_lo1;   /* Total cell transfer count (low)      */
-        freg_t  state_reg;      /* Status register                      */
-        u_int   filler4c[0x58 - 0x4c];
-        freg_t  curr_desc_num;  /* Contains the current descriptor num  */
-        freg_t  next_desc;      /* Next descriptor                      */
-        freg_t  next_vc;        /* Next VC                              */
-        u_int   filler5b[0x5d - 0x5b];
-        freg_t  present_slot_cnt;/* Present slot count                  */
-        u_int   filler5e[0x6a - 0x5e];
-        freg_t  new_desc_num;   /* New descriptor number                */
-        freg_t  new_vc;         /* New VC                               */
-        freg_t  sched_tbl_ptr;  /* Schedule table pointer               */
-        freg_t  vbrwq_wptr;     /* VBR wait queue write pointer         */
-        freg_t  vbrwq_rptr;     /* VBR wait queue read pointer          */
-        freg_t  abrwq_wptr;     /* ABR wait queue write pointer         */
-        freg_t  abrwq_rptr;     /* ABR wait queue read pointer          */
-        freg_t  ubrwq_wptr;     /* UBR wait queue write pointer         */
-        freg_t  ubrwq_rptr;     /* UBR wait queue read pointer          */
-        freg_t  cbr_vc;         /* CBR VC                               */
-        freg_t  vbr_sb_vc;      /* VBR SB VC                            */
-        freg_t  abr_sb_vc;      /* ABR SB VC                            */
-        freg_t  ubr_sb_vc;      /* UBR SB VC                            */
-        freg_t  vbr_next_link;  /* VBR next link                        */
-        freg_t  abr_next_link;  /* ABR next link                        */
-        freg_t  ubr_next_link;  /* UBR next link                        */
-        u_int   filler7a[0x7c-0x7a];
-        freg_t  out_rate_head;  /* Out of rate head                     */
-        u_int   filler7d[0xca-0x7d]; /* pad out to full address space   */
-        freg_t  cell_ctr_high1_nc;/* Total cell transfer count (high)   */
-        freg_t  cell_ctr_lo1_nc;/* Total cell transfer count (low)      */
-        u_int   fillercc[0x100-0xcc]; /* pad out to full address space   */
+       ffreg_t idlehead_high;  /* Idle cell header (high)              */
+       ffreg_t idlehead_low;   /* Idle cell header (low)               */
+       ffreg_t maxrate;        /* Maximum rate                         */
+       ffreg_t stparms;        /* Traffic Management Parameters        */
+       ffreg_t abrubr_abr;     /* ABRUBR Priority Byte 1, TCR Byte 0   */
+       ffreg_t rm_type;        /*                                      */
+       u_int   filler5[0x17 - 0x06];
+       ffreg_t cmd_reg;        /* Command register                     */
+       u_int   filler18[0x20 - 0x18];
+       ffreg_t cbr_base;       /* CBR Pointer Base                     */
+       ffreg_t vbr_base;       /* VBR Pointer Base                     */
+       ffreg_t abr_base;       /* ABR Pointer Base                     */
+       ffreg_t ubr_base;       /* UBR Pointer Base                     */
+       u_int   filler24;
+       ffreg_t vbrwq_base;     /* VBR Wait Queue Base                  */
+       ffreg_t abrwq_base;     /* ABR Wait Queue Base                  */
+       ffreg_t ubrwq_base;     /* UBR Wait Queue Base                  */
+       ffreg_t vct_base;       /* Main VC Table Base                   */
+       ffreg_t vcte_base;      /* Extended Main VC Table Base          */
+       u_int   filler2a[0x2C - 0x2A];
+       ffreg_t cbr_tab_beg;    /* CBR Table Begin                      */
+       ffreg_t cbr_tab_end;    /* CBR Table End                        */
+       ffreg_t cbr_pointer;    /* CBR Pointer                          */
+       u_int   filler2f[0x30 - 0x2F];
+       ffreg_t prq_st_adr;     /* Packet Ready Queue Start Address     */
+       ffreg_t prq_ed_adr;     /* Packet Ready Queue End Address       */
+       ffreg_t prq_rd_ptr;     /* Packet Ready Queue read pointer      */
+       ffreg_t prq_wr_ptr;     /* Packet Ready Queue write pointer     */
+       ffreg_t tcq_st_adr;     /* Transmit Complete Queue Start Address*/
+       ffreg_t tcq_ed_adr;     /* Transmit Complete Queue End Address  */
+       ffreg_t tcq_rd_ptr;     /* Transmit Complete Queue read pointer */
+       ffreg_t tcq_wr_ptr;     /* Transmit Complete Queue write pointer*/
+       u_int   filler38[0x40 - 0x38];
+       ffreg_t queue_base;     /* Base address for PRQ and TCQ         */
+       ffreg_t desc_base;      /* Base address of descriptor table     */
+       u_int   filler42[0x45 - 0x42];
+       ffreg_t mode_reg_0;     /* Mode register 0                      */
+       ffreg_t mode_reg_1;     /* Mode register 1                      */
+       ffreg_t intr_status_reg;/* Interrupt Status register            */
+       ffreg_t mask_reg;       /* Mask Register                        */
+       ffreg_t cell_ctr_high1; /* Total cell transfer count (high)     */
+       ffreg_t cell_ctr_lo1;   /* Total cell transfer count (low)      */
+       ffreg_t state_reg;      /* Status register                      */
+       u_int   filler4c[0x58 - 0x4c];
+       ffreg_t curr_desc_num;  /* Contains the current descriptor num  */
+       ffreg_t next_desc;      /* Next descriptor                      */
+       ffreg_t next_vc;        /* Next VC                              */
+       u_int   filler5b[0x5d - 0x5b];
+       ffreg_t present_slot_cnt;/* Present slot count                  */
+       u_int   filler5e[0x6a - 0x5e];
+       ffreg_t new_desc_num;   /* New descriptor number                */
+       ffreg_t new_vc;         /* New VC                               */
+       ffreg_t sched_tbl_ptr;  /* Schedule table pointer               */
+       ffreg_t vbrwq_wptr;     /* VBR wait queue write pointer         */
+       ffreg_t vbrwq_rptr;     /* VBR wait queue read pointer          */
+       ffreg_t abrwq_wptr;     /* ABR wait queue write pointer         */
+       ffreg_t abrwq_rptr;     /* ABR wait queue read pointer          */
+       ffreg_t ubrwq_wptr;     /* UBR wait queue write pointer         */
+       ffreg_t ubrwq_rptr;     /* UBR wait queue read pointer          */
+       ffreg_t cbr_vc;         /* CBR VC                               */
+       ffreg_t vbr_sb_vc;      /* VBR SB VC                            */
+       ffreg_t abr_sb_vc;      /* ABR SB VC                            */
+       ffreg_t ubr_sb_vc;      /* UBR SB VC                            */
+       ffreg_t vbr_next_link;  /* VBR next link                        */
+       ffreg_t abr_next_link;  /* ABR next link                        */
+       ffreg_t ubr_next_link;  /* UBR next link                        */
+       u_int   filler7a[0x7c-0x7a];
+       ffreg_t out_rate_head;  /* Out of rate head                     */
+       u_int   filler7d[0xca-0x7d]; /* pad out to full address space   */
+       ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high)   */
+       ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low)      */
+       u_int   fillercc[0x100-0xcc]; /* pad out to full address space   */
 } ffredn_t;
 
 typedef struct _rfredn_t {
index 5aa2d703d19fac08073c54e27530ef8d5bc5b45d..4e22ce3ed73d408b97d316c61fefc089e824c3c3 100644 (file)
@@ -21,6 +21,7 @@ endif
 obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
 obj-$(CONFIG_REGMAP)   += regmap/
 obj-$(CONFIG_SOC_BUS) += soc.o
+obj-$(CONFIG_PINCTRL) += pinctrl.o
 
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
 
index 63452943abd1e2a509661f934d51be5431259b86..fb10728f63721565d282e2298f2bf14641fb8f1e 100644 (file)
@@ -224,7 +224,7 @@ static void cpu_device_release(struct device *dev)
         * by the cpu device.
         *
         * Never copy this way of doing things, or you too will be made fun of
-        * on the linux-kerenl list, you have been warned.
+        * on the linux-kernel list, you have been warned.
         */
 }
 
index e3bbed8a617c257373a1fa2d75318d9ff930601a..656310156ddeeeabc0d709f3393b9af27e949214 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/wait.h>
 #include <linux/async.h>
 #include <linux/pm_runtime.h>
+#include <linux/pinctrl/devinfo.h>
 
 #include "base.h"
 #include "power/power.h"
@@ -269,6 +270,12 @@ static int really_probe(struct device *dev, struct device_driver *drv)
        WARN_ON(!list_empty(&dev->devres_head));
 
        dev->driver = drv;
+
+       /* If using pinctrl, bind pins now before probing */
+       ret = pinctrl_bind_pins(dev);
+       if (ret)
+               goto probe_failed;
+
        if (driver_sysfs_add(dev)) {
                printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
                        __func__, dev_name(dev));
index d81460309182fde4177865a922f740c278c7d40c..b392b353be39016818abd096da01fb4a18839450 100644 (file)
@@ -305,7 +305,7 @@ static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf
        char *buf;
 
        size = fw_file_size(file);
-       if (size < 0)
+       if (size <= 0)
                return false;
        buf = vmalloc(size);
        if (!buf)
diff --git a/drivers/base/pinctrl.c b/drivers/base/pinctrl.c
new file mode 100644 (file)
index 0000000..67a274e
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Driver core interface to the pinctrl subsystem.
+ *
+ * Copyright (C) 2012 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/device.h>
+#include <linux/pinctrl/devinfo.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/slab.h>
+
+/**
+ * pinctrl_bind_pins() - called by the device core before probe
+ * @dev: the device that is just about to probe
+ */
+int pinctrl_bind_pins(struct device *dev)
+{
+       int ret;
+
+       dev->pins = devm_kzalloc(dev, sizeof(*(dev->pins)), GFP_KERNEL);
+       if (!dev->pins)
+               return -ENOMEM;
+
+       dev->pins->p = devm_pinctrl_get(dev);
+       if (IS_ERR(dev->pins->p)) {
+               dev_dbg(dev, "no pinctrl handle\n");
+               ret = PTR_ERR(dev->pins->p);
+               goto cleanup_alloc;
+       }
+
+       dev->pins->default_state = pinctrl_lookup_state(dev->pins->p,
+                                       PINCTRL_STATE_DEFAULT);
+       if (IS_ERR(dev->pins->default_state)) {
+               dev_dbg(dev, "no default pinctrl state\n");
+               ret = 0;
+               goto cleanup_get;
+       }
+
+       ret = pinctrl_select_state(dev->pins->p, dev->pins->default_state);
+       if (ret) {
+               dev_dbg(dev, "failed to activate default pinctrl state\n");
+               goto cleanup_get;
+       }
+
+       return 0;
+
+       /*
+        * If no pinctrl handle or default state was found for this device,
+        * let's explicitly free the pin container in the device, there is
+        * no point in keeping it around.
+        */
+cleanup_get:
+       devm_pinctrl_put(dev->pins->p);
+cleanup_alloc:
+       devm_kfree(dev, dev->pins);
+       dev->pins = NULL;
+
+       /* Only return deferrals */
+       if (ret != -EPROBE_DEFER)
+               ret = 0;
+
+       return ret;
+}
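
With pinctrl_bind_pins() run by the driver core just before probe, a driver
that only needs its "default" pin state no longer has to request it by hand.
A minimal sketch of the boilerplate this makes redundant, for a hypothetical
platform driver (the foo_* names are illustrative, not part of this patch):

	#include <linux/platform_device.h>
	#include <linux/pinctrl/consumer.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct pinctrl *p;

		/* Grab and apply the "default" state explicitly; the driver
		 * core now does the equivalent before foo_probe() runs. */
		p = devm_pinctrl_get_select_default(&pdev->dev);
		if (IS_ERR(p))
			return PTR_ERR(p);

		/* ... normal device setup ... */
		return 0;
	}
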
index 5e75d1b683e236cf8ecc35a5c6f9b28ced8d30f3..cf129980abd0b2637f02acd446b2882d787751fd 100644 (file)
@@ -1,5 +1,5 @@
 obj-$(CONFIG_REGMAP) += regmap.o regcache.o
-obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o
+obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o regcache-flat.o
 obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
 obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
 obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
index 401d1919635a5fcdf9bac6b84d99ffdc485d0e14..5a22bd33ce3d7704df94411f216009ee24019336 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/regmap.h>
 #include <linux/fs.h>
 #include <linux/list.h>
+#include <linux/wait.h>
 
 struct regmap;
 struct regcache_ops;
@@ -25,6 +26,7 @@ struct regmap_debugfs_off_cache {
        off_t min;
        off_t max;
        unsigned int base_reg;
+       unsigned int max_reg;
 };
 
 struct regmap_format {
@@ -39,6 +41,13 @@ struct regmap_format {
        unsigned int (*parse_val)(void *buf);
 };
 
+struct regmap_async {
+       struct list_head list;
+       struct work_struct cleanup;
+       struct regmap *map;
+       void *work_buf;
+};
+
 struct regmap {
        struct mutex mutex;
        spinlock_t spinlock;
@@ -53,6 +62,11 @@ struct regmap {
        void *bus_context;
        const char *name;
 
+       spinlock_t async_lock;
+       wait_queue_head_t async_waitq;
+       struct list_head async_list;
+       int async_ret;
+
 #ifdef CONFIG_DEBUG_FS
        struct dentry *debugfs;
        const char *debugfs_name;
@@ -74,6 +88,11 @@ struct regmap {
        const struct regmap_access_table *volatile_table;
        const struct regmap_access_table *precious_table;
 
+       int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
+       int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+
+       bool defer_caching;
+
        u8 read_flag_mask;
        u8 write_flag_mask;
 
@@ -175,7 +194,10 @@ bool regcache_set_val(void *base, unsigned int idx,
                      unsigned int val, unsigned int word_size);
 int regcache_lookup_reg(struct regmap *map, unsigned int reg);
 
+void regmap_async_complete_cb(struct regmap_async *async, int ret);
+
 extern struct regcache_ops regcache_rbtree_ops;
 extern struct regcache_ops regcache_lzo_ops;
+extern struct regcache_ops regcache_flat_ops;
 
 #endif
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
new file mode 100644 (file)
index 0000000..d9762e4
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * Register cache access API - flat caching support
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/seq_file.h>
+
+#include "internal.h"
+
+static int regcache_flat_init(struct regmap *map)
+{
+       int i;
+       unsigned int *cache;
+
+       map->cache = kzalloc(sizeof(unsigned int) * (map->max_register + 1),
+                            GFP_KERNEL);
+       if (!map->cache)
+               return -ENOMEM;
+
+       cache = map->cache;
+
+       for (i = 0; i < map->num_reg_defaults; i++)
+               cache[map->reg_defaults[i].reg] = map->reg_defaults[i].def;
+
+       return 0;
+}
+
+static int regcache_flat_exit(struct regmap *map)
+{
+       kfree(map->cache);
+       map->cache = NULL;
+
+       return 0;
+}
+
+static int regcache_flat_read(struct regmap *map,
+                             unsigned int reg, unsigned int *value)
+{
+       unsigned int *cache = map->cache;
+
+       *value = cache[reg];
+
+       return 0;
+}
+
+static int regcache_flat_write(struct regmap *map, unsigned int reg,
+                              unsigned int value)
+{
+       unsigned int *cache = map->cache;
+
+       cache[reg] = value;
+
+       return 0;
+}
+
+struct regcache_ops regcache_flat_ops = {
+       .type = REGCACHE_FLAT,
+       .name = "flat",
+       .init = regcache_flat_init,
+       .exit = regcache_flat_exit,
+       .read = regcache_flat_read,
+       .write = regcache_flat_write,
+};
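
A driver opts into the new flat cache through its regmap_config; a minimal
sketch, assuming a small I2C device (the foo_* names and register layout are
illustrative):

	#include <linux/regmap.h>

	/* A dense map with a low register count is the intended use case:
	 * the flat cache trades (max_register + 1) words of memory for
	 * O(1) lookups with no allocation at access time. */
	static const struct regmap_config foo_regmap_config = {
		.reg_bits	= 8,
		.val_bits	= 8,
		.max_register	= 0x3f,
		.cache_type	= REGCACHE_FLAT,
	};

	/* in probe: map = devm_regmap_init_i2c(client, &foo_regmap_config); */
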
index 835883bda977c27cdeb1d5ec45e63a6e69c47269..e69ff3e4742c262eceda875b10cc384af8149496 100644 (file)
@@ -22,6 +22,7 @@
 static const struct regcache_ops *cache_types[] = {
        &regcache_rbtree_ops,
        &regcache_lzo_ops,
+       &regcache_flat_ops,
 };
 
 static int regcache_hw_init(struct regmap *map)
index 07aad786f817e88300af9c4d6d685391f41b3471..78d5f20c5f5b61ec4e60c51a62bd70f8f5592c46 100644 (file)
@@ -56,6 +56,19 @@ static const struct file_operations regmap_name_fops = {
        .llseek = default_llseek,
 };
 
+static void regmap_debugfs_free_dump_cache(struct regmap *map)
+{
+       struct regmap_debugfs_off_cache *c;
+
+       while (!list_empty(&map->debugfs_off_cache)) {
+               c = list_first_entry(&map->debugfs_off_cache,
+                                    struct regmap_debugfs_off_cache,
+                                    list);
+               list_del(&c->list);
+               kfree(c);
+       }
+}
+
 /*
  * Work out where the start offset maps into register numbers, bearing
  * in mind that we suppress hidden registers.
@@ -68,6 +81,8 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
        struct regmap_debugfs_off_cache *c = NULL;
        loff_t p = 0;
        unsigned int i, ret;
+       unsigned int fpos_offset;
+       unsigned int reg_offset;
 
        /*
         * If we don't have a cache build one so we don't have to do a
@@ -80,6 +95,9 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
                            regmap_precious(map, i)) {
                                if (c) {
                                        c->max = p - 1;
+                                       fpos_offset = c->max - c->min;
+                                       reg_offset = fpos_offset / map->debugfs_tot_len;
+                                       c->max_reg = c->base_reg + reg_offset;
                                        list_add_tail(&c->list,
                                                      &map->debugfs_off_cache);
                                        c = NULL;
@@ -91,8 +109,10 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
                        /* No cache entry?  Start a new one */
                        if (!c) {
                                c = kzalloc(sizeof(*c), GFP_KERNEL);
-                               if (!c)
-                                       break;
+                               if (!c) {
+                                       regmap_debugfs_free_dump_cache(map);
+                                       return base;
+                               }
                                c->min = p;
                                c->base_reg = i;
                        }
@@ -101,19 +121,53 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
                }
        }
 
-       /* Find the relevant block */
+       /* Close the last entry off if we didn't scan beyond it */
+       if (c) {
+               c->max = p - 1;
+               fpos_offset = c->max - c->min;
+               reg_offset = fpos_offset / map->debugfs_tot_len;
+               c->max_reg = c->base_reg + reg_offset;
+               list_add_tail(&c->list,
+                             &map->debugfs_off_cache);
+       }
+
+       /*
+        * This should never happen; we return above if we fail to
+        * allocate and we should never be in this code if there are
+        * no registers at all.
+        */
+       WARN_ON(list_empty(&map->debugfs_off_cache));
+       ret = base;
+
+       /* Find the relevant block:offset */
        list_for_each_entry(c, &map->debugfs_off_cache, list) {
-               if (*pos >= c->min && *pos <= c->max) {
-                       *pos = c->min;
-                       return c->base_reg;
+               if (from >= c->min && from <= c->max) {
+                       fpos_offset = from - c->min;
+                       reg_offset = fpos_offset / map->debugfs_tot_len;
+                       *pos = c->min + (reg_offset * map->debugfs_tot_len);
+                       return c->base_reg + reg_offset;
                }
 
-               ret = c->max;
+               *pos = c->max;
+               ret = c->max_reg;
        }
 
        return ret;
 }
 
+static inline void regmap_calc_tot_len(struct regmap *map,
+                                      void *buf, size_t count)
+{
+       /* Calculate the length of a fixed format  */
+       if (!map->debugfs_tot_len) {
+               map->debugfs_reg_len = regmap_calc_reg_len(map->max_register,
+                                                          buf, count);
+               map->debugfs_val_len = 2 * map->format.val_bytes;
+               map->debugfs_tot_len = map->debugfs_reg_len +
+                       map->debugfs_val_len + 3;      /* : \n */
+       }
+}
+
 static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
                                   unsigned int to, char __user *user_buf,
                                   size_t count, loff_t *ppos)
@@ -132,14 +186,7 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
        if (!buf)
                return -ENOMEM;
 
-       /* Calculate the length of a fixed format  */
-       if (!map->debugfs_tot_len) {
-               map->debugfs_reg_len = regmap_calc_reg_len(map->max_register,
-                                                          buf, count);
-               map->debugfs_val_len = 2 * map->format.val_bytes;
-               map->debugfs_tot_len = map->debugfs_reg_len +
-                       map->debugfs_val_len + 3;      /* : \n */
-       }
+       regmap_calc_tot_len(map, buf, count);
 
        /* Work out which register we're starting at */
        start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);
@@ -154,7 +201,7 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
                /* If we're in the region the user is trying to read */
                if (p >= *ppos) {
                        /* ...but not beyond it */
-                       if (buf_pos + 1 + map->debugfs_tot_len >= count)
+                       if (buf_pos + map->debugfs_tot_len > count)
                                break;
 
                        /* Format the register */
@@ -387,16 +434,8 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
 
 void regmap_debugfs_exit(struct regmap *map)
 {
-       struct regmap_debugfs_off_cache *c;
-
        debugfs_remove_recursive(map->debugfs);
-       while (!list_empty(&map->debugfs_off_cache)) {
-               c = list_first_entry(&map->debugfs_off_cache,
-                                    struct regmap_debugfs_off_cache,
-                                    list);
-               list_del(&c->list);
-               kfree(c);
-       }
+       regmap_debugfs_free_dump_cache(map);
        kfree(map->debugfs_name);
 }
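
As a worked example of the offset cache arithmetic (numbers chosen for
illustration): with an 8-bit maximum register and 16-bit values each dump
line is debugfs_tot_len = 2 + 4 + 3 = 9 characters (register, ": ", value,
newline).  A read at file position 47 inside a contiguous block whose first
register is 0x10 and whose cached c->min is 0 gives reg_offset = 47 / 9 = 5,
so the dump starts at register 0x15 and *pos is snapped back to 5 * 9 = 45,
the start of that line.
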
 
index 5972ad958544e3c875a2196294d83c74d7e44cc9..4706c63d0bc63fdc8d9bdba1e96d3e1cb543a806 100644 (file)
@@ -34,6 +34,7 @@ struct regmap_irq_chip_data {
        int irq;
        int wake_count;
 
+       void *status_reg_buf;
        unsigned int *status_buf;
        unsigned int *mask_buf;
        unsigned int *mask_buf_def;
@@ -87,6 +88,23 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
                if (ret != 0)
                        dev_err(d->map->dev, "Failed to sync masks in %x\n",
                                reg);
+
+               reg = d->chip->wake_base +
+                       (i * map->reg_stride * d->irq_reg_stride);
+               if (d->wake_buf) {
+                       if (d->chip->wake_invert)
+                               ret = regmap_update_bits(d->map, reg,
+                                                        d->mask_buf_def[i],
+                                                        ~d->wake_buf[i]);
+                       else
+                               ret = regmap_update_bits(d->map, reg,
+                                                        d->mask_buf_def[i],
+                                                        d->wake_buf[i]);
+                       if (ret != 0)
+                               dev_err(d->map->dev,
+                                       "Failed to sync wakes in %x: %d\n",
+                                       reg, ret);
+               }
        }
 
        if (d->chip->runtime_pm)
@@ -129,16 +147,15 @@ static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
 
-       if (!d->chip->wake_base)
-               return -EINVAL;
-
        if (on) {
-               d->wake_buf[irq_data->reg_offset / map->reg_stride]
-                       &= ~irq_data->mask;
+               if (d->wake_buf)
+                       d->wake_buf[irq_data->reg_offset / map->reg_stride]
+                               &= ~irq_data->mask;
                d->wake_count++;
        } else {
-               d->wake_buf[irq_data->reg_offset / map->reg_stride]
-                       |= irq_data->mask;
+               if (d->wake_buf)
+                       d->wake_buf[irq_data->reg_offset / map->reg_stride]
+                               |= irq_data->mask;
                d->wake_count--;
        }
 
@@ -172,25 +189,69 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
        }
 
        /*
-        * Ignore masked IRQs and ack if we need to; we ack early so
-        * there is no race between handling and acknowleding the
-        * interrupt.  We assume that typically few of the interrupts
-        * will fire simultaneously so don't worry about overhead from
-        * doing a write per register.
+        * Read in the statuses, using a single bulk read if possible
+        * in order to reduce the I/O overheads.
         */
-       for (i = 0; i < data->chip->num_regs; i++) {
-               ret = regmap_read(map, chip->status_base + (i * map->reg_stride
-                                  * data->irq_reg_stride),
-                                  &data->status_buf[i]);
+       if (!map->use_single_rw && map->reg_stride == 1 &&
+           data->irq_reg_stride == 1) {
+               u8 *buf8 = data->status_reg_buf;
+               u16 *buf16 = data->status_reg_buf;
+               u32 *buf32 = data->status_reg_buf;
 
+               BUG_ON(!data->status_reg_buf);
+
+               ret = regmap_bulk_read(map, chip->status_base,
+                                      data->status_reg_buf,
+                                      chip->num_regs);
                if (ret != 0) {
                        dev_err(map->dev, "Failed to read IRQ status: %d\n",
-                                       ret);
-                       if (chip->runtime_pm)
-                               pm_runtime_put(map->dev);
+                               ret);
                        return IRQ_NONE;
                }
 
+               for (i = 0; i < data->chip->num_regs; i++) {
+                       switch (map->format.val_bytes) {
+                       case 1:
+                               data->status_buf[i] = buf8[i];
+                               break;
+                       case 2:
+                               data->status_buf[i] = buf16[i];
+                               break;
+                       case 4:
+                               data->status_buf[i] = buf32[i];
+                               break;
+                       default:
+                               BUG();
+                               return IRQ_NONE;
+                       }
+               }
+
+       } else {
+               for (i = 0; i < data->chip->num_regs; i++) {
+                       ret = regmap_read(map, chip->status_base +
+                                         (i * map->reg_stride
+                                          * data->irq_reg_stride),
+                                         &data->status_buf[i]);
+
+                       if (ret != 0) {
+                               dev_err(map->dev,
+                                       "Failed to read IRQ status: %d\n",
+                                       ret);
+                               if (chip->runtime_pm)
+                                       pm_runtime_put(map->dev);
+                               return IRQ_NONE;
+                       }
+               }
+       }
+
+       /*
+        * Ignore masked IRQs and ack if we need to; we ack early so
+        * there is no race between handling and acknowledging the
+        * interrupt.  We assume that typically few of the interrupts
+        * will fire simultaneously so don't worry about overhead from
+        * doing a write per register.
+        */
+       for (i = 0; i < data->chip->num_regs; i++) {
                data->status_buf[i] &= ~data->mask_buf[i];
 
                if (data->status_buf[i] && chip->ack_base) {
@@ -316,11 +377,6 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
 
        d->irq_chip = regmap_irq_chip;
        d->irq_chip.name = chip->name;
-       if (!chip->wake_base) {
-               d->irq_chip.irq_set_wake = NULL;
-               d->irq_chip.flags |= IRQCHIP_MASK_ON_SUSPEND |
-                                    IRQCHIP_SKIP_SET_WAKE;
-       }
        d->irq = irq;
        d->map = map;
        d->chip = chip;
@@ -331,6 +387,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
        else
                d->irq_reg_stride = 1;
 
+       if (!map->use_single_rw && map->reg_stride == 1 &&
+           d->irq_reg_stride == 1) {
+               d->status_reg_buf = kmalloc(map->format.val_bytes *
+                                           chip->num_regs, GFP_KERNEL);
+               if (!d->status_reg_buf)
+                       goto err_alloc;
+       }
+
        mutex_init(&d->lock);
 
        for (i = 0; i < chip->num_irqs; i++)
@@ -361,8 +425,15 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
                        d->wake_buf[i] = d->mask_buf_def[i];
                        reg = chip->wake_base +
                                (i * map->reg_stride * d->irq_reg_stride);
-                       ret = regmap_update_bits(map, reg, d->wake_buf[i],
-                                                d->wake_buf[i]);
+
+                       if (chip->wake_invert)
+                               ret = regmap_update_bits(map, reg,
+                                                        d->mask_buf_def[i],
+                                                        0);
+                       else
+                               ret = regmap_update_bits(map, reg,
+                                                        d->mask_buf_def[i],
+                                                        d->wake_buf[i]);
                        if (ret != 0) {
                                dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
                                        reg, ret);
@@ -401,6 +472,7 @@ err_alloc:
        kfree(d->mask_buf_def);
        kfree(d->mask_buf);
        kfree(d->status_buf);
+       kfree(d->status_reg_buf);
        kfree(d);
        return ret;
 }
@@ -422,6 +494,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
        kfree(d->wake_buf);
        kfree(d->mask_buf_def);
        kfree(d->mask_buf);
+       kfree(d->status_reg_buf);
        kfree(d->status_buf);
        kfree(d);
 }
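
The wake and bulk-status handling above is driven entirely by the chip
description; a hedged sketch of a regmap_irq_chip that uses an inverted wake
register bank (all FOO_* names and values are assumptions for illustration):

	#define FOO_REG_IRQ_STATUS	0x10
	#define FOO_REG_IRQ_MASK	0x11
	#define FOO_REG_IRQ_ACK		0x12
	#define FOO_REG_IRQ_WAKE	0x13

	static const struct regmap_irq foo_irqs[] = {
		{ .reg_offset = 0, .mask = BIT(0) },	/* "alarm" */
		{ .reg_offset = 0, .mask = BIT(1) },	/* "pwr" */
	};

	static struct regmap_irq_chip foo_irq_chip = {
		.name		= "foo",
		.status_base	= FOO_REG_IRQ_STATUS,
		.mask_base	= FOO_REG_IRQ_MASK,
		.ack_base	= FOO_REG_IRQ_ACK,
		.wake_base	= FOO_REG_IRQ_WAKE,
		.wake_invert	= true,	/* a set bit disables wake on this part */
		.num_regs	= 1,
		.irqs		= foo_irqs,
		.num_irqs	= ARRAY_SIZE(foo_irqs),
	};

	/* in probe:
	 * ret = regmap_add_irq_chip(map, irq, IRQF_ONESHOT, -1,
	 *			     &foo_irq_chip, &irq_data);
	 */
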
index f05fc74dd84a5f46b8cb32ed3943972fffe056e7..98745dd77e8ccfb75080d47fb099448d81d1eaa8 100644 (file)
@@ -16,6 +16,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -26,6 +27,7 @@
 struct regmap_mmio_context {
        void __iomem *regs;
        unsigned val_bytes;
+       struct clk *clk;
 };
 
 static int regmap_mmio_gather_write(void *context,
@@ -34,9 +36,16 @@ static int regmap_mmio_gather_write(void *context,
 {
        struct regmap_mmio_context *ctx = context;
        u32 offset;
+       int ret;
 
        BUG_ON(reg_size != 4);
 
+       if (ctx->clk) {
+               ret = clk_enable(ctx->clk);
+               if (ret < 0)
+                       return ret;
+       }
+
        offset = *(u32 *)reg;
 
        while (val_size) {
@@ -64,6 +73,9 @@ static int regmap_mmio_gather_write(void *context,
                offset += ctx->val_bytes;
        }
 
+       if (ctx->clk)
+               clk_disable(ctx->clk);
+
        return 0;
 }
 
@@ -80,9 +92,16 @@ static int regmap_mmio_read(void *context,
 {
        struct regmap_mmio_context *ctx = context;
        u32 offset;
+       int ret;
 
        BUG_ON(reg_size != 4);
 
+       if (ctx->clk) {
+               ret = clk_enable(ctx->clk);
+               if (ret < 0)
+                       return ret;
+       }
+
        offset = *(u32 *)reg;
 
        while (val_size) {
@@ -110,11 +129,20 @@ static int regmap_mmio_read(void *context,
                offset += ctx->val_bytes;
        }
 
+       if (ctx->clk)
+               clk_disable(ctx->clk);
+
        return 0;
 }
 
 static void regmap_mmio_free_context(void *context)
 {
+       struct regmap_mmio_context *ctx = context;
+
+       if (ctx->clk) {
+               clk_unprepare(ctx->clk);
+               clk_put(ctx->clk);
+       }
        kfree(context);
 }
 
@@ -128,11 +156,14 @@ static struct regmap_bus regmap_mmio = {
        .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
 };
 
-static struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
+static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
+                                       const char *clk_id,
+                                       void __iomem *regs,
                                        const struct regmap_config *config)
 {
        struct regmap_mmio_context *ctx;
        int min_stride;
+       int ret;
 
        if (config->reg_bits != 32)
                return ERR_PTR(-EINVAL);
@@ -179,37 +210,59 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
        ctx->regs = regs;
        ctx->val_bytes = config->val_bits / 8;
 
+       if (clk_id == NULL)
+               return ctx;
+
+       ctx->clk = clk_get(dev, clk_id);
+       if (IS_ERR(ctx->clk)) {
+               ret = PTR_ERR(ctx->clk);
+               goto err_free;
+       }
+
+       ret = clk_prepare(ctx->clk);
+       if (ret < 0) {
+               clk_put(ctx->clk);
+               goto err_free;
+       }
+
        return ctx;
+
+err_free:
+       kfree(ctx);
+
+       return ERR_PTR(ret);
 }
 
 /**
- * regmap_init_mmio(): Initialise register map
+ * regmap_init_mmio_clk(): Initialise register map with register clock
  *
  * @dev: Device that will be interacted with
+ * @clk_id: register clock consumer ID
  * @regs: Pointer to memory-mapped IO region
  * @config: Configuration for register map
  *
  * The return value will be an ERR_PTR() on error or a valid pointer to
  * a struct regmap.
  */
-struct regmap *regmap_init_mmio(struct device *dev,
-                               void __iomem *regs,
-                               const struct regmap_config *config)
+struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+                                   void __iomem *regs,
+                                   const struct regmap_config *config)
 {
        struct regmap_mmio_context *ctx;
 
-       ctx = regmap_mmio_gen_context(regs, config);
+       ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
        if (IS_ERR(ctx))
                return ERR_CAST(ctx);
 
        return regmap_init(dev, &regmap_mmio, ctx, config);
 }
-EXPORT_SYMBOL_GPL(regmap_init_mmio);
+EXPORT_SYMBOL_GPL(regmap_init_mmio_clk);
 
 /**
- * devm_regmap_init_mmio(): Initialise managed register map
+ * devm_regmap_init_mmio_clk(): Initialise managed register map with clock
  *
  * @dev: Device that will be interacted with
+ * @clk_id: register clock consumer ID
  * @regs: Pointer to memory-mapped IO region
  * @config: Configuration for register map
  *
@@ -217,18 +270,18 @@ EXPORT_SYMBOL_GPL(regmap_init_mmio);
  * to a struct regmap.  The regmap will be automatically freed by the
  * device management code.
  */
-struct regmap *devm_regmap_init_mmio(struct device *dev,
-                                    void __iomem *regs,
-                                    const struct regmap_config *config)
+struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+                                        void __iomem *regs,
+                                        const struct regmap_config *config)
 {
        struct regmap_mmio_context *ctx;
 
-       ctx = regmap_mmio_gen_context(regs, config);
+       ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
        if (IS_ERR(ctx))
                return ERR_CAST(ctx);
 
        return devm_regmap_init(dev, &regmap_mmio, ctx, config);
 }
-EXPORT_SYMBOL_GPL(devm_regmap_init_mmio);
+EXPORT_SYMBOL_GPL(devm_regmap_init_mmio_clk);
 
 MODULE_LICENSE("GPL v2");
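
Using the new clock-aware entry points is a one-liner in a driver; a sketch,
assuming a platform device whose register block needs an "mclk" clock enabled
around accesses (names and register geometry are illustrative):

	static const struct regmap_config bar_mmio_config = {
		.reg_bits	= 32,
		.val_bits	= 32,
		.reg_stride	= 4,
		.max_register	= 0xfc,
	};

	/* in probe, after ioremap(): the clock is clk_prepare()d once at
	 * init and clk_enable()/clk_disable()d around each access by the
	 * regmap-mmio bus. */
	map = devm_regmap_init_mmio_clk(&pdev->dev, "mclk", base,
					&bar_mmio_config);
	if (IS_ERR(map))
		return PTR_ERR(map);
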
index ffa46a92ad3383dcc8609cd891514f0b5c3af967..4c506bd940f372b8bbe3b630a9599ca52ea745bf 100644 (file)
 #include <linux/init.h>
 #include <linux/module.h>
 
+#include "internal.h"
+
+struct regmap_async_spi {
+       struct regmap_async core;
+       struct spi_message m;
+       struct spi_transfer t[2];
+};
+
+static void regmap_spi_complete(void *data)
+{
+       struct regmap_async_spi *async = data;
+
+       regmap_async_complete_cb(&async->core, async->m.status);
+}
+
 static int regmap_spi_write(void *context, const void *data, size_t count)
 {
        struct device *dev = context;
@@ -40,6 +55,43 @@ static int regmap_spi_gather_write(void *context,
        return spi_sync(spi, &m);
 }
 
+static int regmap_spi_async_write(void *context,
+                                 const void *reg, size_t reg_len,
+                                 const void *val, size_t val_len,
+                                 struct regmap_async *a)
+{
+       struct regmap_async_spi *async = container_of(a,
+                                                     struct regmap_async_spi,
+                                                     core);
+       struct device *dev = context;
+       struct spi_device *spi = to_spi_device(dev);
+
+       async->t[0].tx_buf = reg;
+       async->t[0].len = reg_len;
+       async->t[1].tx_buf = val;
+       async->t[1].len = val_len;
+
+       spi_message_init(&async->m);
+       spi_message_add_tail(&async->t[0], &async->m);
+       spi_message_add_tail(&async->t[1], &async->m);
+
+       async->m.complete = regmap_spi_complete;
+       async->m.context = async;
+
+       return spi_async(spi, &async->m);
+}
+
+static struct regmap_async *regmap_spi_async_alloc(void)
+{
+       struct regmap_async_spi *async_spi;
+
+       async_spi = kzalloc(sizeof(*async_spi), GFP_KERNEL);
+       if (!async_spi)
+               return NULL;
+
+       return &async_spi->core;
+}
+
 static int regmap_spi_read(void *context,
                           const void *reg, size_t reg_size,
                           void *val, size_t val_size)
@@ -53,6 +105,8 @@ static int regmap_spi_read(void *context,
 static struct regmap_bus regmap_spi = {
        .write = regmap_spi_write,
        .gather_write = regmap_spi_gather_write,
+       .async_write = regmap_spi_async_write,
+       .async_alloc = regmap_spi_async_alloc,
        .read = regmap_spi_read,
        .read_flag_mask = 0x80,
 };
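
With async_write/async_alloc wired up for SPI, regmap_raw_write_async() lets
a caller queue large blocks without sleeping for each transfer.  A hedged
sketch of the intended firmware-download pattern (the register name and the
firmware handle are illustrative):

	/* Queue the block; fw->data must stay valid until completion. */
	ret = regmap_raw_write_async(map, FOO_REG_DSP_MEM,
				     fw->data, fw->size);
	if (ret < 0)
		return ret;

	/* Block until every queued async write has finished and pick up
	 * any error reported by the bus. */
	return regmap_async_complete(map);
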
index 42d5cb0f503fb825104d7a0f02f79b63ab6f63da..3d2367501fd0521ee4e472df6126b6ea25ee09a3 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/mutex.h>
 #include <linux/err.h>
 #include <linux/rbtree.h>
+#include <linux/sched.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/regmap.h>
@@ -34,6 +35,22 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
                               unsigned int mask, unsigned int val,
                               bool *change);
 
+static int _regmap_bus_read(void *context, unsigned int reg,
+                           unsigned int *val);
+static int _regmap_bus_formatted_write(void *context, unsigned int reg,
+                                      unsigned int val);
+static int _regmap_bus_raw_write(void *context, unsigned int reg,
+                                unsigned int val);
+
+static void async_cleanup(struct work_struct *work)
+{
+       struct regmap_async *async = container_of(work, struct regmap_async,
+                                                 cleanup);
+
+       kfree(async->work_buf);
+       kfree(async);
+}
+
 bool regmap_reg_in_ranges(unsigned int reg,
                          const struct regmap_range *ranges,
                          unsigned int nranges)
@@ -372,7 +389,7 @@ struct regmap *regmap_init(struct device *dev,
        enum regmap_endian reg_endian, val_endian;
        int i, j;
 
-       if (!bus || !config)
+       if (!config)
                goto err;
 
        map = kzalloc(sizeof(*map), GFP_KERNEL);
@@ -386,7 +403,8 @@ struct regmap *regmap_init(struct device *dev,
                map->unlock = config->unlock;
                map->lock_arg = config->lock_arg;
        } else {
-               if (bus->fast_io) {
+               if ((bus && bus->fast_io) ||
+                   config->fast_io) {
                        spin_lock_init(&map->spinlock);
                        map->lock = regmap_lock_spinlock;
                        map->unlock = regmap_unlock_spinlock;
@@ -423,13 +441,27 @@ struct regmap *regmap_init(struct device *dev,
        map->cache_type = config->cache_type;
        map->name = config->name;
 
+       spin_lock_init(&map->async_lock);
+       INIT_LIST_HEAD(&map->async_list);
+       init_waitqueue_head(&map->async_waitq);
+
        if (config->read_flag_mask || config->write_flag_mask) {
                map->read_flag_mask = config->read_flag_mask;
                map->write_flag_mask = config->write_flag_mask;
-       } else {
+       } else if (bus) {
                map->read_flag_mask = bus->read_flag_mask;
        }
 
+       if (!bus) {
+               map->reg_read  = config->reg_read;
+               map->reg_write = config->reg_write;
+
+               map->defer_caching = false;
+               goto skip_format_initialization;
+       } else {
+               map->reg_read  = _regmap_bus_read;
+       }
+
        reg_endian = config->reg_format_endian;
        if (reg_endian == REGMAP_ENDIAN_DEFAULT)
                reg_endian = bus->reg_format_endian_default;
@@ -500,6 +532,12 @@ struct regmap *regmap_init(struct device *dev,
                }
                break;
 
+       case 24:
+               if (reg_endian != REGMAP_ENDIAN_BIG)
+                       goto err_map;
+               map->format.format_reg = regmap_format_24;
+               break;
+
        case 32:
                switch (reg_endian) {
                case REGMAP_ENDIAN_BIG:
@@ -575,6 +613,16 @@ struct regmap *regmap_init(struct device *dev,
                goto err_map;
        }
 
+       if (map->format.format_write) {
+               map->defer_caching = false;
+               map->reg_write = _regmap_bus_formatted_write;
+       } else if (map->format.format_val) {
+               map->defer_caching = true;
+               map->reg_write = _regmap_bus_raw_write;
+       }
+
+skip_format_initialization:
+
        map->range_tree = RB_ROOT;
        for (i = 0; i < config->num_ranges; i++) {
                const struct regmap_range_cfg *range_cfg = &config->ranges[i];
@@ -776,7 +824,7 @@ void regmap_exit(struct regmap *map)
        regcache_exit(map);
        regmap_debugfs_exit(map);
        regmap_range_exit(map);
-       if (map->bus->free_context)
+       if (map->bus && map->bus->free_context)
                map->bus->free_context(map->bus_context);
        kfree(map->work_buf);
        kfree(map);
@@ -870,15 +918,20 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
 }
 
 static int _regmap_raw_write(struct regmap *map, unsigned int reg,
-                            const void *val, size_t val_len)
+                            const void *val, size_t val_len, bool async)
 {
        struct regmap_range_node *range;
+       unsigned long flags;
        u8 *u8 = map->work_buf;
+       void *work_val = map->work_buf + map->format.reg_bytes +
+               map->format.pad_bytes;
        void *buf;
        int ret = -ENOTSUPP;
        size_t len;
        int i;
 
+       BUG_ON(!map->bus);
+
        /* Check for unwritable registers before we start */
        if (map->writeable_reg)
                for (i = 0; i < val_len / map->format.val_bytes; i++)
@@ -918,7 +971,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
                        dev_dbg(map->dev, "Writing window %d/%zu\n",
                                win_residue, val_len / map->format.val_bytes);
                        ret = _regmap_raw_write(map, reg, val, win_residue *
-                                               map->format.val_bytes);
+                                               map->format.val_bytes, async);
                        if (ret != 0)
                                return ret;
 
@@ -941,6 +994,50 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
 
        u8[0] |= map->write_flag_mask;
 
+       if (async && map->bus->async_write) {
+               struct regmap_async *async = map->bus->async_alloc();
+               if (!async)
+                       return -ENOMEM;
+
+               async->work_buf = kzalloc(map->format.buf_size,
+                                         GFP_KERNEL | GFP_DMA);
+               if (!async->work_buf) {
+                       kfree(async);
+                       return -ENOMEM;
+               }
+
+               INIT_WORK(&async->cleanup, async_cleanup);
+               async->map = map;
+
+               /* If the caller supplied the value we can use it safely. */
+               memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
+                      map->format.reg_bytes + map->format.val_bytes);
+               if (val == work_val)
+                       val = async->work_buf + map->format.pad_bytes +
+                               map->format.reg_bytes;
+
+               spin_lock_irqsave(&map->async_lock, flags);
+               list_add_tail(&async->list, &map->async_list);
+               spin_unlock_irqrestore(&map->async_lock, flags);
+
+               ret = map->bus->async_write(map->bus_context, async->work_buf,
+                                           map->format.reg_bytes +
+                                           map->format.pad_bytes,
+                                           val, val_len, async);
+
+               if (ret != 0) {
+                       dev_err(map->dev, "Failed to schedule write: %d\n",
+                               ret);
+
+                       spin_lock_irqsave(&map->async_lock, flags);
+                       list_del(&async->list);
+                       spin_unlock_irqrestore(&map->async_lock, flags);
+
+                       kfree(async->work_buf);
+                       kfree(async);
+               }
+       }
+
        trace_regmap_hw_write_start(map->dev, reg,
                                    val_len / map->format.val_bytes);
 
@@ -948,8 +1045,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
         * send the work_buf directly, otherwise try to do a gather
         * write.
         */
-       if (val == (map->work_buf + map->format.pad_bytes +
-                   map->format.reg_bytes))
+       if (val == work_val)
                ret = map->bus->write(map->bus_context, map->work_buf,
                                      map->format.reg_bytes +
                                      map->format.pad_bytes +
@@ -981,14 +1077,62 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
        return ret;
 }
 
+static int _regmap_bus_formatted_write(void *context, unsigned int reg,
+                                      unsigned int val)
+{
+       int ret;
+       struct regmap_range_node *range;
+       struct regmap *map = context;
+
+       BUG_ON(!map->bus || !map->format.format_write);
+
+       range = _regmap_range_lookup(map, reg);
+       if (range) {
+               ret = _regmap_select_page(map, &reg, range, 1);
+               if (ret != 0)
+                       return ret;
+       }
+
+       map->format.format_write(map, reg, val);
+
+       trace_regmap_hw_write_start(map->dev, reg, 1);
+
+       ret = map->bus->write(map->bus_context, map->work_buf,
+                             map->format.buf_size);
+
+       trace_regmap_hw_write_done(map->dev, reg, 1);
+
+       return ret;
+}
+
+static int _regmap_bus_raw_write(void *context, unsigned int reg,
+                                unsigned int val)
+{
+       struct regmap *map = context;
+
+       BUG_ON(!map->bus || !map->format.format_val);
+
+       map->format.format_val(map->work_buf + map->format.reg_bytes
+                              + map->format.pad_bytes, val, 0);
+       return _regmap_raw_write(map, reg,
+                                map->work_buf +
+                                map->format.reg_bytes +
+                                map->format.pad_bytes,
+                                map->format.val_bytes, false);
+}
+
+static inline void *_regmap_map_get_context(struct regmap *map)
+{
+       return (map->bus) ? map : map->bus_context;
+}
+
 int _regmap_write(struct regmap *map, unsigned int reg,
                  unsigned int val)
 {
-       struct regmap_range_node *range;
        int ret;
-       BUG_ON(!map->format.format_write && !map->format.format_val);
+       void *context = _regmap_map_get_context(map);
 
-       if (!map->cache_bypass && map->format.format_write) {
+       if (!map->cache_bypass && !map->defer_caching) {
                ret = regcache_write(map, reg, val);
                if (ret != 0)
                        return ret;
@@ -1005,33 +1149,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
 
        trace_regmap_reg_write(map->dev, reg, val);
 
-       if (map->format.format_write) {
-               range = _regmap_range_lookup(map, reg);
-               if (range) {
-                       ret = _regmap_select_page(map, &reg, range, 1);
-                       if (ret != 0)
-                               return ret;
-               }
-
-               map->format.format_write(map, reg, val);
-
-               trace_regmap_hw_write_start(map->dev, reg, 1);
-
-               ret = map->bus->write(map->bus_context, map->work_buf,
-                                     map->format.buf_size);
-
-               trace_regmap_hw_write_done(map->dev, reg, 1);
-
-               return ret;
-       } else {
-               map->format.format_val(map->work_buf + map->format.reg_bytes
-                                      + map->format.pad_bytes, val, 0);
-               return _regmap_raw_write(map, reg,
-                                        map->work_buf +
-                                        map->format.reg_bytes +
-                                        map->format.pad_bytes,
-                                        map->format.val_bytes);
-       }
+       return map->reg_write(context, reg, val);
 }
 
 /**
@@ -1082,6 +1200,8 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
 {
        int ret;
 
+       if (!map->bus)
+               return -EINVAL;
        if (val_len % map->format.val_bytes)
                return -EINVAL;
        if (reg % map->reg_stride)
@@ -1089,7 +1209,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
 
        map->lock(map->lock_arg);
 
-       ret = _regmap_raw_write(map, reg, val, val_len);
+       ret = _regmap_raw_write(map, reg, val, val_len, false);
 
        map->unlock(map->lock_arg);
 
@@ -1106,7 +1226,7 @@ EXPORT_SYMBOL_GPL(regmap_raw_write);
  * @val_count: Number of registers to write
  *
  * This function is intended to be used for writing a large block of
- * data to be device either in single transfer or multiple transfer.
+ * data to the device either in single transfer or multiple transfer.
  *
  * A value of zero will be returned on success, a negative errno will
  * be returned in error cases.
@@ -1118,6 +1238,8 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
        size_t val_bytes = map->format.val_bytes;
        void *wval;
 
+       if (!map->bus)
+               return -EINVAL;
        if (!map->format.parse_val)
                return -EINVAL;
        if (reg % map->reg_stride)
@@ -1145,14 +1267,15 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
        if (map->use_single_rw) {
                for (i = 0; i < val_count; i++) {
                        ret = regmap_raw_write(map,
-                                               reg + (i * map->reg_stride),
-                                               val + (i * val_bytes),
-                                               val_bytes);
+                                              reg + (i * map->reg_stride),
+                                              val + (i * val_bytes),
+                                              val_bytes);
                        if (ret != 0)
                                return ret;
                }
        } else {
-               ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
+               ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count,
+                                       false);
        }
 
        if (val_bytes != 1)
@@ -1164,6 +1287,48 @@ out:
 }
 EXPORT_SYMBOL_GPL(regmap_bulk_write);
 
+/**
+ * regmap_raw_write_async(): Write raw values to one or more registers
+ *                           asynchronously
+ *
+ * @map: Register map to write to
+ * @reg: Initial register to write to
+ * @val: Block of data to be written, laid out for direct transmission to the
+ *       device.  Must be valid until regmap_async_complete() is called.
+ * @val_len: Length of data pointed to by val.
+ *
+ * This function is intended to be used for things like firmware
+ * download where a large block of data needs to be transferred to the
+ * device.  No formatting will be done on the data provided.
+ *
+ * If supported by the underlying bus the write will be scheduled
+ * asynchronously, helping maximise I/O speed on higher speed buses
+ * like SPI.  regmap_async_complete() can be called to ensure that all
+ * asynchronous writes have been completed.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_raw_write_async(struct regmap *map, unsigned int reg,
+                          const void *val, size_t val_len)
+{
+       int ret;
+
+       if (val_len % map->format.val_bytes)
+               return -EINVAL;
+       if (reg % map->reg_stride)
+               return -EINVAL;
+
+       map->lock(map->lock_arg);
+
+       ret = _regmap_raw_write(map, reg, val, val_len, true);
+
+       map->unlock(map->lock_arg);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_raw_write_async);
+
 static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
                            unsigned int val_len)
 {
@@ -1171,6 +1336,8 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
        u8 *u8 = map->work_buf;
        int ret;
 
+       BUG_ON(!map->bus);
+
        range = _regmap_range_lookup(map, reg);
        if (range) {
                ret = _regmap_select_page(map, &reg, range,
@@ -1202,10 +1369,29 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
        return ret;
 }
 
+static int _regmap_bus_read(void *context, unsigned int reg,
+                           unsigned int *val)
+{
+       int ret;
+       struct regmap *map = context;
+
+       if (!map->format.parse_val)
+               return -EINVAL;
+
+       ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
+       if (ret == 0)
+               *val = map->format.parse_val(map->work_buf);
+
+       return ret;
+}
+
 static int _regmap_read(struct regmap *map, unsigned int reg,
                        unsigned int *val)
 {
        int ret;
+       void *context = _regmap_map_get_context(map);
+
+       BUG_ON(!map->reg_read);
 
        if (!map->cache_bypass) {
                ret = regcache_read(map, reg, val);
@@ -1213,26 +1399,21 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
                        return 0;
        }
 
-       if (!map->format.parse_val)
-               return -EINVAL;
-
        if (map->cache_only)
                return -EBUSY;
 
-       ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
+       ret = map->reg_read(context, reg, val);
        if (ret == 0) {
-               *val = map->format.parse_val(map->work_buf);
-
 #ifdef LOG_DEVICE
                if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
                        dev_info(map->dev, "%x => %x\n", reg, *val);
 #endif
 
                trace_regmap_reg_read(map->dev, reg, *val);
-       }
 
-       if (ret == 0 && !map->cache_bypass)
-               regcache_write(map, reg, *val);
+               if (!map->cache_bypass)
+                       regcache_write(map, reg, *val);
+       }
 
        return ret;
 }
@@ -1283,6 +1464,8 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
        unsigned int v;
        int ret, i;
 
+       if (!map->bus)
+               return -EINVAL;
        if (val_len % map->format.val_bytes)
                return -EINVAL;
        if (reg % map->reg_stride)
@@ -1334,6 +1517,8 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
        size_t val_bytes = map->format.val_bytes;
        bool vol = regmap_volatile_range(map, reg, val_count);
 
+       if (!map->bus)
+               return -EINVAL;
        if (!map->format.parse_val)
                return -EINVAL;
        if (reg % map->reg_stride)
@@ -1450,6 +1635,68 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
 }
 EXPORT_SYMBOL_GPL(regmap_update_bits_check);
 
+void regmap_async_complete_cb(struct regmap_async *async, int ret)
+{
+       struct regmap *map = async->map;
+       bool wake;
+
+       spin_lock(&map->async_lock);
+
+       list_del(&async->list);
+       wake = list_empty(&map->async_list);
+
+       if (ret != 0)
+               map->async_ret = ret;
+
+       spin_unlock(&map->async_lock);
+
+       schedule_work(&async->cleanup);
+
+       if (wake)
+               wake_up(&map->async_waitq);
+}
+EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
+
+static int regmap_async_is_done(struct regmap *map)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&map->async_lock, flags);
+       ret = list_empty(&map->async_list);
+       spin_unlock_irqrestore(&map->async_lock, flags);
+
+       return ret;
+}
+
+/**
+ * regmap_async_complete: Ensure all asynchronous I/O has completed.
+ *
+ * @map: Map to operate on.
+ *
+ * Blocks until any pending asynchronous I/O has completed.  Returns
+ * an error code for any failed I/O operations.
+ */
+int regmap_async_complete(struct regmap *map)
+{
+       unsigned long flags;
+       int ret;
+
+       /* Nothing to do with no async support */
+       if (!map->bus->async_write)
+               return 0;
+
+       wait_event(map->async_waitq, regmap_async_is_done(map));
+
+       spin_lock_irqsave(&map->async_lock, flags);
+       ret = map->async_ret;
+       map->async_ret = 0;
+       spin_unlock_irqrestore(&map->async_lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_async_complete);
+
 /**
  * regmap_register_patch: Register and apply register updates to be applied
  *                        on device initialistion
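
Since regmap_init() now accepts a NULL bus, devices reachable only through
bespoke accessor functions can still use the regmap API (update_bits, caching,
tracing).  A minimal sketch under that assumption; the foo_hw_* helpers are
hypothetical stand-ins for whatever the driver really uses:

	static int foo_reg_read(void *context, unsigned int reg,
				unsigned int *val)
	{
		struct foo *foo = context;

		*val = foo_hw_read(foo, reg);	/* hypothetical accessor */
		return 0;
	}

	static int foo_reg_write(void *context, unsigned int reg,
				 unsigned int val)
	{
		struct foo *foo = context;

		return foo_hw_write(foo, reg, val);	/* hypothetical */
	}

	static const struct regmap_config foo_config = {
		.reg_bits	= 8,
		.val_bits	= 8,
		.max_register	= 0x7f,
		.reg_read	= foo_reg_read,
		.reg_write	= foo_reg_write,
	};

	/* in probe: map = devm_regmap_init(dev, NULL, foo, &foo_config); */
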
index 19e3fbfd5757368790980dfec756135e981b0eaf..cb0c4548857282c3bc6b38cfa836ad9474370604 100644 (file)
@@ -94,11 +94,16 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
 #ifdef CONFIG_BCMA_DRIVER_GPIO
 /* driver_gpio.c */
 int bcma_gpio_init(struct bcma_drv_cc *cc);
+int bcma_gpio_unregister(struct bcma_drv_cc *cc);
 #else
 static inline int bcma_gpio_init(struct bcma_drv_cc *cc)
 {
        return -ENOTSUPP;
 }
+static inline int bcma_gpio_unregister(struct bcma_drv_cc *cc)
+{
+       return 0;
+}
 #endif /* CONFIG_BCMA_DRIVER_GPIO */
 
 #endif
index dbda91e4dff5189ccabbb710b7a2a11f0e099651..1f0b83e18f6827f5003669b4015c2a89b2d369c5 100644 (file)
@@ -21,7 +21,7 @@ int bcma_nflash_init(struct bcma_drv_cc *cc)
        struct bcma_bus *bus = cc->core->bus;
 
        if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
-           cc->core->id.rev != 0x38) {
+           cc->core->id.rev != 38) {
                bcma_err(bus, "NAND flash on unsupported board!\n");
                return -ENOTSUPP;
        }
index 9a6f585da2d9f21d232ac4b58cc3a36b442d9b36..71f755c06fc6637497665f31d15f6432d49c3075 100644 (file)
@@ -96,3 +96,8 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
 
        return gpiochip_add(chip);
 }
+
+int bcma_gpio_unregister(struct bcma_drv_cc *cc)
+{
+       return gpiochip_remove(&cc->gpio);
+}
index 4a92f647b58bdef4fa28f28b772d960036469c78..324f9debda88007ac096320dae35340db99e3fe5 100644 (file)
@@ -268,6 +268,13 @@ int bcma_bus_register(struct bcma_bus *bus)
 void bcma_bus_unregister(struct bcma_bus *bus)
 {
        struct bcma_device *cores[3];
+       int err;
+
+       err = bcma_gpio_unregister(&bus->drv_cc);
+       if (err == -EBUSY)
+               bcma_err(bus, "Some GPIOs are still in use.\n");
+       else if (err)
+               bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);
 
        cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
        cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);
index f58a4a4b4dfb3d1042113bbcbf34ee7c8280b7e8..2b8303ad63c97966a3913854825cee60bec5320c 100644 (file)
@@ -168,7 +168,7 @@ static void wake_all_senders(struct drbd_tconn *tconn) {
 }
 
 /* must hold resource->req_lock */
-static void start_new_tl_epoch(struct drbd_tconn *tconn)
+void start_new_tl_epoch(struct drbd_tconn *tconn)
 {
        /* no point closing an epoch, if it is empty, anyways. */
        if (tconn->current_tle_writes == 0)
index 016de6b8bb57c2a646b348eb903fc870536a4512..c08d22964d06784024481cd0005784306341b891 100644 (file)
@@ -267,6 +267,7 @@ struct bio_and_error {
        int error;
 };
 
+extern void start_new_tl_epoch(struct drbd_tconn *tconn);
 extern void drbd_req_destroy(struct kref *kref);
 extern void _req_may_be_done(struct drbd_request *req,
                struct bio_and_error *m);
index 53bf6182bac4077afaa26bdeb94bb52e2414c76b..0fe220cfb9e9d31ce1dffac5e3365311076f66da 100644 (file)
@@ -931,6 +931,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
        enum drbd_state_rv rv = SS_SUCCESS;
        enum sanitize_state_warnings ssw;
        struct after_state_chg_work *ascw;
+       bool did_remote, should_do_remote;
 
        os = drbd_read_state(mdev);
 
@@ -981,11 +982,17 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
            (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
                atomic_inc(&mdev->local_cnt);
 
+       did_remote = drbd_should_do_remote(mdev->state);
        mdev->state.i = ns.i;
+       should_do_remote = drbd_should_do_remote(mdev->state);
        mdev->tconn->susp = ns.susp;
        mdev->tconn->susp_nod = ns.susp_nod;
        mdev->tconn->susp_fen = ns.susp_fen;
 
+       /* put replicated vs not-replicated requests in separate epochs */
+       if (did_remote != should_do_remote)
+               start_new_tl_epoch(mdev->tconn);
+
        if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
                drbd_print_uuids(mdev, "attached to UUIDs");
 
index 9694dd99bbbc7253af1c3626823899f985b1afc2..3fd100990453b76f2297301e662e808819019ff2 100644 (file)
@@ -626,12 +626,13 @@ static void mtip_timeout_function(unsigned long int data)
                }
        }
 
-       if (cmdto_cnt && !test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
+       if (cmdto_cnt) {
                print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
-
-               mtip_restart_port(port);
+               if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
+                       mtip_restart_port(port);
+                       wake_up_interruptible(&port->svc_wait);
+               }
                clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
-               wake_up_interruptible(&port->svc_wait);
        }
 
        if (port->ic_pause_timer) {
@@ -3887,7 +3888,12 @@ static int mtip_block_remove(struct driver_data *dd)
         * Delete our gendisk structure. This also removes the device
         * from /dev
         */
-       del_gendisk(dd->disk);
+       if (dd->disk) {
+               if (dd->disk->queue)
+                       del_gendisk(dd->disk);
+               else
+                       put_disk(dd->disk);
+       }
 
        spin_lock(&rssd_index_lock);
        ida_remove(&rssd_index_ida, dd->index);
@@ -3921,7 +3927,13 @@ static int mtip_block_shutdown(struct driver_data *dd)
                "Shutting down %s ...\n", dd->disk->disk_name);
 
        /* Delete our gendisk structure, and cleanup the blk queue. */
-       del_gendisk(dd->disk);
+       if (dd->disk) {
+               if (dd->disk->queue)
+                       del_gendisk(dd->disk);
+               else
+                       put_disk(dd->disk);
+       }
+
 
        spin_lock(&rssd_index_lock);
        ida_remove(&rssd_index_ida, dd->index);
index 564156a8e57251f68181a706f959f5b5299bfdf5..5814deb6963d52a875708e78a4a3a38eb148145e 100644 (file)
@@ -461,7 +461,7 @@ static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
        int op_len, err;
        void *req_buf;
 
-       if (!(((u64)1 << ((u64)op - 1)) & port->operations))
+       if (!(((u64)1 << (u64)op) & port->operations))
                return -EOPNOTSUPP;
 
        switch (op) {
index 9d8409c02082f5fbb75c456faeb9323b86fbe9f3..8ad21a25bc0d92cc80f48f69f4556bdcc6ef6fa2 100644 (file)
@@ -889,6 +889,7 @@ static void virtblk_remove(struct virtio_device *vdev)
 {
        struct virtio_blk *vblk = vdev->priv;
        int index = vblk->index;
+       int refc;
 
        /* Prevent config work handler from accessing the device. */
        mutex_lock(&vblk->config_lock);
@@ -903,11 +904,15 @@ static void virtblk_remove(struct virtio_device *vdev)
 
        flush_work(&vblk->config_work);
 
+       refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
        put_disk(vblk->disk);
        mempool_destroy(vblk->pool);
        vdev->config->del_vqs(vdev);
        kfree(vblk);
-       ida_simple_remove(&vd_index_ida, index);
+
+       /* Only free device id if we don't have any users */
+       if (refc == 1)
+               ida_simple_remove(&vd_index_ida, index);
 }
 
 #ifdef CONFIG_PM
index 74374fb762aa5bcf835e73a7390f12b7403525ee..5ac841ff6cc73acd59c7439722ab799a27fac93b 100644 (file)
@@ -161,10 +161,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 static void make_response(struct xen_blkif *blkif, u64 id,
                          unsigned short op, int st);
 
-#define foreach_grant(pos, rbtree, node) \
-       for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node); \
+#define foreach_grant_safe(pos, n, rbtree, node) \
+       for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
+            (n) = rb_next(&(pos)->node); \
             &(pos)->node != NULL; \
-            (pos) = container_of(rb_next(&(pos)->node), typeof(*(pos)), node))
+            (pos) = container_of(n, typeof(*(pos)), node), \
+            (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
 
 
 static void add_persistent_gnt(struct rb_root *root,
@@ -217,10 +219,11 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
+       struct rb_node *n;
        int ret = 0;
        int segs_to_unmap = 0;
 
-       foreach_grant(persistent_gnt, root, node) {
+       foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                        BLKBACK_INVALID_HANDLE);
                gnttab_set_unmap_op(&unmap[segs_to_unmap],
@@ -230,9 +233,6 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
                        persistent_gnt->handle);
 
                pages[segs_to_unmap] = persistent_gnt->page;
-               rb_erase(&persistent_gnt->node, root);
-               kfree(persistent_gnt);
-               num--;
 
                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
                        !rb_next(&persistent_gnt->node)) {
@@ -241,6 +241,10 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
                        BUG_ON(ret);
                        segs_to_unmap = 0;
                }
+
+               rb_erase(&persistent_gnt->node, root);
+               kfree(persistent_gnt);
+               num--;
        }
        BUG_ON(num != 0);
 }
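
The foreach_grant_safe() change above is the classic "safe" traversal: fetch the successor before the current node is erased and freed, so the walk never dereferences freed memory. The same idea in plain C on a singly linked list (a standalone sketch, not the kernel macro itself):

    #include <stdlib.h>

    struct node {
        int val;
        struct node *next;
    };

    /*
     * Free every node in the list: cache the successor before the current
     * node is destroyed, so the walk never touches freed memory.
     */
    static void free_all(struct node *head)
    {
        struct node *pos = head;
        struct node *n;

        while (pos) {
            n = pos->next;          /* grab the next pointer first */
            free(pos);              /* now destroying pos is safe */
            pos = n;
        }
    }

    int main(void)
    {
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct node *e = malloc(sizeof(*e));
            if (!e)
                break;
            e->val = i;
            e->next = head;
            head = e;
        }
        free_all(head);
        return 0;
    }
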
index 96e9b00db08118a164d03845df86139169e78f71..11043c18ac5ab01fcfd2e492fff70a87260143fd 100644 (file)
@@ -792,6 +792,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 {
        struct llist_node *all_gnts;
        struct grant *persistent_gnt;
+       struct llist_node *n;
 
        /* Prevent new requests being issued until we fix things up. */
        spin_lock_irq(&info->io_lock);
@@ -804,7 +805,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
        /* Remove all persistent grants */
        if (info->persistent_gnts_c) {
                all_gnts = llist_del_all(&info->persistent_gnts);
-               llist_for_each_entry(persistent_gnt, all_gnts, node) {
+               llist_for_each_entry_safe(persistent_gnt, n, all_gnts, node) {
                        gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
                        __free_page(pfn_to_page(persistent_gnt->pfn));
                        kfree(persistent_gnt);
@@ -835,7 +836,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
                             struct blkif_response *bret)
 {
-       int i;
+       int i = 0;
        struct bio_vec *bvec;
        struct req_iterator iter;
        unsigned long flags;
@@ -852,7 +853,8 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
                 */
                rq_for_each_segment(bvec, s->request, iter) {
                        BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE);
-                       i = offset >> PAGE_SHIFT;
+                       if (bvec->bv_offset < offset)
+                               i++;
                        BUG_ON(i >= s->req.u.rw.nr_segments);
                        shared_data = kmap_atomic(
                                pfn_to_page(s->grants_used[i]->pfn));
@@ -861,7 +863,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
                                bvec->bv_len);
                        bvec_kunmap_irq(bvec_data, &flags);
                        kunmap_atomic(shared_data);
-                       offset += bvec->bv_len;
+                       offset = bvec->bv_offset + bvec->bv_len;
                }
        }
        /* Add the persistent grant into the list of free grants */
index b00000e8aef6f9cb06955049d3fa4faaba25e38b..33c9a44a967899ac5b46e9b3e9951b2e80ff164d 100644 (file)
@@ -77,10 +77,15 @@ static struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x0CF3, 0x311D) },
        { USB_DEVICE(0x13d3, 0x3375) },
        { USB_DEVICE(0x04CA, 0x3005) },
+       { USB_DEVICE(0x04CA, 0x3006) },
+       { USB_DEVICE(0x04CA, 0x3008) },
        { USB_DEVICE(0x13d3, 0x3362) },
        { USB_DEVICE(0x0CF3, 0xE004) },
        { USB_DEVICE(0x0930, 0x0219) },
        { USB_DEVICE(0x0489, 0xe057) },
+       { USB_DEVICE(0x13d3, 0x3393) },
+       { USB_DEVICE(0x0489, 0xe04e) },
+       { USB_DEVICE(0x0489, 0xe056) },
 
        /* Atheros AR5BBU12 with sflash firmware */
        { USB_DEVICE(0x0489, 0xE02C) },
@@ -104,10 +109,15 @@ static struct usb_device_id ath3k_blist_tbl[] = {
        { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
 
        /* Atheros AR5BBU22 with sflash firmware */
        { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
index a1d4ede5b892d4516b294f91aca91633864e7d43..7e351e345476c1bcc2141f95e41fe43a3e65e612 100644 (file)
@@ -135,10 +135,15 @@ static struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
 
        /* Atheros AR5BBU12 with sflash firmware */
        { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
index 684b0d53764f38d218b8cff41d5a9e0c3e248979..ee4dbeafb377c5a7786e97372962db2a05904427 100644 (file)
@@ -2062,7 +2062,8 @@ static void virtcons_remove(struct virtio_device *vdev)
        /* Disable interrupts for vqs */
        vdev->config->reset(vdev);
        /* Finish up work that's lined up */
-       cancel_work_sync(&portdev->control_work);
+       if (use_multiport(portdev))
+               cancel_work_sync(&portdev->control_work);
 
        list_for_each_entry_safe(port, port2, &portdev->ports, list)
                unplug_port(port);
index ff004578a119e72d989d3c9baeb45dbf84f17b7b..9dd2551a0a41aebdb10f780b6657e9f3ce3de94c 100644 (file)
@@ -124,7 +124,7 @@ void __init of_cpu_clk_setup(struct device_node *node)
 
        clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL);
        if (WARN_ON(!clks))
-               return;
+               goto clks_out;
 
        for_each_node_by_type(dn, "cpu") {
                struct clk_init_data init;
@@ -134,11 +134,11 @@ void __init of_cpu_clk_setup(struct device_node *node)
                int cpu, err;
 
                if (WARN_ON(!clk_name))
-                       return;
+                       goto bail_out;
 
                err = of_property_read_u32(dn, "reg", &cpu);
                if (WARN_ON(err))
-                       return;
+                       goto bail_out;
 
                sprintf(clk_name, "cpu%d", cpu);
                parent_clk = of_clk_get(node, 0);
@@ -167,6 +167,9 @@ void __init of_cpu_clk_setup(struct device_node *node)
        return;
 bail_out:
        kfree(clks);
+       while(ncpus--)
+               kfree(cpuclk[ncpus].clk_name);
+clks_out:
        kfree(cpuclk);
 }
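
The clk-cpu hunk above replaces early returns with goto labels so that everything allocated before a failure is released. A small standalone C sketch of that unwind pattern (names are made up; the shape mirrors the bail_out/clks_out labels):

    #include <stdlib.h>

    /*
     * Hypothetical setup routine: every failure jumps to the label that undoes
     * only what was already allocated. On success the caller owns ids/names.
     */
    static int setup(size_t n, int **out_ids, char ***out_names)
    {
        int *ids;
        char **names;
        size_t i;

        ids = calloc(n, sizeof(*ids));
        if (!ids)
            goto err_ids;

        names = calloc(n, sizeof(*names));
        if (!names)
            goto err_names;

        for (i = 0; i < n; i++) {
            names[i] = malloc(16);          /* e.g. room for a "cpuN" string */
            if (!names[i])
                goto err_strings;
        }

        *out_ids = ids;
        *out_names = names;
        return 0;

    err_strings:
        while (i--)                         /* free only the strings built so far */
            free(names[i]);
        free(names);
    err_names:
        free(ids);
    err_ids:
        return -1;
    }

    int main(void)
    {
        int *ids;
        char **names;

        if (setup(4, &ids, &names))
            return 1;
        for (size_t i = 0; i < 4; i++)
            free(names[i]);
        free(names);
        free(ids);
        return 0;
    }
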
 
index 934854ae5eb4a436205d644c76c8011a5c41de3e..7227cd7340423c595a2a1b29488e864662198634 100644 (file)
@@ -106,7 +106,7 @@ config X86_POWERNOW_K7_ACPI
 config X86_POWERNOW_K8
        tristate "AMD Opteron/Athlon64 PowerNow!"
        select CPU_FREQ_TABLE
-       depends on ACPI && ACPI_PROCESSOR
+       depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
        help
          This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
          Support for K10 and newer processors is now in acpi-cpufreq.
index 0d048f6a2b23a3bf9476a658cac846ccd321bd3c..7b0d49d78c61fc6c9fd312b0d05663ad42edf099 100644 (file)
@@ -1030,4 +1030,11 @@ MODULE_PARM_DESC(acpi_pstate_strict,
 late_initcall(acpi_cpufreq_init);
 module_exit(acpi_cpufreq_exit);
 
+static const struct x86_cpu_id acpi_cpufreq_ids[] = {
+       X86_FEATURE_MATCH(X86_FEATURE_ACPI),
+       X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
+       {}
+};
+MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
+
 MODULE_ALIAS("acpi");
index 52bf36d599f575fcdd0bfe4fc3788db0d492e5a8..debc5a7c8db6207feeb9c428ccc44b8fcaed8d2f 100644 (file)
@@ -71,12 +71,15 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
        }
 
        if (cpu_reg) {
+               rcu_read_lock();
                opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
                if (IS_ERR(opp)) {
+                       rcu_read_unlock();
                        pr_err("failed to find OPP for %ld\n", freq_Hz);
                        return PTR_ERR(opp);
                }
                volt = opp_get_voltage(opp);
+               rcu_read_unlock();
                tol = volt * voltage_tolerance / 100;
                volt_old = regulator_get_voltage(cpu_reg);
        }
@@ -236,12 +239,14 @@ static int cpu0_cpufreq_driver_init(void)
                 */
                for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
                        ;
+               rcu_read_lock();
                opp = opp_find_freq_exact(cpu_dev,
                                freq_table[0].frequency * 1000, true);
                min_uV = opp_get_voltage(opp);
                opp = opp_find_freq_exact(cpu_dev,
                                freq_table[i-1].frequency * 1000, true);
                max_uV = opp_get_voltage(opp);
+               rcu_read_unlock();
                ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
                if (ret > 0)
                        transition_latency += ret * 1000;
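
Both cpufreq hunks above (and the devfreq and exynos4 bus changes later in this series) apply the same rule: the struct opp returned by the opp_find_freq_*() helpers is RCU-protected, so the frequency/voltage must be copied out before rcu_read_unlock(). A condensed kernel-style sketch of that pattern; it is not standalone code, and the function and variable names are illustrative only:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/opp.h>
    #include <linux/rcupdate.h>

    static int lookup_opp_voltage(struct device *dev, unsigned long target_hz,
                                  unsigned long *volt_uv)
    {
            struct opp *opp;

            rcu_read_lock();
            opp = opp_find_freq_ceil(dev, &target_hz);
            if (IS_ERR(opp)) {
                    rcu_read_unlock();
                    return PTR_ERR(opp);
            }
            /* Copy out everything needed while still inside the read section. */
            *volt_uv = opp_get_voltage(opp);
            rcu_read_unlock();

            return 0;
    }
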
index 1f3417a8322dfe8b0ca773873e058f7eb3a6c283..97102b05843fa580435ae0ae5b872d9467b577df 100644 (file)
@@ -110,13 +110,16 @@ static int omap_target(struct cpufreq_policy *policy,
        freq = ret;
 
        if (mpu_reg) {
+               rcu_read_lock();
                opp = opp_find_freq_ceil(mpu_dev, &freq);
                if (IS_ERR(opp)) {
+                       rcu_read_unlock();
                        dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n",
                                __func__, freqs.new);
                        return -EINVAL;
                }
                volt = opp_get_voltage(opp);
+               rcu_read_unlock();
                tol = volt * OPP_TOLERANCE / 100;
                volt_old = regulator_get_voltage(mpu_reg);
        }
index fb4a7dd57f94f479adf8a3dd93e35e4f348b65d3..e1f6860e069c7350d3fbb5bb499a67bfc581f760 100644 (file)
@@ -69,24 +69,15 @@ int cpuidle_play_dead(void)
 {
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
-       int i, dead_state = -1;
-       int power_usage = INT_MAX;
+       int i;
 
        if (!drv)
                return -ENODEV;
 
        /* Find lowest-power state that supports long-term idle */
-       for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
-               struct cpuidle_state *s = &drv->states[i];
-
-               if (s->power_usage < power_usage && s->enter_dead) {
-                       power_usage = s->power_usage;
-                       dead_state = i;
-               }
-       }
-
-       if (dead_state != -1)
-               return drv->states[dead_state].enter_dead(dev, dead_state);
+       for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
+               if (drv->states[i].enter_dead)
+                       return drv->states[i].enter_dead(dev, i);
 
        return -ENODEV;
 }
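
The cpuidle change above drops the per-state power_usage bookkeeping and simply walks the state table from the deepest entry downwards, taking the first one that implements enter_dead(); this leans on the existing cpuidle assumption (noted in the comment removed from the driver core below) that state Cn uses less power than C(n-1). A standalone C sketch of that selection (struct and field names are made up):

    #include <stdio.h>

    struct idle_state {
        const char *name;
        int (*enter_dead)(int index);       /* NULL when unusable for play_dead */
    };

    static int enter_deepest_dead_state(struct idle_state *states, int count)
    {
        /* Walk from the deepest state downwards; take the first usable one. */
        for (int i = count - 1; i >= 0; i--)
            if (states[i].enter_dead)
                return states[i].enter_dead(i);
        return -1;                          /* nothing suitable found */
    }

    static int enter_c6(int index)
    {
        printf("entering state %d\n", index);
        return 0;
    }

    int main(void)
    {
        struct idle_state states[] = {
            { "C1", NULL },
            { "C3", NULL },
            { "C6", enter_c6 },
        };

        return enter_deepest_dead_state(states, 3);
    }
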
index c2b281afe0ed03ce13a3a3122d96084319a93635..422c7b69ba7c50e728fa3af5af346a85a553af8f 100644 (file)
@@ -19,34 +19,9 @@ DEFINE_SPINLOCK(cpuidle_driver_lock);
 static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu);
 static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu);
 
-static void set_power_states(struct cpuidle_driver *drv)
-{
-       int i;
-
-       /*
-        * cpuidle driver should set the drv->power_specified bit
-        * before registering if the driver provides
-        * power_usage numbers.
-        *
-        * If power_specified is not set,
-        * we fill in power_usage with decreasing values as the
-        * cpuidle code has an implicit assumption that state Cn
-        * uses less power than C(n-1).
-        *
-        * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
-        * an power value of -1.  So we use -2, -3, etc, for other
-        * c-states.
-        */
-       for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
-               drv->states[i].power_usage = -1 - i;
-}
-
 static void __cpuidle_driver_init(struct cpuidle_driver *drv)
 {
        drv->refcnt = 0;
-
-       if (!drv->power_specified)
-               set_power_states(drv);
 }
 
 static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu)
index 20ea33afdda10c3e80ea99e716790c4494babf3e..fe343a06b7da3278ba68c9c15c261ea14afde685 100644 (file)
@@ -312,7 +312,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
        struct menu_device *data = &__get_cpu_var(menu_devices);
        int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
-       int power_usage = INT_MAX;
        int i;
        int multiplier;
        struct timespec t;
@@ -383,11 +382,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
                if (s->exit_latency * multiplier > data->predicted_us)
                        continue;
 
-               if (s->power_usage < power_usage) {
-                       power_usage = s->power_usage;
-                       data->last_state_idx = i;
-                       data->exit_us = s->exit_latency;
-               }
+               data->last_state_idx = i;
+               data->exit_us = s->exit_latency;
        }
 
        /* not deepest C-state chosen for low predicted residency */
index 340942946106d22489124923a2ccd766bc22f2ef..428754af62366cda8fe71dee47b128c5da602969 100644 (file)
@@ -374,7 +374,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device);
 
        /* state statistics */
-       for (i = 0; i < drv->state_count; i++) {
+       for (i = 0; i < device->state_count; i++) {
                kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
                if (!kobj)
                        goto error_state;
index 53766f39aaddfd5db9147842b40825fb1399453b..3b367973a8028c5d1cd32bd823bcf9e005d269b6 100644 (file)
@@ -994,6 +994,11 @@ module_exit(devfreq_exit);
  * @freq:      The frequency given to target function
  * @flags:     Flags handed from devfreq framework.
  *
+ * Locking: This function must be called under rcu_read_lock(). opp is an
+ * RCU-protected pointer, so the pointer returned is only valid for use
+ * with opp_get_{voltage, freq} while the RCU read-side lock is held.
+ * In other words, the returned pointer must be used before the caller
+ * drops the lock with rcu_read_unlock().
  */
 struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
                                    u32 flags)
index 80c745e83082bf6d9dade7a81dffbb89931632a7..46d94e9e95b53ae033af012ef745822e0b7f5ccd 100644 (file)
@@ -73,6 +73,16 @@ enum busclk_level_idx {
 #define EX4210_LV_NUM  (LV_2 + 1)
 #define EX4x12_LV_NUM  (LV_4 + 1)
 
+/**
+ * struct busfreq_opp_info - opp information for bus
+ * @rate:      Frequency in hertz
+ * @volt:      Voltage in microvolts corresponding to this OPP
+ */
+struct busfreq_opp_info {
+       unsigned long rate;
+       unsigned long volt;
+};
+
 struct busfreq_data {
        enum exynos4_busf_type type;
        struct device *dev;
@@ -80,7 +90,7 @@ struct busfreq_data {
        bool disabled;
        struct regulator *vdd_int;
        struct regulator *vdd_mif; /* Exynos4412/4212 only */
-       struct opp *curr_opp;
+       struct busfreq_opp_info curr_oppinfo;
        struct exynos4_ppmu dmc[2];
 
        struct notifier_block pm_notifier;
@@ -296,13 +306,14 @@ static unsigned int exynos4x12_clkdiv_sclkip[][3] = {
 };
 
 
-static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
+static int exynos4210_set_busclk(struct busfreq_data *data,
+                                struct busfreq_opp_info *oppi)
 {
        unsigned int index;
        unsigned int tmp;
 
        for (index = LV_0; index < EX4210_LV_NUM; index++)
-               if (opp_get_freq(opp) == exynos4210_busclk_table[index].clk)
+               if (oppi->rate == exynos4210_busclk_table[index].clk)
                        break;
 
        if (index == EX4210_LV_NUM)
@@ -361,13 +372,14 @@ static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
        return 0;
 }
 
-static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp)
+static int exynos4x12_set_busclk(struct busfreq_data *data,
+                                struct busfreq_opp_info *oppi)
 {
        unsigned int index;
        unsigned int tmp;
 
        for (index = LV_0; index < EX4x12_LV_NUM; index++)
-               if (opp_get_freq(opp) == exynos4x12_mifclk_table[index].clk)
+               if (oppi->rate == exynos4x12_mifclk_table[index].clk)
                        break;
 
        if (index == EX4x12_LV_NUM)
@@ -576,11 +588,12 @@ static int exynos4x12_get_intspec(unsigned long mifclk)
        return -EINVAL;
 }
 
-static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
-                              struct opp *oldopp)
+static int exynos4_bus_setvolt(struct busfreq_data *data,
+                              struct busfreq_opp_info *oppi,
+                              struct busfreq_opp_info *oldoppi)
 {
        int err = 0, tmp;
-       unsigned long volt = opp_get_voltage(opp);
+       unsigned long volt = oppi->volt;
 
        switch (data->type) {
        case TYPE_BUSF_EXYNOS4210:
@@ -595,11 +608,11 @@ static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
                if (err)
                        break;
 
-               tmp = exynos4x12_get_intspec(opp_get_freq(opp));
+               tmp = exynos4x12_get_intspec(oppi->rate);
                if (tmp < 0) {
                        err = tmp;
                        regulator_set_voltage(data->vdd_mif,
-                                             opp_get_voltage(oldopp),
+                                             oldoppi->volt,
                                              MAX_SAFEVOLT);
                        break;
                }
@@ -609,7 +622,7 @@ static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
                /*  Try to recover */
                if (err)
                        regulator_set_voltage(data->vdd_mif,
-                                             opp_get_voltage(oldopp),
+                                             oldoppi->volt,
                                              MAX_SAFEVOLT);
                break;
        default:
@@ -626,17 +639,26 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
        struct platform_device *pdev = container_of(dev, struct platform_device,
                                                    dev);
        struct busfreq_data *data = platform_get_drvdata(pdev);
-       struct opp *opp = devfreq_recommended_opp(dev, _freq, flags);
-       unsigned long freq = opp_get_freq(opp);
-       unsigned long old_freq = opp_get_freq(data->curr_opp);
+       struct opp *opp;
+       unsigned long freq;
+       unsigned long old_freq = data->curr_oppinfo.rate;
+       struct busfreq_opp_info new_oppinfo;
 
-       if (IS_ERR(opp))
+       rcu_read_lock();
+       opp = devfreq_recommended_opp(dev, _freq, flags);
+       if (IS_ERR(opp)) {
+               rcu_read_unlock();
                return PTR_ERR(opp);
+       }
+       new_oppinfo.rate = opp_get_freq(opp);
+       new_oppinfo.volt = opp_get_voltage(opp);
+       rcu_read_unlock();
+       freq = new_oppinfo.rate;
 
        if (old_freq == freq)
                return 0;
 
-       dev_dbg(dev, "targetting %lukHz %luuV\n", freq, opp_get_voltage(opp));
+       dev_dbg(dev, "targeting %lukHz %luuV\n", freq, new_oppinfo.volt);
 
        mutex_lock(&data->lock);
 
@@ -644,17 +666,18 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
                goto out;
 
        if (old_freq < freq)
-               err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+               err = exynos4_bus_setvolt(data, &new_oppinfo,
+                                         &data->curr_oppinfo);
        if (err)
                goto out;
 
        if (old_freq != freq) {
                switch (data->type) {
                case TYPE_BUSF_EXYNOS4210:
-                       err = exynos4210_set_busclk(data, opp);
+                       err = exynos4210_set_busclk(data, &new_oppinfo);
                        break;
                case TYPE_BUSF_EXYNOS4x12:
-                       err = exynos4x12_set_busclk(data, opp);
+                       err = exynos4x12_set_busclk(data, &new_oppinfo);
                        break;
                default:
                        err = -EINVAL;
@@ -664,11 +687,12 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
                goto out;
 
        if (old_freq > freq)
-               err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+               err = exynos4_bus_setvolt(data, &new_oppinfo,
+                                         &data->curr_oppinfo);
        if (err)
                goto out;
 
-       data->curr_opp = opp;
+       data->curr_oppinfo = new_oppinfo;
 out:
        mutex_unlock(&data->lock);
        return err;
@@ -702,7 +726,7 @@ static int exynos4_bus_get_dev_status(struct device *dev,
 
        exynos4_read_ppmu(data);
        busier_dmc = exynos4_get_busier_dmc(data);
-       stat->current_frequency = opp_get_freq(data->curr_opp);
+       stat->current_frequency = data->curr_oppinfo.rate;
 
        if (busier_dmc)
                addr = S5P_VA_DMC1;
@@ -933,6 +957,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
        struct busfreq_data *data = container_of(this, struct busfreq_data,
                                                 pm_notifier);
        struct opp *opp;
+       struct busfreq_opp_info new_oppinfo;
        unsigned long maxfreq = ULONG_MAX;
        int err = 0;
 
@@ -943,18 +968,29 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
 
                data->disabled = true;
 
+               rcu_read_lock();
                opp = opp_find_freq_floor(data->dev, &maxfreq);
+               if (IS_ERR(opp)) {
+                       rcu_read_unlock();
+                       dev_err(data->dev, "%s: unable to find a min freq\n",
+                               __func__);
+                       return PTR_ERR(opp);
+               }
+               new_oppinfo.rate = opp_get_freq(opp);
+               new_oppinfo.volt = opp_get_voltage(opp);
+               rcu_read_unlock();
 
-               err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+               err = exynos4_bus_setvolt(data, &new_oppinfo,
+                                         &data->curr_oppinfo);
                if (err)
                        goto unlock;
 
                switch (data->type) {
                case TYPE_BUSF_EXYNOS4210:
-                       err = exynos4210_set_busclk(data, opp);
+                       err = exynos4210_set_busclk(data, &new_oppinfo);
                        break;
                case TYPE_BUSF_EXYNOS4x12:
-                       err = exynos4x12_set_busclk(data, opp);
+                       err = exynos4x12_set_busclk(data, &new_oppinfo);
                        break;
                default:
                        err = -EINVAL;
@@ -962,7 +998,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
                if (err)
                        goto unlock;
 
-               data->curr_opp = opp;
+               data->curr_oppinfo = new_oppinfo;
 unlock:
                mutex_unlock(&data->lock);
                if (err)
@@ -1027,13 +1063,17 @@ static int exynos4_busfreq_probe(struct platform_device *pdev)
                }
        }
 
+       rcu_read_lock();
        opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
        if (IS_ERR(opp)) {
+               rcu_read_unlock();
                dev_err(dev, "Invalid initial frequency %lu kHz.\n",
                        exynos4_devfreq_profile.initial_freq);
                return PTR_ERR(opp);
        }
-       data->curr_opp = opp;
+       data->curr_oppinfo.rate = opp_get_freq(opp);
+       data->curr_oppinfo.volt = opp_get_voltage(opp);
+       rcu_read_unlock();
 
        platform_set_drvdata(pdev, data);
 
index dbf0e6f8de8a8c64d4b29b33b9c834ae3ab904c7..a7dcf78b1ff809e4a6a865884779856330d15510 100644 (file)
@@ -684,9 +684,8 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                        break;
                }
 
-               imxdmac->hw_chaining = 1;
-               if (!imxdma_hw_chain(imxdmac))
-                       return -EINVAL;
+               imxdmac->hw_chaining = 0;
+
                imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
                        ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
                        CCR_REN;
index e5fc944de1f025edd5492e4208ae0dce5a5a5093..3e9d66920eb3491d85b8c6027ac786a76242e107 100644 (file)
@@ -951,7 +951,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
                        goto free_resources;
                }
        }
-       dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE);
+       dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
 
        /* skip validate if the capability is not present */
        if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
index c39e61bc8172e5c277dfd4cdb0997cfcc08e407f..3cad856fe67f9f9518cbf79b0f02bd41c32733b8 100644 (file)
@@ -266,6 +266,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
                if (async_tx_test_ack(&dma_desc->txd)) {
                        list_del(&dma_desc->node);
                        spin_unlock_irqrestore(&tdc->lock, flags);
+                       dma_desc->txd.flags = 0;
                        return dma_desc;
                }
        }
@@ -1050,7 +1051,9 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
                                        TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
        ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
 
-       csr |= TEGRA_APBDMA_CSR_FLOW | TEGRA_APBDMA_CSR_IE_EOC;
+       csr |= TEGRA_APBDMA_CSR_FLOW;
+       if (flags & DMA_PREP_INTERRUPT)
+               csr |= TEGRA_APBDMA_CSR_IE_EOC;
        csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
 
        apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
@@ -1095,7 +1098,8 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
                mem += len;
        }
        sg_req->last_sg = true;
-       dma_desc->txd.flags = 0;
+       if (flags & DMA_CTRL_ACK)
+               dma_desc->txd.flags = DMA_CTRL_ACK;
 
        /*
         * Make sure that mode should not be conflicting with currently
index ad8bf2aa629d3b23510053f6c88b23c0c2d9d504..2d3f8825e8b80ce88e9a6f5c5a0cdfe7ffd5145a 100644 (file)
@@ -31,7 +31,7 @@ static struct ecc_settings **ecc_stngs;
  *
  *FIXME: Produce a better mapping/linearisation.
  */
-struct scrubrate {
+static const struct scrubrate {
        u32 scrubval;           /* bit pattern for scrub rate */
        u32 bandwidth;          /* bandwidth consumed (bytes/sec) */
 } scrubrates[] = {
@@ -239,7 +239,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
  * DRAM base/limit associated with node_id
  */
 static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
-                                  unsigned nid)
+                                  u8 nid)
 {
        u64 addr;
 
@@ -265,7 +265,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
                                                u64 sys_addr)
 {
        struct amd64_pvt *pvt;
-       unsigned node_id;
+       u8 node_id;
        u32 intlv_en, bits;
 
        /*
@@ -939,7 +939,8 @@ static u64 get_error_address(struct mce *m)
                struct amd64_pvt *pvt;
                u64 cc6_base, tmp_addr;
                u32 tmp;
-               u8 mce_nid, intlv_en;
+               u16 mce_nid;
+               u8 intlv_en;
 
                if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7)
                        return addr;
@@ -979,10 +980,29 @@ static u64 get_error_address(struct mce *m)
        return addr;
 }
 
+static struct pci_dev *pci_get_related_function(unsigned int vendor,
+                                               unsigned int device,
+                                               struct pci_dev *related)
+{
+       struct pci_dev *dev = NULL;
+
+       while ((dev = pci_get_device(vendor, device, dev))) {
+               if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
+                   (dev->bus->number == related->bus->number) &&
+                   (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
+                       break;
+       }
+
+       return dev;
+}
+
 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
 {
+       struct amd_northbridge *nb;
+       struct pci_dev *misc, *f1 = NULL;
        struct cpuinfo_x86 *c = &boot_cpu_data;
        int off = range << 3;
+       u32 llim;
 
        amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
        amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
@@ -996,30 +1016,32 @@ static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
        amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
        amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
 
-       /* Factor in CC6 save area by reading dst node's limit reg */
-       if (c->x86 == 0x15) {
-               struct pci_dev *f1 = NULL;
-               u8 nid = dram_dst_node(pvt, range);
-               u32 llim;
+       /* F15h: factor in CC6 save area by reading dst node's limit reg */
+       if (c->x86 != 0x15)
+               return;
 
-               f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1));
-               if (WARN_ON(!f1))
-                       return;
+       nb = node_to_amd_nb(dram_dst_node(pvt, range));
+       if (WARN_ON(!nb))
+               return;
 
-               amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
+       misc = nb->misc;
+       f1 = pci_get_related_function(misc->vendor, PCI_DEVICE_ID_AMD_15H_NB_F1, misc);
+       if (WARN_ON(!f1))
+               return;
 
-               pvt->ranges[range].lim.lo &= GENMASK(0, 15);
+       amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
 
-                                           /* {[39:27],111b} */
-               pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
+       pvt->ranges[range].lim.lo &= GENMASK(0, 15);
 
-               pvt->ranges[range].lim.hi &= GENMASK(0, 7);
+                                   /* {[39:27],111b} */
+       pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
 
-                                           /* [47:40] */
-               pvt->ranges[range].lim.hi |= llim >> 13;
+       pvt->ranges[range].lim.hi &= GENMASK(0, 7);
 
-               pci_dev_put(f1);
-       }
+                                   /* [47:40] */
+       pvt->ranges[range].lim.hi |= llim >> 13;
+
+       pci_dev_put(f1);
 }
 
 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
@@ -1305,7 +1327,7 @@ static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
 }
 
 /* Convert the sys_addr to the normalized DCT address */
-static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
+static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
                                 u64 sys_addr, bool hi_rng,
                                 u32 dct_sel_base_addr)
 {
@@ -1381,7 +1403,7 @@ static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
  *     -EINVAL:  NOT FOUND
  *     0..csrow = Chip-Select Row
  */
-static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
+static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
 {
        struct mem_ctl_info *mci;
        struct amd64_pvt *pvt;
@@ -1672,23 +1694,6 @@ static struct amd64_family_type amd64_family_types[] = {
        },
 };
 
-static struct pci_dev *pci_get_related_function(unsigned int vendor,
-                                               unsigned int device,
-                                               struct pci_dev *related)
-{
-       struct pci_dev *dev = NULL;
-
-       dev = pci_get_device(vendor, device, dev);
-       while (dev) {
-               if ((dev->bus->number == related->bus->number) &&
-                   (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
-                       break;
-               dev = pci_get_device(vendor, device, dev);
-       }
-
-       return dev;
-}
-
 /*
  * These are tables of eigenvectors (one per line) which can be used for the
  * construction of the syndrome tables. The modified syndrome search algorithm
@@ -1696,7 +1701,7 @@ static struct pci_dev *pci_get_related_function(unsigned int vendor,
  *
  * Algorithm courtesy of Ross LaFetra from AMD.
  */
-static u16 x4_vectors[] = {
+static const u16 x4_vectors[] = {
        0x2f57, 0x1afe, 0x66cc, 0xdd88,
        0x11eb, 0x3396, 0x7f4c, 0xeac8,
        0x0001, 0x0002, 0x0004, 0x0008,
@@ -1735,7 +1740,7 @@ static u16 x4_vectors[] = {
        0x19a9, 0x2efe, 0xb5cc, 0x6f88,
 };
 
-static u16 x8_vectors[] = {
+static const u16 x8_vectors[] = {
        0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
        0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
        0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
@@ -1757,7 +1762,7 @@ static u16 x8_vectors[] = {
        0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
 };
 
-static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
+static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
                           unsigned v_dim)
 {
        unsigned int i, err_sym;
@@ -2181,7 +2186,7 @@ static int init_csrows(struct mem_ctl_info *mci)
 }
 
 /* get all cores on this DCT */
-static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
+static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
 {
        int cpu;
 
@@ -2191,7 +2196,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
 }
 
 /* check MCG_CTL on all the cpus on this node */
-static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
+static bool amd64_nb_mce_bank_enabled_on_node(u16 nid)
 {
        cpumask_var_t mask;
        int cpu, nbe;
@@ -2224,7 +2229,7 @@ out:
        return ret;
 }
 
-static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
+static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
 {
        cpumask_var_t cmask;
        int cpu;
@@ -2262,7 +2267,7 @@ static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
        return 0;
 }
 
-static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
+static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
                                       struct pci_dev *F3)
 {
        bool ret = true;
@@ -2314,7 +2319,7 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
        return ret;
 }
 
-static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
+static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
                                        struct pci_dev *F3)
 {
        u32 value, mask = 0x3;          /* UECC/CECC enable */
@@ -2353,7 +2358,7 @@ static const char *ecc_msg =
        "'ecc_enable_override'.\n"
        " (Note that use of the override may cause unknown side effects.)\n";
 
-static bool ecc_enabled(struct pci_dev *F3, u8 nid)
+static bool ecc_enabled(struct pci_dev *F3, u16 nid)
 {
        u32 value;
        u8 ecc_en = 0;
@@ -2474,7 +2479,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
        struct mem_ctl_info *mci = NULL;
        struct edac_mc_layer layers[2];
        int err = 0, ret;
-       u8 nid = get_node_id(F2);
+       u16 nid = amd_get_node_id(F2);
 
        ret = -ENOMEM;
        pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
@@ -2566,7 +2571,7 @@ err_ret:
 static int amd64_probe_one_instance(struct pci_dev *pdev,
                                    const struct pci_device_id *mc_type)
 {
-       u8 nid = get_node_id(pdev);
+       u16 nid = amd_get_node_id(pdev);
        struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
        struct ecc_settings *s;
        int ret = 0;
@@ -2616,7 +2621,7 @@ static void amd64_remove_one_instance(struct pci_dev *pdev)
 {
        struct mem_ctl_info *mci;
        struct amd64_pvt *pvt;
-       u8 nid = get_node_id(pdev);
+       u16 nid = amd_get_node_id(pdev);
        struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
        struct ecc_settings *s = ecc_stngs[nid];
 
index e864f407806c2bef8d0240e01df639fe51c9d909..35637d83f2351270f8b73a0358f8fe43b0bc6825 100644 (file)
 /* MSRs */
 #define MSR_MCGCTL_NBE                 BIT(4)
 
-/* AMD sets the first MC device at device ID 0x18. */
-static inline u8 get_node_id(struct pci_dev *pdev)
-{
-       return PCI_SLOT(pdev->devfn) - 0x18;
-}
-
 enum amd_families {
        K8_CPUS = 0,
        F10_CPUS,
@@ -340,7 +334,7 @@ struct amd64_pvt {
        /* pci_device handles which we utilize */
        struct pci_dev *F1, *F2, *F3;
 
-       unsigned mc_node_id;    /* MC index of this MC node */
+       u16 mc_node_id;         /* MC index of this MC node */
        int ext_model;          /* extended model value of this node */
        int channel_count;
 
@@ -393,7 +387,7 @@ struct err_info {
        u32 offset;
 };
 
-static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i)
+static inline u64 get_dram_base(struct amd64_pvt *pvt, u8 i)
 {
        u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8;
 
@@ -403,7 +397,7 @@ static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i)
        return (((u64)pvt->ranges[i].base.hi & 0x000000ff) << 40) | addr;
 }
 
-static inline u64 get_dram_limit(struct amd64_pvt *pvt, unsigned i)
+static inline u64 get_dram_limit(struct amd64_pvt *pvt, u8 i)
 {
        u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff;
 
index 281f566a5513d907fefc67da9237a2ec41ab8679..d1e9eb191f2bd77ff72614c0819885d31dcf473d 100644 (file)
@@ -340,7 +340,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
        /*
         * Alocate and fill the csrow/channels structs
         */
-       mci->csrows = kcalloc(sizeof(*mci->csrows), tot_csrows, GFP_KERNEL);
+       mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
        if (!mci->csrows)
                goto error;
        for (row = 0; row < tot_csrows; row++) {
@@ -351,7 +351,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
                csr->csrow_idx = row;
                csr->mci = mci;
                csr->nr_channels = tot_channels;
-               csr->channels = kcalloc(sizeof(*csr->channels), tot_channels,
+               csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
                                        GFP_KERNEL);
                if (!csr->channels)
                        goto error;
@@ -369,7 +369,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
        /*
         * Allocate and fill the dimm structs
         */
-       mci->dimms  = kcalloc(sizeof(*mci->dimms), tot_dimms, GFP_KERNEL);
+       mci->dimms  = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
        if (!mci->dimms)
                goto error;
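
The edac_mc_alloc() hunks above only swap the two kcalloc() arguments: like calloc(), the element count comes first and the element size second. A trivial standalone illustration:

    #include <stdlib.h>

    struct chan {
        int idx;
        void *priv;
    };

    int main(void)
    {
        size_t tot_channels = 8;

        /* Count first, element size second - the order kcalloc()/calloc() expect. */
        struct chan *channels = calloc(tot_channels, sizeof(*channels));
        if (!channels)
            return 1;

        free(channels);
        return 0;
    }
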
 
index dc6e905ee1a5402126496bd06838302a1b26165f..0056c4dae9d52cc24985200dbb58b724be73c3ed 100644 (file)
@@ -256,7 +256,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
        struct edac_pci_dev_attribute *edac_pci_dev;
        edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
 
-       if (edac_pci_dev->show)
+       if (edac_pci_dev->store)
                return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
        return -EIO;
 }
index fd3ae6290d71ecfd927cc4447a942c099bd223ea..982f1f5f5742f21da9206f62fa622ce17adefe9f 100644 (file)
@@ -471,7 +471,7 @@ void __init dmi_scan_machine(void)
        char __iomem *p, *q;
        int rc;
 
-       if (efi_enabled) {
+       if (efi_enabled(EFI_CONFIG_TABLES)) {
                if (efi.smbios == EFI_INVALID_TABLE_ADDR)
                        goto error;
 
index 7b1c37497c9a2683569f8a1ac9c959e8d5bc7636..f5596db0cf583dc16226bc629a42977de0f9fa05 100644 (file)
@@ -674,7 +674,7 @@ static int efi_status_to_err(efi_status_t status)
                err = -EACCES;
                break;
        case EFI_NOT_FOUND:
-               err = -ENOENT;
+               err = -EIO;
                break;
        default:
                err = -EINVAL;
@@ -793,6 +793,7 @@ static ssize_t efivarfs_file_write(struct file *file,
                spin_unlock(&efivars->lock);
                efivar_unregister(var);
                drop_nlink(inode);
+               d_delete(file->f_dentry);
                dput(file->f_dentry);
 
        } else {
@@ -994,7 +995,7 @@ static int efivarfs_unlink(struct inode *dir, struct dentry *dentry)
                list_del(&var->list);
                spin_unlock(&efivars->lock);
                efivar_unregister(var);
-               drop_nlink(dir);
+               drop_nlink(dentry->d_inode);
                dput(dentry);
                return 0;
        }
@@ -1782,7 +1783,7 @@ efivars_init(void)
        printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
               EFIVARS_DATE);
 
-       if (!efi_enabled)
+       if (!efi_enabled(EFI_RUNTIME_SERVICES))
                return 0;
 
        /* For now we'll register the efi directory at /sys/firmware/efi */
@@ -1822,7 +1823,7 @@ err_put:
 static void __exit
 efivars_exit(void)
 {
-       if (efi_enabled) {
+       if (efi_enabled(EFI_RUNTIME_SERVICES)) {
                unregister_efivars(&__efivars);
                kobject_put(efi_kobj);
        }
index 4da4eb9ae92604c35349ebaeb39b4a612bac3ed6..2224f1dc074b1329d7ce9c80b78bef4fffc6e98b 100644 (file)
@@ -99,7 +99,7 @@ unsigned long __init find_ibft_region(unsigned long *sizep)
        /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
         * only use ACPI for this */
 
-       if (!efi_enabled)
+       if (!efi_enabled(EFI_BOOT))
                find_ibft_in_mem();
 
        if (ibft_addr) {
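
The efivars and iBFT hunks above reflect the same interface change: efi_enabled is now queried per facility (EFI_BOOT, EFI_CONFIG_TABLES, EFI_RUNTIME_SERVICES, ...) instead of being a single flag. A minimal kernel-style sketch of a caller; not standalone code, and which facility to test depends on what the code actually touches:

    #include <linux/efi.h>
    #include <linux/init.h>

    static int __init my_efi_consumer_init(void)
    {
            /* Only proceed when EFI runtime services are actually usable. */
            if (!efi_enabled(EFI_RUNTIME_SERVICES))
                    return 0;

            /* ... talk to the firmware from here on ... */
            return 0;
    }
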
index 682de754d63f166a92de96338c7f4d01bf87cdb2..e5116fa851406ffc57b900ba3374f2f09091216a 100644 (file)
@@ -657,12 +657,6 @@ config GPIO_JANZ_TTL
          This driver provides support for driving the pins in output
          mode only. Input mode is not supported.
 
-config GPIO_AB8500
-       bool "ST-Ericsson AB8500 Mixed Signal Circuit gpio functions"
-       depends on AB8500_CORE && BROKEN
-       help
-         Select this to enable the AB8500 IC GPIO driver
-
 config GPIO_TPS6586X
        bool "TPS6586X GPIO"
        depends on MFD_TPS6586X
index c5aebd008dde4d3e5428fb90de3563f82ae159ac..45a388c21d0468f54782492dc014dcdbb0a5df14 100644 (file)
@@ -10,7 +10,6 @@ obj-$(CONFIG_GPIO_ACPI)               += gpiolib-acpi.o
 obj-$(CONFIG_GPIO_GENERIC)     += gpio-generic.o
 
 obj-$(CONFIG_GPIO_74X164)      += gpio-74x164.o
-obj-$(CONFIG_GPIO_AB8500)      += gpio-ab8500.o
 obj-$(CONFIG_GPIO_ADNP)                += gpio-adnp.o
 obj-$(CONFIG_GPIO_ADP5520)     += gpio-adp5520.o
 obj-$(CONFIG_GPIO_ADP5588)     += gpio-adp5588.o
diff --git a/drivers/gpio/gpio-ab8500.c b/drivers/gpio/gpio-ab8500.c
deleted file mode 100644 (file)
index 983ad42..0000000
+++ /dev/null
@@ -1,520 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2011
- *
- * Author: BIBEK BASU <bibek.basu@stericsson.com>
- * License terms: GNU General Public License (GPL) version 2
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/mfd/ab8500.h>
-#include <linux/mfd/abx500.h>
-#include <linux/mfd/ab8500/gpio.h>
-
-/*
- * GPIO registers offset
- * Bank: 0x10
- */
-#define AB8500_GPIO_SEL1_REG   0x00
-#define AB8500_GPIO_SEL2_REG   0x01
-#define AB8500_GPIO_SEL3_REG   0x02
-#define AB8500_GPIO_SEL4_REG   0x03
-#define AB8500_GPIO_SEL5_REG   0x04
-#define AB8500_GPIO_SEL6_REG   0x05
-
-#define AB8500_GPIO_DIR1_REG   0x10
-#define AB8500_GPIO_DIR2_REG   0x11
-#define AB8500_GPIO_DIR3_REG   0x12
-#define AB8500_GPIO_DIR4_REG   0x13
-#define AB8500_GPIO_DIR5_REG   0x14
-#define AB8500_GPIO_DIR6_REG   0x15
-
-#define AB8500_GPIO_OUT1_REG   0x20
-#define AB8500_GPIO_OUT2_REG   0x21
-#define AB8500_GPIO_OUT3_REG   0x22
-#define AB8500_GPIO_OUT4_REG   0x23
-#define AB8500_GPIO_OUT5_REG   0x24
-#define AB8500_GPIO_OUT6_REG   0x25
-
-#define AB8500_GPIO_PUD1_REG   0x30
-#define AB8500_GPIO_PUD2_REG   0x31
-#define AB8500_GPIO_PUD3_REG   0x32
-#define AB8500_GPIO_PUD4_REG   0x33
-#define AB8500_GPIO_PUD5_REG   0x34
-#define AB8500_GPIO_PUD6_REG   0x35
-
-#define AB8500_GPIO_IN1_REG    0x40
-#define AB8500_GPIO_IN2_REG    0x41
-#define AB8500_GPIO_IN3_REG    0x42
-#define AB8500_GPIO_IN4_REG    0x43
-#define AB8500_GPIO_IN5_REG    0x44
-#define AB8500_GPIO_IN6_REG    0x45
-#define AB8500_GPIO_ALTFUN_REG 0x45
-#define ALTFUN_REG_INDEX       6
-#define AB8500_NUM_GPIO                42
-#define AB8500_NUM_VIR_GPIO_IRQ        16
-
-enum ab8500_gpio_action {
-       NONE,
-       STARTUP,
-       SHUTDOWN,
-       MASK,
-       UNMASK
-};
-
-struct ab8500_gpio {
-       struct gpio_chip chip;
-       struct ab8500 *parent;
-       struct device *dev;
-       struct mutex lock;
-       u32 irq_base;
-       enum ab8500_gpio_action irq_action;
-       u16 rising;
-       u16 falling;
-};
-/**
- * to_ab8500_gpio() - get the pointer to ab8500_gpio
- * @chip:      Member of the structure ab8500_gpio
- */
-static inline struct ab8500_gpio *to_ab8500_gpio(struct gpio_chip *chip)
-{
-       return container_of(chip, struct ab8500_gpio, chip);
-}
-
-static int ab8500_gpio_set_bits(struct gpio_chip *chip, u8 reg,
-                                       unsigned offset, int val)
-{
-       struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
-       u8 pos = offset % 8;
-       int ret;
-
-       reg = reg + (offset / 8);
-       ret = abx500_mask_and_set_register_interruptible(ab8500_gpio->dev,
-                               AB8500_MISC, reg, 1 << pos, val << pos);
-       if (ret < 0)
-               dev_err(ab8500_gpio->dev, "%s write failed\n", __func__);
-       return ret;
-}
-/**
- * ab8500_gpio_get() - Get the particular GPIO value
- * @chip: Gpio device
- * @offset: GPIO number to read
- */
-static int ab8500_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
-       struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
-       u8 mask = 1 << (offset % 8);
-       u8 reg = AB8500_GPIO_OUT1_REG + (offset / 8);
-       int ret;
-       u8 data;
-       ret = abx500_get_register_interruptible(ab8500_gpio->dev, AB8500_MISC,
-                                               reg, &data);
-       if (ret < 0) {
-               dev_err(ab8500_gpio->dev, "%s read failed\n", __func__);
-               return ret;
-       }
-       return (data & mask) >> (offset % 8);
-}
-
-static void ab8500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
-{
-       struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
-       int ret;
-       /* Write the data */
-       ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, 1);
-       if (ret < 0)
-               dev_err(ab8500_gpio->dev, "%s write failed\n", __func__);
-}
-
-static int ab8500_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
-                                       int val)
-{
-       int ret;
-       /* set direction as output */
-       ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_DIR1_REG, offset, 1);
-       if (ret < 0)
-               return ret;
-       /* disable pull down */
-       ret = ab8500_gpio_set_bits(chip, AB8500_GPIO_PUD1_REG, offset, 1);
-       if (ret < 0)
-               return ret;
-       /* set the output as 1 or 0 */
-       return ab8500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val);
-
-}
-
-static int ab8500_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
-       /* set the register as input */
-       return ab8500_gpio_set_bits(chip, AB8500_GPIO_DIR1_REG, offset, 0);
-}
-
-static int ab8500_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
-       /*
-        * Only some GPIOs are interrupt capable, and they are
-        * organized in discontiguous clusters:
-        *
-        *      GPIO6 to GPIO13
-        *      GPIO24 and GPIO25
-        *      GPIO36 to GPIO41
-        */
-       static struct ab8500_gpio_irq_cluster {
-               int start;
-               int end;
-       } clusters[] = {
-               {.start = 6,  .end = 13},
-               {.start = 24, .end = 25},
-               {.start = 36, .end = 41},
-       };
-       struct ab8500_gpio *ab8500_gpio = to_ab8500_gpio(chip);
-       int base = ab8500_gpio->irq_base;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(clusters); i++) {
-               struct ab8500_gpio_irq_cluster *cluster = &clusters[i];
-
-               if (offset >= cluster->start && offset <= cluster->end)
-                       return base + offset - cluster->start;
-
-               /* Advance by the number of gpios in this cluster */
-               base += cluster->end - cluster->start + 1;
-       }
-
-       return -EINVAL;
-}
-
-static struct gpio_chip ab8500gpio_chip = {
-       .label                  = "ab8500_gpio",
-       .owner                  = THIS_MODULE,
-       .direction_input        = ab8500_gpio_direction_input,
-       .get                    = ab8500_gpio_get,
-       .direction_output       = ab8500_gpio_direction_output,
-       .set                    = ab8500_gpio_set,
-       .to_irq                 = ab8500_gpio_to_irq,
-};
-
-static unsigned int irq_to_rising(unsigned int irq)
-{
-       struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
-       int offset = irq - ab8500_gpio->irq_base;
-       int new_irq = offset +  AB8500_INT_GPIO6R
-                       + ab8500_gpio->parent->irq_base;
-       return new_irq;
-}
-
-static unsigned int irq_to_falling(unsigned int irq)
-{
-       struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
-       int offset = irq - ab8500_gpio->irq_base;
-       int new_irq = offset +  AB8500_INT_GPIO6F
-                       +  ab8500_gpio->parent->irq_base;
-       return new_irq;
-
-}
-
-static unsigned int rising_to_irq(unsigned int irq, void *dev)
-{
-       struct ab8500_gpio *ab8500_gpio = dev;
-       int offset = irq - AB8500_INT_GPIO6R
-                       - ab8500_gpio->parent->irq_base ;
-       int new_irq = offset + ab8500_gpio->irq_base;
-       return new_irq;
-}
-
-static unsigned int falling_to_irq(unsigned int irq, void *dev)
-{
-       struct ab8500_gpio *ab8500_gpio = dev;
-       int offset = irq - AB8500_INT_GPIO6F
-                       - ab8500_gpio->parent->irq_base ;
-       int new_irq = offset + ab8500_gpio->irq_base;
-       return new_irq;
-
-}
-
-/*
- * IRQ handler
- */
-
-static irqreturn_t handle_rising(int irq, void *dev)
-{
-
-       handle_nested_irq(rising_to_irq(irq , dev));
-       return IRQ_HANDLED;
-}
-
-static irqreturn_t handle_falling(int irq, void *dev)
-{
-
-       handle_nested_irq(falling_to_irq(irq, dev));
-       return IRQ_HANDLED;
-}
-
-static void ab8500_gpio_irq_lock(unsigned int irq)
-{
-       struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
-       mutex_lock(&ab8500_gpio->lock);
-}
-
-static void ab8500_gpio_irq_sync_unlock(unsigned int irq)
-{
-       struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
-       int offset = irq - ab8500_gpio->irq_base;
-       bool rising = ab8500_gpio->rising & BIT(offset);
-       bool falling = ab8500_gpio->falling & BIT(offset);
-       int ret;
-
-       switch (ab8500_gpio->irq_action)        {
-       case STARTUP:
-               if (rising)
-                       ret = request_threaded_irq(irq_to_rising(irq),
-                                       NULL, handle_rising,
-                                       IRQF_TRIGGER_RISING,
-                                       "ab8500-gpio-r", ab8500_gpio);
-               if (falling)
-                       ret = request_threaded_irq(irq_to_falling(irq),
-                                      NULL, handle_falling,
-                                      IRQF_TRIGGER_FALLING,
-                                      "ab8500-gpio-f", ab8500_gpio);
-               break;
-       case SHUTDOWN:
-               if (rising)
-                       free_irq(irq_to_rising(irq), ab8500_gpio);
-               if (falling)
-                       free_irq(irq_to_falling(irq), ab8500_gpio);
-               break;
-       case MASK:
-               if (rising)
-                       disable_irq(irq_to_rising(irq));
-               if (falling)
-                       disable_irq(irq_to_falling(irq));
-               break;
-       case UNMASK:
-               if (rising)
-                       enable_irq(irq_to_rising(irq));
-               if (falling)
-                       enable_irq(irq_to_falling(irq));
-               break;
-       case NONE:
-               break;
-       }
-       ab8500_gpio->irq_action = NONE;
-       ab8500_gpio->rising &= ~(BIT(offset));
-       ab8500_gpio->falling &= ~(BIT(offset));
-       mutex_unlock(&ab8500_gpio->lock);
-}
-
-
-static void ab8500_gpio_irq_mask(unsigned int irq)
-{
-       struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
-       ab8500_gpio->irq_action = MASK;
-}
-
-static void ab8500_gpio_irq_unmask(unsigned int irq)
-{
-       struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
-       ab8500_gpio->irq_action = UNMASK;
-}
-
-static int ab8500_gpio_irq_set_type(unsigned int irq, unsigned int type)
-{
-       struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
-       int offset = irq - ab8500_gpio->irq_base;
-
-       if (type == IRQ_TYPE_EDGE_BOTH) {
-               ab8500_gpio->rising =  BIT(offset);
-               ab8500_gpio->falling = BIT(offset);
-       } else if (type == IRQ_TYPE_EDGE_RISING) {
-               ab8500_gpio->rising =  BIT(offset);
-       } else  {
-               ab8500_gpio->falling = BIT(offset);
-       }
-       return 0;
-}
-
-unsigned int ab8500_gpio_irq_startup(unsigned int irq)
-{
-       struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
-       ab8500_gpio->irq_action = STARTUP;
-       return 0;
-}
-
-void ab8500_gpio_irq_shutdown(unsigned int irq)
-{
-       struct ab8500_gpio *ab8500_gpio = get_irq_chip_data(irq);
-       ab8500_gpio->irq_action = SHUTDOWN;
-}
-
-static struct irq_chip ab8500_gpio_irq_chip = {
-       .name                   = "ab8500-gpio",
-       .startup                = ab8500_gpio_irq_startup,
-       .shutdown               = ab8500_gpio_irq_shutdown,
-       .bus_lock               = ab8500_gpio_irq_lock,
-       .bus_sync_unlock        = ab8500_gpio_irq_sync_unlock,
-       .mask                   = ab8500_gpio_irq_mask,
-       .unmask                 = ab8500_gpio_irq_unmask,
-       .set_type               = ab8500_gpio_irq_set_type,
-};
-
-static int ab8500_gpio_irq_init(struct ab8500_gpio *ab8500_gpio)
-{
-       u32 base = ab8500_gpio->irq_base;
-       int irq;
-
-       for (irq = base; irq < base + AB8500_NUM_VIR_GPIO_IRQ ; irq++) {
-               set_irq_chip_data(irq, ab8500_gpio);
-               set_irq_chip_and_handler(irq, &ab8500_gpio_irq_chip,
-                               handle_simple_irq);
-               set_irq_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
-               set_irq_flags(irq, IRQF_VALID);
-#else
-               set_irq_noprobe(irq);
-#endif
-       }
-
-       return 0;
-}
-
-static void ab8500_gpio_irq_remove(struct ab8500_gpio *ab8500_gpio)
-{
-       int base = ab8500_gpio->irq_base;
-       int irq;
-
-       for (irq = base; irq < base + AB8500_NUM_VIR_GPIO_IRQ; irq++) {
-#ifdef CONFIG_ARM
-               set_irq_flags(irq, 0);
-#endif
-               set_irq_chip_and_handler(irq, NULL, NULL);
-               set_irq_chip_data(irq, NULL);
-       }
-}
-
-static int ab8500_gpio_probe(struct platform_device *pdev)
-{
-       struct ab8500_platform_data *ab8500_pdata =
-                               dev_get_platdata(pdev->dev.parent);
-       struct ab8500_gpio_platform_data *pdata;
-       struct ab8500_gpio *ab8500_gpio;
-       int ret;
-       int i;
-
-       pdata = ab8500_pdata->gpio;
-       if (!pdata)     {
-               dev_err(&pdev->dev, "gpio platform data missing\n");
-               return -ENODEV;
-       }
-
-       ab8500_gpio = kzalloc(sizeof(struct ab8500_gpio), GFP_KERNEL);
-       if (ab8500_gpio == NULL) {
-               dev_err(&pdev->dev, "failed to allocate memory\n");
-               return -ENOMEM;
-       }
-       ab8500_gpio->dev = &pdev->dev;
-       ab8500_gpio->parent = dev_get_drvdata(pdev->dev.parent);
-       ab8500_gpio->chip = ab8500gpio_chip;
-       ab8500_gpio->chip.ngpio = AB8500_NUM_GPIO;
-       ab8500_gpio->chip.dev = &pdev->dev;
-       ab8500_gpio->chip.base = pdata->gpio_base;
-       ab8500_gpio->irq_base = pdata->irq_base;
-       /* initialize the lock */
-       mutex_init(&ab8500_gpio->lock);
-       /*
-        * AB8500 core will handle and clear the IRQ
-        * configre GPIO based on config-reg value.
-        * These values are for selecting the PINs as
-        * GPIO or alternate function
-        */
-       for (i = AB8500_GPIO_SEL1_REG; i <= AB8500_GPIO_SEL6_REG; i++)  {
-               ret = abx500_set_register_interruptible(ab8500_gpio->dev,
-                               AB8500_MISC, i,
-                               pdata->config_reg[i]);
-               if (ret < 0)
-                       goto out_free;
-       }
-       ret = abx500_set_register_interruptible(ab8500_gpio->dev, AB8500_MISC,
-                               AB8500_GPIO_ALTFUN_REG,
-                               pdata->config_reg[ALTFUN_REG_INDEX]);
-       if (ret < 0)
-               goto out_free;
-
-       ret = ab8500_gpio_irq_init(ab8500_gpio);
-       if (ret)
-               goto out_free;
-       ret = gpiochip_add(&ab8500_gpio->chip);
-       if (ret) {
-               dev_err(&pdev->dev, "unable to add gpiochip: %d\n",
-                               ret);
-               goto out_rem_irq;
-       }
-       platform_set_drvdata(pdev, ab8500_gpio);
-       return 0;
-
-out_rem_irq:
-       ab8500_gpio_irq_remove(ab8500_gpio);
-out_free:
-       mutex_destroy(&ab8500_gpio->lock);
-       kfree(ab8500_gpio);
-       return ret;
-}
-
-/*
- * ab8500_gpio_remove() - remove Ab8500-gpio driver
- * @pdev :     Platform device registered
- */
-static int ab8500_gpio_remove(struct platform_device *pdev)
-{
-       struct ab8500_gpio *ab8500_gpio = platform_get_drvdata(pdev);
-       int ret;
-
-       ret = gpiochip_remove(&ab8500_gpio->chip);
-       if (ret < 0) {
-               dev_err(ab8500_gpio->dev, "unable to remove gpiochip: %d\n",
-                       ret);
-               return ret;
-       }
-
-       platform_set_drvdata(pdev, NULL);
-       mutex_destroy(&ab8500_gpio->lock);
-       kfree(ab8500_gpio);
-
-       return 0;
-}
-
-static struct platform_driver ab8500_gpio_driver = {
-       .driver = {
-               .name = "ab8500-gpio",
-               .owner = THIS_MODULE,
-       },
-       .probe = ab8500_gpio_probe,
-       .remove = ab8500_gpio_remove,
-};
-
-static int __init ab8500_gpio_init(void)
-{
-       return platform_driver_register(&ab8500_gpio_driver);
-}
-arch_initcall(ab8500_gpio_init);
-
-static void __exit ab8500_gpio_exit(void)
-{
-       platform_driver_unregister(&ab8500_gpio_driver);
-}
-module_exit(ab8500_gpio_exit);
-
-MODULE_AUTHOR("BIBEK BASU <bibek.basu@stericsson.com>");
-MODULE_DESCRIPTION("Driver allows to use AB8500 unused pins to be used as GPIO");
-MODULE_ALIAS("platform:ab8500-gpio");
-MODULE_LICENSE("GPL v2");
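
The driver removed above mapped the AB8500's interrupt-capable GPIOs, which sit in three
discontiguous clusters (GPIO6-13, GPIO24-25, GPIO36-41), onto one contiguous block of
virtual IRQ numbers. A minimal standalone sketch of that mapping, with the cluster table
taken from the removed ab8500_gpio_to_irq() and a hypothetical irq_base of 0:

	#include <stdio.h>

	struct irq_cluster { int start, end; };

	static const struct irq_cluster clusters[] = {
		{ .start = 6,  .end = 13 },
		{ .start = 24, .end = 25 },
		{ .start = 36, .end = 41 },
	};

	/* Return the virtual IRQ for a GPIO offset, or -1 if the pin is not
	 * interrupt capable; mirrors the removed ab8500_gpio_to_irq(). */
	static int gpio_offset_to_irq(int irq_base, int offset)
	{
		size_t i;

		for (i = 0; i < sizeof(clusters) / sizeof(clusters[0]); i++) {
			if (offset >= clusters[i].start && offset <= clusters[i].end)
				return irq_base + offset - clusters[i].start;
			/* skip this cluster's slice of the virtual IRQ range */
			irq_base += clusters[i].end - clusters[i].start + 1;
		}
		return -1;
	}

	int main(void)
	{
		/* GPIO25 lands right after the eight IRQs of the first cluster */
		printf("%d\n", gpio_offset_to_irq(0, 25));	/* prints 9 */
		return 0;
	}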
index 7d9bd94be8d2a2108b66ac9575071e5751c06c79..6819d63cb1673647e35821ce3feede298e3e56c7 100644 (file)
@@ -547,7 +547,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
        mvchip->membase = devm_request_and_ioremap(&pdev->dev, res);
        if (! mvchip->membase) {
                dev_err(&pdev->dev, "Cannot ioremap\n");
-               kfree(mvchip->chip.label);
                return -ENOMEM;
        }
 
@@ -557,14 +556,12 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
                res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
                if (! res) {
                        dev_err(&pdev->dev, "Cannot get memory resource\n");
-                       kfree(mvchip->chip.label);
                        return -ENODEV;
                }
 
                mvchip->percpu_membase = devm_request_and_ioremap(&pdev->dev, res);
                if (! mvchip->percpu_membase) {
                        dev_err(&pdev->dev, "Cannot ioremap\n");
-                       kfree(mvchip->chip.label);
                        return -ENOMEM;
                }
        }
@@ -625,7 +622,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
        mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
        if (mvchip->irqbase < 0) {
                dev_err(&pdev->dev, "no irqs\n");
-               kfree(mvchip->chip.label);
                return -ENOMEM;
        }
 
@@ -633,7 +629,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
                                    mvchip->membase, handle_level_irq);
        if (! gc) {
                dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n");
-               kfree(mvchip->chip.label);
                return -ENOMEM;
        }
 
@@ -668,7 +663,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
                irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST,
                                        IRQ_LEVEL | IRQ_NOPROBE);
                kfree(gc);
-               kfree(mvchip->chip.label);
                return -ENODEV;
        }
 
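
The hunks above drop the explicit kfree(mvchip->chip.label) calls from the probe error
paths. That is only leak-free if the label's lifetime is managed elsewhere, which this
excerpt does not show; the usual way to get that property is a device-managed allocation,
sketched below with a hypothetical copy_label() helper (illustrative only, not the actual
mvebu change):

	#include <linux/device.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	/* Duplicate a name with devm_kzalloc() so early "return -ENOMEM" paths
	 * in probe need no matching kfree(); the memory is released when the
	 * device is unbound. */
	static const char *copy_label(struct device *dev, const char *name)
	{
		char *label = devm_kzalloc(dev, strlen(name) + 1, GFP_KERNEL);

		if (!label)
			return NULL;
		strcpy(label, name);
		return label;
	}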
index 01f7fe955590c1b79a64cd088431858b7d9caea8..76be7eed79dec2683e244a0a9f60a1688241e1ab 100644 (file)
@@ -32,7 +32,6 @@
 
 #include <mach/hardware.h>
 #include <mach/map.h>
-#include <mach/regs-clock.h>
 #include <mach/regs-gpio.h>
 
 #include <plat/cpu.h>
@@ -446,7 +445,7 @@ static struct samsung_gpio_cfg s3c24xx_gpiocfg_banka = {
 };
 #endif
 
-#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5)
+#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_SOC_EXYNOS5250)
 static struct samsung_gpio_cfg exynos_gpio_cfg = {
        .set_pull       = exynos_gpio_setpull,
        .get_pull       = exynos_gpio_getpull,
@@ -2446,7 +2445,7 @@ static struct samsung_gpio_chip exynos4_gpios_3[] = {
 };
 #endif
 
-#ifdef CONFIG_ARCH_EXYNOS5
+#ifdef CONFIG_SOC_EXYNOS5250
 static struct samsung_gpio_chip exynos5_gpios_1[] = {
        {
                .chip   = {
@@ -2614,7 +2613,7 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {
 };
 #endif
 
-#ifdef CONFIG_ARCH_EXYNOS5
+#ifdef CONFIG_SOC_EXYNOS5250
 static struct samsung_gpio_chip exynos5_gpios_2[] = {
        {
                .chip   = {
@@ -2675,7 +2674,7 @@ static struct samsung_gpio_chip exynos5_gpios_2[] = {
 };
 #endif
 
-#ifdef CONFIG_ARCH_EXYNOS5
+#ifdef CONFIG_SOC_EXYNOS5250
 static struct samsung_gpio_chip exynos5_gpios_3[] = {
        {
                .chip   = {
@@ -2711,7 +2710,7 @@ static struct samsung_gpio_chip exynos5_gpios_3[] = {
 };
 #endif
 
-#ifdef CONFIG_ARCH_EXYNOS5
+#ifdef CONFIG_SOC_EXYNOS5250
 static struct samsung_gpio_chip exynos5_gpios_4[] = {
        {
                .chip   = {
@@ -3010,7 +3009,7 @@ static __init int samsung_gpiolib_init(void)
        int i, nr_chips;
        int group = 0;
 
-#ifdef CONFIG_PINCTRL_SAMSUNG
+#if defined(CONFIG_PINCTRL_EXYNOS) || defined(CONFIG_PINCTRL_EXYNOS5440)
        /*
        * This gpio driver includes support for device tree support and there
        * are platforms using it. In order to maintain compatibility with those
@@ -3026,6 +3025,7 @@ static __init int samsung_gpiolib_init(void)
        static const struct of_device_id exynos_pinctrl_ids[] = {
                { .compatible = "samsung,pinctrl-exynos4210", },
                { .compatible = "samsung,pinctrl-exynos4x12", },
+               { .compatible = "samsung,pinctrl-exynos5440", },
        };
        for_each_matching_node(pctrl_np, exynos_pinctrl_ids)
                if (pctrl_np && of_device_is_available(pctrl_np))
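
Two things change in this file: the Exynos5 GPIO bank tables are now gated on
CONFIG_SOC_EXYNOS5250 rather than the broader CONFIG_ARCH_EXYNOS5, and the legacy gpiolib
setup backs off when one of the Exynos pinctrl drivers is enabled and a matching,
available node is present in the device tree. A small sketch of that detect-and-skip
check (hypothetical helper name, assuming a sleepable init path):

	#include <linux/of.h>

	/* True if a matching pinctrl node exists and is status = "okay",
	 * in which case the legacy GPIO tables should not be registered. */
	static bool exynos_pinctrl_present(const struct of_device_id *ids)
	{
		struct device_node *np;

		for_each_matching_node(np, ids) {
			if (of_device_is_available(np)) {
				of_node_put(np);
				return true;
			}
		}
		return false;
	}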
index d542a141811a00b2f49589ba8db583b7b30c0ac0..25b1dbe8921d62705c3e76d10d5bb08437bb7d1b 100644 (file)
@@ -250,7 +250,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
                 * on the same GPIO chip.
                 */
                ret = gpiochip_add_pin_range(chip,
-                                            pinctrl_dev_get_name(pctldev),
+                                            pinctrl_dev_get_devname(pctldev),
                                             0, /* offset in gpiochip */
                                             pinspec.args[0],
                                             pinspec.args[1]);
index 2bf9670ba29b3a584c878e15ab9f69bfdcc11255..2aa331499f81da1dfbe8f5c51216ed0096f67951 100644 (file)
@@ -221,11 +221,13 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 
        BUG_ON(!hole_node->hole_follows || node->allocated);
 
-       if (mm->color_adjust)
-               mm->color_adjust(hole_node, color, &adj_start, &adj_end);
-
        if (adj_start < start)
                adj_start = start;
+       if (adj_end > end)
+               adj_end = end;
+
+       if (mm->color_adjust)
+               mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
        if (alignment) {
                unsigned tmp = adj_start % alignment;
@@ -506,7 +508,7 @@ void drm_mm_init_scan(struct drm_mm *mm,
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
-       mm->scan_hit_size = 0;
+       mm->scan_hit_end = 0;
        mm->scan_check_range = 0;
        mm->prev_scanned_node = NULL;
 }
@@ -533,7 +535,7 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm,
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
-       mm->scan_hit_size = 0;
+       mm->scan_hit_end = 0;
        mm->scan_start = start;
        mm->scan_end = end;
        mm->scan_check_range = 1;
@@ -552,8 +554,7 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;
        unsigned long hole_start, hole_end;
-       unsigned long adj_start;
-       unsigned long adj_end;
+       unsigned long adj_start, adj_end;
 
        mm->scanned_blocks++;
 
@@ -570,14 +571,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
        node->node_list.next = &mm->prev_scanned_node->node_list;
        mm->prev_scanned_node = node;
 
-       hole_start = drm_mm_hole_node_start(prev_node);
-       hole_end = drm_mm_hole_node_end(prev_node);
-
-       adj_start = hole_start;
-       adj_end = hole_end;
-
-       if (mm->color_adjust)
-               mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);
+       adj_start = hole_start = drm_mm_hole_node_start(prev_node);
+       adj_end = hole_end = drm_mm_hole_node_end(prev_node);
 
        if (mm->scan_check_range) {
                if (adj_start < mm->scan_start)
@@ -586,11 +581,14 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
                        adj_end = mm->scan_end;
        }
 
+       if (mm->color_adjust)
+               mm->color_adjust(prev_node, mm->scan_color,
+                                &adj_start, &adj_end);
+
        if (check_free_hole(adj_start, adj_end,
                            mm->scan_size, mm->scan_alignment)) {
                mm->scan_hit_start = hole_start;
-               mm->scan_hit_size = hole_end;
-
+               mm->scan_hit_end = hole_end;
                return 1;
        }
 
@@ -626,19 +624,10 @@ int drm_mm_scan_remove_block(struct drm_mm_node *node)
                               node_list);
 
        prev_node->hole_follows = node->scanned_preceeds_hole;
-       INIT_LIST_HEAD(&node->node_list);
        list_add(&node->node_list, &prev_node->node_list);
 
-       /* Only need to check for containement because start&size for the
-        * complete resulting free block (not just the desired part) is
-        * stored. */
-       if (node->start >= mm->scan_hit_start &&
-           node->start + node->size
-                       <= mm->scan_hit_start + mm->scan_hit_size) {
-               return 1;
-       }
-
-       return 0;
+        return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
+                node->start < mm->scan_hit_end);
 }
 EXPORT_SYMBOL(drm_mm_scan_remove_block);
 
index 1d1f1e5e33f0ff267daa1d8ed4818b0a4e00fc7e..046bcda36abe7aa0f60e53f79f9988eaef61bd17 100644 (file)
@@ -24,7 +24,7 @@ config DRM_EXYNOS_DMABUF
 
 config DRM_EXYNOS_FIMD
        bool "Exynos DRM FIMD"
-       depends on DRM_EXYNOS && !FB_S3C
+       depends on DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
        help
          Choose this option if you want to use Exynos FIMD for DRM.
 
@@ -48,7 +48,7 @@ config DRM_EXYNOS_G2D
 
 config DRM_EXYNOS_IPP
        bool "Exynos DRM IPP"
-       depends on DRM_EXYNOS
+       depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM
        help
          Choose this option if you want to use IPP feature for DRM.
 
index ab37437bad8a9f0f5baa8b0425584ba95f5eb2c0..4c5b6859c9ea5cab2aa856ce43d3675f0240235e 100644 (file)
@@ -18,7 +18,6 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_encoder.h"
 
-#define MAX_EDID 256
 #define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\
                                drm_connector)
 
@@ -96,7 +95,9 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
                                        to_exynos_connector(connector);
        struct exynos_drm_manager *manager = exynos_connector->manager;
        struct exynos_drm_display_ops *display_ops = manager->display_ops;
-       unsigned int count;
+       struct edid *edid = NULL;
+       unsigned int count = 0;
+       int ret;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -114,27 +115,21 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
         * because lcd panel has only one mode.
         */
        if (display_ops->get_edid) {
-               int ret;
-               void *edid;
-
-               edid = kzalloc(MAX_EDID, GFP_KERNEL);
-               if (!edid) {
-                       DRM_ERROR("failed to allocate edid\n");
-                       return 0;
+               edid = display_ops->get_edid(manager->dev, connector);
+               if (IS_ERR_OR_NULL(edid)) {
+                       ret = PTR_ERR(edid);
+                       edid = NULL;
+                       DRM_ERROR("Panel operation get_edid failed %d\n", ret);
+                       goto out;
                }
 
-               ret = display_ops->get_edid(manager->dev, connector,
-                                               edid, MAX_EDID);
-               if (ret < 0) {
-                       DRM_ERROR("failed to get edid data.\n");
-                       kfree(edid);
-                       edid = NULL;
-                       return 0;
+               count = drm_add_edid_modes(connector, edid);
+               if (count < 0) {
+                       DRM_ERROR("Add edid modes failed %d\n", count);
+                       goto out;
                }
 
                drm_mode_connector_update_edid_property(connector, edid);
-               count = drm_add_edid_modes(connector, edid);
-               kfree(edid);
        } else {
                struct exynos_drm_panel_info *panel;
                struct drm_display_mode *mode = drm_mode_create(connector->dev);
@@ -161,6 +156,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
                count = 1;
        }
 
+out:
+       kfree(edid);
        return count;
 }
 
index 9df97714b6c0b8a2244fa1f61da5eb4a76c774dd..ba0a3aa785474f27ca0ba23b181e71d7d5bf82a9 100644 (file)
@@ -19,6 +19,7 @@
 struct exynos_drm_dmabuf_attachment {
        struct sg_table sgt;
        enum dma_data_direction dir;
+       bool is_mapped;
 };
 
 static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
@@ -72,17 +73,10 @@ static struct sg_table *
 
        DRM_DEBUG_PRIME("%s\n", __FILE__);
 
-       if (WARN_ON(dir == DMA_NONE))
-               return ERR_PTR(-EINVAL);
-
        /* just return current sgt if already requested. */
-       if (exynos_attach->dir == dir)
+       if (exynos_attach->dir == dir && exynos_attach->is_mapped)
                return &exynos_attach->sgt;
 
-       /* reattaching is not allowed. */
-       if (WARN_ON(exynos_attach->dir != DMA_NONE))
-               return ERR_PTR(-EBUSY);
-
        buf = gem_obj->buffer;
        if (!buf) {
                DRM_ERROR("buffer is null.\n");
@@ -107,13 +101,17 @@ static struct sg_table *
                wr = sg_next(wr);
        }
 
-       nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
-       if (!nents) {
-               DRM_ERROR("failed to map sgl with iommu.\n");
-               sgt = ERR_PTR(-EIO);
-               goto err_unlock;
+       if (dir != DMA_NONE) {
+               nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+               if (!nents) {
+                       DRM_ERROR("failed to map sgl with iommu.\n");
+                       sg_free_table(sgt);
+                       sgt = ERR_PTR(-EIO);
+                       goto err_unlock;
+               }
        }
 
+       exynos_attach->is_mapped = true;
        exynos_attach->dir = dir;
        attach->priv = exynos_attach;
 
index b9e51bc09e81ad3ba81943e860ebb7a2580e0f2d..4606fac7241a0f2762dd0f10e8674efda02ac7d3 100644 (file)
@@ -148,8 +148,8 @@ struct exynos_drm_overlay {
 struct exynos_drm_display_ops {
        enum exynos_drm_output_type type;
        bool (*is_connected)(struct device *dev);
-       int (*get_edid)(struct device *dev, struct drm_connector *connector,
-                               u8 *edid, int len);
+       struct edid *(*get_edid)(struct device *dev,
+                       struct drm_connector *connector);
        void *(*get_panel)(struct device *dev);
        int (*check_timing)(struct device *dev, void *timing);
        int (*power_on)(struct device *dev, int mode);
index 36c3905536a65e824766ec61a385bb44a5f6c3f9..9a4c08e7453c35b2566c2c76b3bc7fa4406d7696 100644 (file)
@@ -324,7 +324,7 @@ out:
        g2d_userptr = NULL;
 }
 
-dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
+static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
                                        unsigned long userptr,
                                        unsigned long size,
                                        struct drm_file *filp,
index 850e9950b7da06c44d7fddaed4afdee812ce88ac..28644539b3056620912df744aaac7cccad6fa9a4 100644 (file)
@@ -108,18 +108,17 @@ static bool drm_hdmi_is_connected(struct device *dev)
        return false;
 }
 
-static int drm_hdmi_get_edid(struct device *dev,
-               struct drm_connector *connector, u8 *edid, int len)
+static struct edid *drm_hdmi_get_edid(struct device *dev,
+                       struct drm_connector *connector)
 {
        struct drm_hdmi_context *ctx = to_context(dev);
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
        if (hdmi_ops && hdmi_ops->get_edid)
-               return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector, edid,
-                                         len);
+               return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector);
 
-       return 0;
+       return NULL;
 }
 
 static int drm_hdmi_check_timing(struct device *dev, void *timing)
index 784a7e9a766c4a178e2f73e628e63adce5f6c364..d80516fc9ed7efd4675dd582ba3601118a4ffbad 100644 (file)
@@ -30,8 +30,8 @@ struct exynos_drm_hdmi_context {
 struct exynos_hdmi_ops {
        /* display */
        bool (*is_connected)(void *ctx);
-       int (*get_edid)(void *ctx, struct drm_connector *connector,
-                       u8 *edid, int len);
+       struct edid *(*get_edid)(void *ctx,
+                       struct drm_connector *connector);
        int (*check_timing)(void *ctx, void *timing);
        int (*power_on)(void *ctx, int mode);
 
index 0bda96454a02fe6e8cf31ced93c37249355a5f99..1a556354e92fa15ee5b051600662a9f0d98a868b 100644 (file)
@@ -869,7 +869,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
        }
 }
 
-void ipp_handle_cmd_work(struct device *dev,
+static void ipp_handle_cmd_work(struct device *dev,
                struct exynos_drm_ippdrv *ippdrv,
                struct drm_exynos_ipp_cmd_work *cmd_work,
                struct drm_exynos_ipp_cmd_node *c_node)
index e9e83ef688f0c3a4ccf0253154975eebddb1d378..f976e29def6effa8adc973e92b28ba9f9add4443 100644 (file)
@@ -734,7 +734,7 @@ static int rotator_remove(struct platform_device *pdev)
        return 0;
 }
 
-struct rot_limit_table rot_limit_tbl = {
+static struct rot_limit_table rot_limit_tbl = {
        .ycbcr420_2p = {
                .min_w = 32,
                .min_h = 32,
@@ -751,7 +751,7 @@ struct rot_limit_table rot_limit_tbl = {
        },
 };
 
-struct platform_device_id rotator_driver_ids[] = {
+static struct platform_device_id rotator_driver_ids[] = {
        {
                .name           = "exynos-rot",
                .driver_data    = (unsigned long)&rot_limit_tbl,
index d0ca3c4e06c60bbeca05e87b783d1e7e4b690f16..13ccbd4bcfaa8c863f27d24870101c32166ecb47 100644 (file)
@@ -98,10 +98,12 @@ static bool vidi_display_is_connected(struct device *dev)
        return ctx->connected ? true : false;
 }
 
-static int vidi_get_edid(struct device *dev, struct drm_connector *connector,
-                               u8 *edid, int len)
+static struct edid *vidi_get_edid(struct device *dev,
+                       struct drm_connector *connector)
 {
        struct vidi_context *ctx = get_vidi_context(dev);
+       struct edid *edid;
+       int edid_len;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -111,13 +113,18 @@ static int vidi_get_edid(struct device *dev, struct drm_connector *connector,
         */
        if (!ctx->raw_edid) {
                DRM_DEBUG_KMS("raw_edid is null.\n");
-               return -EFAULT;
+               return ERR_PTR(-EFAULT);
        }
 
-       memcpy(edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
-                                       * EDID_LENGTH, len));
+       edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
+       edid = kzalloc(edid_len, GFP_KERNEL);
+       if (!edid) {
+               DRM_DEBUG_KMS("failed to allocate edid\n");
+               return ERR_PTR(-ENOMEM);
+       }
 
-       return 0;
+       memcpy(edid, ctx->raw_edid, edid_len);
+       return edid;
 }
 
 static void *vidi_get_panel(struct device *dev)
@@ -514,7 +521,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
        struct exynos_drm_manager *manager;
        struct exynos_drm_display_ops *display_ops;
        struct drm_exynos_vidi_connection *vidi = data;
-       struct edid *raw_edid;
        int edid_len;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -551,11 +557,11 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
        }
 
        if (vidi->connection) {
-               if (!vidi->edid) {
-                       DRM_DEBUG_KMS("edid data is null.\n");
+               struct edid *raw_edid  = (struct edid *)(uint32_t)vidi->edid;
+               if (!drm_edid_is_valid(raw_edid)) {
+                       DRM_DEBUG_KMS("edid data is invalid.\n");
                        return -EINVAL;
                }
-               raw_edid = (struct edid *)(uint32_t)vidi->edid;
                edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
                ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL);
                if (!ctx->raw_edid) {
index 41ff79d8ac8ec27db3f4b5cd3edadb0d86dde241..fbab3c4686031ce9c07a1b19546ab73ddb4f92e7 100644 (file)
@@ -34,7 +34,6 @@
 #include <linux/regulator/consumer.h>
 #include <linux/io.h>
 #include <linux/of_gpio.h>
-#include <plat/gpio-cfg.h>
 
 #include <drm/exynos_drm.h>
 
@@ -98,8 +97,7 @@ struct hdmi_context {
 
        void __iomem                    *regs;
        void                            *parent_ctx;
-       int                             external_irq;
-       int                             internal_irq;
+       int                             irq;
 
        struct i2c_client               *ddc_port;
        struct i2c_client               *hdmiphy_port;
@@ -1391,8 +1389,7 @@ static bool hdmi_is_connected(void *ctx)
        return hdata->hpd;
 }
 
-static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
-                               u8 *edid, int len)
+static struct edid *hdmi_get_edid(void *ctx, struct drm_connector *connector)
 {
        struct edid *raw_edid;
        struct hdmi_context *hdata = ctx;
@@ -1400,22 +1397,18 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
        DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
        if (!hdata->ddc_port)
-               return -ENODEV;
+               return ERR_PTR(-ENODEV);
 
        raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter);
-       if (raw_edid) {
-               hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid);
-               memcpy(edid, raw_edid, min((1 + raw_edid->extensions)
-                                       * EDID_LENGTH, len));
-               DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
-                       (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
-                       raw_edid->width_cm, raw_edid->height_cm);
-               kfree(raw_edid);
-       } else {
-               return -ENODEV;
-       }
+       if (!raw_edid)
+               return ERR_PTR(-ENODEV);
 
-       return 0;
+       hdata->dvi_mode = !drm_detect_hdmi_monitor(raw_edid);
+       DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
+               (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
+               raw_edid->width_cm, raw_edid->height_cm);
+
+       return raw_edid;
 }
 
 static int hdmi_v13_check_timing(struct fb_videomode *check_timing)
@@ -1652,16 +1645,16 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
 
        /* resetting HDMI core */
        hdmi_reg_writemask(hdata, reg,  0, HDMI_CORE_SW_RSTOUT);
-       mdelay(10);
+       usleep_range(10000, 12000);
        hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT);
-       mdelay(10);
+       usleep_range(10000, 12000);
 }
 
 static void hdmi_conf_init(struct hdmi_context *hdata)
 {
        struct hdmi_infoframe infoframe;
 
-       /* disable HPD interrupts */
+       /* disable HPD interrupts from HDMI IP block, use GPIO instead */
        hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
                HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
 
@@ -1779,7 +1772,7 @@ static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
                u32 val = hdmi_reg_read(hdata, HDMI_V13_PHY_STATUS);
                if (val & HDMI_PHY_STATUS_READY)
                        break;
-               mdelay(1);
+               usleep_range(1000, 2000);
        }
        /* steady state not achieved */
        if (tries == 0) {
@@ -1946,7 +1939,7 @@ static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
                u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS_0);
                if (val & HDMI_PHY_STATUS_READY)
                        break;
-               mdelay(1);
+               usleep_range(1000, 2000);
        }
        /* steady state not achieved */
        if (tries == 0) {
@@ -1998,9 +1991,9 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
 
        /* reset hdmiphy */
        hdmi_reg_writemask(hdata, reg, ~0, HDMI_PHY_SW_RSTOUT);
-       mdelay(10);
+       usleep_range(10000, 12000);
        hdmi_reg_writemask(hdata, reg,  0, HDMI_PHY_SW_RSTOUT);
-       mdelay(10);
+       usleep_range(10000, 12000);
 }
 
 static void hdmiphy_poweron(struct hdmi_context *hdata)
@@ -2048,7 +2041,7 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
                return;
        }
 
-       mdelay(10);
+       usleep_range(10000, 12000);
 
        /* operation mode */
        operation[0] = 0x1f;
@@ -2170,6 +2163,13 @@ static void hdmi_commit(void *ctx)
 
        DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
+       mutex_lock(&hdata->hdmi_mutex);
+       if (!hdata->powered) {
+               mutex_unlock(&hdata->hdmi_mutex);
+               return;
+       }
+       mutex_unlock(&hdata->hdmi_mutex);
+
        hdmi_conf_apply(hdata);
 }
 
@@ -2265,7 +2265,7 @@ static struct exynos_hdmi_ops hdmi_ops = {
        .dpms           = hdmi_dpms,
 };
 
-static irqreturn_t hdmi_external_irq_thread(int irq, void *arg)
+static irqreturn_t hdmi_irq_thread(int irq, void *arg)
 {
        struct exynos_drm_hdmi_context *ctx = arg;
        struct hdmi_context *hdata = ctx->ctx;
@@ -2280,31 +2280,6 @@ static irqreturn_t hdmi_external_irq_thread(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
-static irqreturn_t hdmi_internal_irq_thread(int irq, void *arg)
-{
-       struct exynos_drm_hdmi_context *ctx = arg;
-       struct hdmi_context *hdata = ctx->ctx;
-       u32 intc_flag;
-
-       intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG);
-       /* clearing flags for HPD plug/unplug */
-       if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
-               DRM_DEBUG_KMS("unplugged\n");
-               hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
-                       HDMI_INTC_FLAG_HPD_UNPLUG);
-       }
-       if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
-               DRM_DEBUG_KMS("plugged\n");
-               hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
-                       HDMI_INTC_FLAG_HPD_PLUG);
-       }
-
-       if (ctx->drm_dev)
-               drm_helper_hpd_irq_event(ctx->drm_dev);
-
-       return IRQ_HANDLED;
-}
-
 static int hdmi_resources_init(struct hdmi_context *hdata)
 {
        struct device *dev = hdata->dev;
@@ -2555,39 +2530,24 @@ static int hdmi_probe(struct platform_device *pdev)
 
        hdata->hdmiphy_port = hdmi_hdmiphy;
 
-       hdata->external_irq = gpio_to_irq(hdata->hpd_gpio);
-       if (hdata->external_irq < 0) {
-               DRM_ERROR("failed to get GPIO external irq\n");
-               ret = hdata->external_irq;
-               goto err_hdmiphy;
-       }
-
-       hdata->internal_irq = platform_get_irq(pdev, 0);
-       if (hdata->internal_irq < 0) {
-               DRM_ERROR("failed to get platform internal irq\n");
-               ret = hdata->internal_irq;
+       hdata->irq = gpio_to_irq(hdata->hpd_gpio);
+       if (hdata->irq < 0) {
+               DRM_ERROR("failed to get GPIO irq\n");
+               ret = hdata->irq;
                goto err_hdmiphy;
        }
 
        hdata->hpd = gpio_get_value(hdata->hpd_gpio);
 
-       ret = request_threaded_irq(hdata->external_irq, NULL,
-                       hdmi_external_irq_thread, IRQF_TRIGGER_RISING |
+       ret = request_threaded_irq(hdata->irq, NULL,
+                       hdmi_irq_thread, IRQF_TRIGGER_RISING |
                        IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
-                       "hdmi_external", drm_hdmi_ctx);
+                       "hdmi", drm_hdmi_ctx);
        if (ret) {
-               DRM_ERROR("failed to register hdmi external interrupt\n");
+               DRM_ERROR("failed to register hdmi interrupt\n");
                goto err_hdmiphy;
        }
 
-       ret = request_threaded_irq(hdata->internal_irq, NULL,
-                       hdmi_internal_irq_thread, IRQF_ONESHOT,
-                       "hdmi_internal", drm_hdmi_ctx);
-       if (ret) {
-               DRM_ERROR("failed to register hdmi internal interrupt\n");
-               goto err_free_irq;
-       }
-
        /* Attach HDMI Driver to common hdmi. */
        exynos_hdmi_drv_attach(drm_hdmi_ctx);
 
@@ -2598,8 +2558,6 @@ static int hdmi_probe(struct platform_device *pdev)
 
        return 0;
 
-err_free_irq:
-       free_irq(hdata->external_irq, drm_hdmi_ctx);
 err_hdmiphy:
        i2c_del_driver(&hdmiphy_driver);
 err_ddc:
@@ -2617,8 +2575,7 @@ static int hdmi_remove(struct platform_device *pdev)
 
        pm_runtime_disable(dev);
 
-       free_irq(hdata->internal_irq, hdata);
-       free_irq(hdata->external_irq, hdata);
+       free_irq(hdata->irq, hdata);
 
 
        /* hdmiphy i2c driver */
@@ -2637,8 +2594,7 @@ static int hdmi_suspend(struct device *dev)
 
        DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
-       disable_irq(hdata->internal_irq);
-       disable_irq(hdata->external_irq);
+       disable_irq(hdata->irq);
 
        hdata->hpd = false;
        if (ctx->drm_dev)
@@ -2663,8 +2619,7 @@ static int hdmi_resume(struct device *dev)
 
        hdata->hpd = gpio_get_value(hdata->hpd_gpio);
 
-       enable_irq(hdata->external_irq);
-       enable_irq(hdata->internal_irq);
+       enable_irq(hdata->irq);
 
        if (!pm_runtime_suspended(dev)) {
                DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
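
Two independent cleanups run through this file: hot-plug detection now uses a single
GPIO-backed threaded interrupt (the separate "internal" HDMI-block HPD interrupt is
gone), and the 1 ms and 10 ms busy-waits become usleep_range() calls, the preferred form
for millisecond-scale delays in sleepable context. A minimal sketch of the latter,
assuming the caller may sleep:

	#include <linux/delay.h>

	/* Give the PHY time to settle without spinning: usleep_range(min, max)
	 * sleeps (so it must not be used in atomic context), while mdelay()
	 * would busy-wait for the full 10 ms. */
	static void phy_settle(void)
	{
		usleep_range(10000, 12000);	/* 10-12 ms */
	}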
index c187ea33b748c031b91d10e45479d98368b297f6..c414584bfbaecf36d285be933e9e212fbea7784d 100644 (file)
@@ -600,7 +600,7 @@ static void vp_win_reset(struct mixer_context *ctx)
                /* waiting until VP_SRESET_PROCESSING is 0 */
                if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
                        break;
-               mdelay(10);
+               usleep_range(10000, 12000);
        }
        WARN(tries == 0, "failed to reset Video Processor\n");
 }
@@ -776,6 +776,13 @@ static void mixer_win_commit(void *ctx, int win)
 
        DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
 
+       mutex_lock(&mixer_ctx->mixer_mutex);
+       if (!mixer_ctx->powered) {
+               mutex_unlock(&mixer_ctx->mixer_mutex);
+               return;
+       }
+       mutex_unlock(&mixer_ctx->mixer_mutex);
+
        if (win > 1 && mixer_ctx->vp_enabled)
                vp_video_buffer(mixer_ctx, win);
        else
index e6a11ca85eafa913f97d315493abd995fd0a764c..9d4a2c2adf0e0883d76d13070fa09f7aa32e1526 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/debugfs.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <generated/utsrelease.h>
 #include <drm/drmP.h>
 #include "intel_drv.h"
 #include "intel_ringbuffer.h"
@@ -641,6 +642,7 @@ static void i915_ring_error_state(struct seq_file *m,
        seq_printf(m, "%s command stream:\n", ring_str(ring));
        seq_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
        seq_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
+       seq_printf(m, "  CTL: 0x%08x\n", error->ctl[ring]);
        seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
        seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
        seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
@@ -689,10 +691,13 @@ static int i915_error_state(struct seq_file *m, void *unused)
 
        seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
                   error->time.tv_usec);
+       seq_printf(m, "Kernel: " UTS_RELEASE);
        seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
        seq_printf(m, "EIR: 0x%08x\n", error->eir);
        seq_printf(m, "IER: 0x%08x\n", error->ier);
        seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+       seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
+       seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
        seq_printf(m, "CCID: 0x%08x\n", error->ccid);
 
        for (i = 0; i < dev_priv->num_fence_regs; i++)
index ed3059575576c4ea869c462e15e61463b4237f0f..12ab3bdea54dca227e8bd5f3f62cf00a31ce85ba 100644 (file)
@@ -188,10 +188,13 @@ struct drm_i915_error_state {
        u32 pgtbl_er;
        u32 ier;
        u32 ccid;
+       u32 derrmr;
+       u32 forcewake;
        bool waiting[I915_NUM_RINGS];
        u32 pipestat[I915_MAX_PIPES];
        u32 tail[I915_NUM_RINGS];
        u32 head[I915_NUM_RINGS];
+       u32 ctl[I915_NUM_RINGS];
        u32 ipeir[I915_NUM_RINGS];
        u32 ipehr[I915_NUM_RINGS];
        u32 instdone[I915_NUM_RINGS];
index da3c82e301b1868257ffa60fc90b31714b934ea0..8febea6daa0840b08f8aae13418b859204b7bc2d 100644 (file)
@@ -1717,7 +1717,8 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 }
 
 static long
-i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
+                 bool purgeable_only)
 {
        struct drm_i915_gem_object *obj, *next;
        long count = 0;
@@ -1725,7 +1726,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
        list_for_each_entry_safe(obj, next,
                                 &dev_priv->mm.unbound_list,
                                 gtt_list) {
-               if (i915_gem_object_is_purgeable(obj) &&
+               if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
                    i915_gem_object_put_pages(obj) == 0) {
                        count += obj->base.size >> PAGE_SHIFT;
                        if (count >= target)
@@ -1736,7 +1737,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
        list_for_each_entry_safe(obj, next,
                                 &dev_priv->mm.inactive_list,
                                 mm_list) {
-               if (i915_gem_object_is_purgeable(obj) &&
+               if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
                    i915_gem_object_unbind(obj) == 0 &&
                    i915_gem_object_put_pages(obj) == 0) {
                        count += obj->base.size >> PAGE_SHIFT;
@@ -1748,6 +1749,12 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
        return count;
 }
 
+static long
+i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+{
+       return __i915_gem_shrink(dev_priv, target, true);
+}
+
 static void
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
@@ -3522,14 +3529,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
                goto out;
        }
 
-       obj->user_pin_count++;
-       obj->pin_filp = file;
-       if (obj->user_pin_count == 1) {
+       if (obj->user_pin_count == 0) {
                ret = i915_gem_object_pin(obj, args->alignment, true, false);
                if (ret)
                        goto out;
        }
 
+       obj->user_pin_count++;
+       obj->pin_filp = file;
+
        /* XXX - flush the CPU caches for pinned objects
         * as the X server doesn't manage domains yet
         */
@@ -4394,6 +4402,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 
        if (nr_to_scan) {
                nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
+               if (nr_to_scan > 0)
+                       nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
+                                                       false);
                if (nr_to_scan > 0)
                        i915_gem_shrink_all(dev_priv);
        }
@@ -4402,7 +4413,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
                if (obj->pages_pin_count == 0)
                        cnt += obj->base.size >> PAGE_SHIFT;
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+       list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
                if (obj->pin_count == 0 && obj->pages_pin_count == 0)
                        cnt += obj->base.size >> PAGE_SHIFT;
 
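
The pin ioctl hunk fixes an ordering bug: user_pin_count and pin_filp are now updated
only after i915_gem_object_pin() has succeeded, so a failed pin no longer leaves a stale
count behind; the shrinker hunks add __i915_gem_shrink() with a purgeable_only flag so
non-purgeable objects can also be released before falling back to i915_gem_shrink_all().
The increment-after-success pattern, reduced to a standalone sketch:

	#include <stdio.h>

	struct obj { int user_pin_count; };

	static int do_pin(struct obj *o)		/* stand-in for the real pin */
	{
		(void)o;
		return -1;				/* pretend pinning failed */
	}

	static int pin_ioctl(struct obj *o)
	{
		if (o->user_pin_count == 0) {
			int ret = do_pin(o);

			if (ret)
				return ret;		/* count left untouched */
		}
		o->user_pin_count++;			/* bumped only on success */
		return 0;
	}

	int main(void)
	{
		struct obj o = { 0 };

		pin_ioctl(&o);
		printf("count after failed pin: %d\n", o.user_pin_count);	/* 0 */
		return 0;
	}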
index d6a994a07393677fb5b4fd6edb868cf7bd460cbe..26d08bb5821894daad6cdd1fa79b58f86cf226e2 100644 (file)
@@ -539,6 +539,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
        total = 0;
        for (i = 0; i < count; i++) {
                struct drm_i915_gem_relocation_entry __user *user_relocs;
+               u64 invalid_offset = (u64)-1;
+               int j;
 
                user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
 
@@ -549,6 +551,25 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                        goto err;
                }
 
+               /* As we do not update the known relocation offsets after
+                * relocating (due to the complexities in lock handling),
+                * we need to mark them as invalid now so that we force the
+                * relocation processing next time. Just in case the target
+                * object is evicted and then rebound into its old
+                * presumed_offset before the next execbuffer - if that
+                * happened we would make the mistake of assuming that the
+                * relocations were valid.
+                */
+               for (j = 0; j < exec[i].relocation_count; j++) {
+                       if (copy_to_user(&user_relocs[j].presumed_offset,
+                                        &invalid_offset,
+                                        sizeof(invalid_offset))) {
+                               ret = -EFAULT;
+                               mutex_lock(&dev->struct_mutex);
+                               goto err;
+                       }
+               }
+
                reloc_offset[i] = total;
                total += exec[i].relocation_count;
        }
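
The slow relocation path now poisons each user-visible presumed_offset with (u64)-1
before reprocessing the relocations, forcing them to be recomputed on the next execbuffer
even if the target object is evicted and later rebound at its old address. The sentinel
idea in isolation:

	#include <stdint.h>
	#include <stdio.h>

	#define INVALID_OFFSET	((uint64_t)-1)	/* never a real GTT offset */

	int main(void)
	{
		uint64_t presumed_offset = 0x1000;

		/* poison the cached value so the next pass re-relocates */
		presumed_offset = INVALID_OFFSET;

		if (presumed_offset == INVALID_OFFSET)
			printf("relocation will be recomputed\n");
		return 0;
	}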
index 2220dec3e5d983eb9f730d95011e7eb2ebd82adc..fe843389c7b473e2035124c38330c3a965450773 100644 (file)
@@ -1157,6 +1157,7 @@ static void i915_record_ring_state(struct drm_device *dev,
        error->acthd[ring->id] = intel_ring_get_active_head(ring);
        error->head[ring->id] = I915_READ_HEAD(ring);
        error->tail[ring->id] = I915_READ_TAIL(ring);
+       error->ctl[ring->id] = I915_READ_CTL(ring);
 
        error->cpu_ring_head[ring->id] = ring->head;
        error->cpu_ring_tail[ring->id] = ring->tail;
@@ -1251,6 +1252,16 @@ static void i915_capture_error_state(struct drm_device *dev)
        else
                error->ier = I915_READ(IER);
 
+       if (INTEL_INFO(dev)->gen >= 6)
+               error->derrmr = I915_READ(DERRMR);
+
+       if (IS_VALLEYVIEW(dev))
+               error->forcewake = I915_READ(FORCEWAKE_VLV);
+       else if (INTEL_INFO(dev)->gen >= 7)
+               error->forcewake = I915_READ(FORCEWAKE_MT);
+       else if (INTEL_INFO(dev)->gen == 6)
+               error->forcewake = I915_READ(FORCEWAKE);
+
        for_each_pipe(pipe)
                error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
 
index 186ee5c85b516592d2c83c8cac8cb2142121e993..59afb7eb6db635cf2f9bbe5477ffc8bd75fddd30 100644 (file)
 #define GEN7_ERR_INT   0x44040
 #define   ERR_INT_MMIO_UNCLAIMED (1<<13)
 
+#define DERRMR         0x44050
+
 /* GM45+ chicken bits -- debug workaround bits that may be required
  * for various sorts of correct behavior.  The top 16 bits of each are
  * the enables for writing to the corresponding low bit.
 #define MI_MODE                0x0209c
 # define VS_TIMER_DISPATCH                             (1 << 6)
 # define MI_FLUSH_ENABLE                               (1 << 12)
+# define ASYNC_FLIP_PERF_DISABLE                       (1 << 14)
 
 #define GEN6_GT_MODE   0x20d0
 #define   GEN6_GT_MODE_HI                              (1 << 9)
index a9fb046b94a140ab169cbd2eb016922fc702dd98..da1ad9c80bb53722e7afd423b27a819f31ff6927 100644 (file)
@@ -8598,19 +8598,30 @@ int intel_framebuffer_init(struct drm_device *dev,
 {
        int ret;
 
-       if (obj->tiling_mode == I915_TILING_Y)
+       if (obj->tiling_mode == I915_TILING_Y) {
+               DRM_DEBUG("hardware does not support tiling Y\n");
                return -EINVAL;
+       }
 
-       if (mode_cmd->pitches[0] & 63)
+       if (mode_cmd->pitches[0] & 63) {
+               DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
+                         mode_cmd->pitches[0]);
                return -EINVAL;
+       }
 
        /* FIXME <= Gen4 stride limits are bit unclear */
-       if (mode_cmd->pitches[0] > 32768)
+       if (mode_cmd->pitches[0] > 32768) {
+               DRM_DEBUG("pitch (%d) must be at less than 32768\n",
+                         mode_cmd->pitches[0]);
                return -EINVAL;
+       }
 
        if (obj->tiling_mode != I915_TILING_NONE &&
-           mode_cmd->pitches[0] != obj->stride)
+           mode_cmd->pitches[0] != obj->stride) {
+               DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
+                         mode_cmd->pitches[0], obj->stride);
                return -EINVAL;
+       }
 
        /* Reject formats not supported by any plane early. */
        switch (mode_cmd->pixel_format) {
@@ -8621,8 +8632,10 @@ int intel_framebuffer_init(struct drm_device *dev,
                break;
        case DRM_FORMAT_XRGB1555:
        case DRM_FORMAT_ARGB1555:
-               if (INTEL_INFO(dev)->gen > 3)
+               if (INTEL_INFO(dev)->gen > 3) {
+                       DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
                        return -EINVAL;
+               }
                break;
        case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_ABGR8888:
@@ -8630,18 +8643,22 @@ int intel_framebuffer_init(struct drm_device *dev,
        case DRM_FORMAT_ARGB2101010:
        case DRM_FORMAT_XBGR2101010:
        case DRM_FORMAT_ABGR2101010:
-               if (INTEL_INFO(dev)->gen < 4)
+               if (INTEL_INFO(dev)->gen < 4) {
+                       DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
                        return -EINVAL;
+               }
                break;
        case DRM_FORMAT_YUYV:
        case DRM_FORMAT_UYVY:
        case DRM_FORMAT_YVYU:
        case DRM_FORMAT_VYUY:
-               if (INTEL_INFO(dev)->gen < 6)
+               if (INTEL_INFO(dev)->gen < 5) {
+                       DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
                        return -EINVAL;
+               }
                break;
        default:
-               DRM_DEBUG_KMS("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
+               DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
                return -EINVAL;
        }
 
index 1b63d55318a0b466cdd45f6fcfb9af9259473557..fb3715b4b09d26d1357fdfe16320d6680f5ee92a 100644 (file)
@@ -2579,7 +2579,8 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
 
 static void
 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
-                                   struct intel_dp *intel_dp)
+                                   struct intel_dp *intel_dp,
+                                   struct edp_power_seq *out)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct edp_power_seq cur, vbt, spec, final;
@@ -2650,16 +2651,35 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
        intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
 #undef get_delay
 
+       DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+                     intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+                     intel_dp->panel_power_cycle_delay);
+
+       DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+                     intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+       if (out)
+               *out = final;
+}
+
+static void
+intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
+                                             struct intel_dp *intel_dp,
+                                             struct edp_power_seq *seq)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 pp_on, pp_off, pp_div;
+
        /* And finally store the new values in the power sequencer. */
-       pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
-               (final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
-       pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
-                (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
+       pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
+               (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
+       pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+                (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
        /* Compute the divisor for the pp clock, simply match the Bspec
         * formula. */
        pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
                        << PP_REFERENCE_DIVIDER_SHIFT;
-       pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000)
+       pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
                        << PANEL_POWER_CYCLE_DELAY_SHIFT);
 
        /* Haswell doesn't have any port selection bits for the panel
@@ -2675,14 +2695,6 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
        I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
        I915_WRITE(PCH_PP_DIVISOR, pp_div);
 
-
-       DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
-                     intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
-                     intel_dp->panel_power_cycle_delay);
-
-       DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
-                     intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
-
        DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
                      I915_READ(PCH_PP_ON_DELAYS),
                      I915_READ(PCH_PP_OFF_DELAYS),
@@ -2699,6 +2711,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *fixed_mode = NULL;
+       struct edp_power_seq power_seq = { 0 };
        enum port port = intel_dig_port->port;
        const char *name = NULL;
        int type;
@@ -2771,7 +2784,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        }
 
        if (is_edp(intel_dp))
-               intel_dp_init_panel_power_sequencer(dev, intel_dp);
+               intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
 
        intel_dp_i2c_init(intel_dp, intel_connector, name);
 
@@ -2798,6 +2811,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                        return;
                }
 
+               /* We now know it's not a ghost, init power sequence regs. */
+               intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+                                                             &power_seq);
+
                ironlake_edp_panel_vdd_on(intel_dp);
                edid = drm_get_edid(connector, &intel_dp->adapter);
                if (edid) {
index b9a660a536772fd6728255b6173c047d8607222e..17aee74258ad4dfa43083e431fc3f0b224aa4ec4 100644 (file)
@@ -774,14 +774,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
                },
        },
-       {
-               .callback = intel_no_lvds_dmi_callback,
-               .ident = "ZOTAC ZBOXSD-ID12/ID13",
-               .matches = {
-                       DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
-                       DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
-               },
-       },
        {
                .callback = intel_no_lvds_dmi_callback,
                .ident = "Gigabyte GA-D525TUD",
index e6f54ffab3ba4fa1dcf25b2e2dfe9a1f745ced07..3280cffe50f4486a4812d809d51ea904f79f04a9 100644 (file)
  * i915.i915_enable_fbc parameter
  */
 
+static bool intel_crtc_active(struct drm_crtc *crtc)
+{
+       /* Be paranoid as we can arrive here with only partial
+        * state retrieved from the hardware during setup.
+        */
+       return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
+}
+
 static void i8xx_disable_fbc(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -405,9 +413,8 @@ void intel_update_fbc(struct drm_device *dev)
         *   - going to an unsupported config (interlace, pixel multiply, etc.)
         */
        list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
-               if (to_intel_crtc(tmp_crtc)->active &&
-                   !to_intel_crtc(tmp_crtc)->primary_disabled &&
-                   tmp_crtc->fb) {
+               if (intel_crtc_active(tmp_crtc) &&
+                   !to_intel_crtc(tmp_crtc)->primary_disabled) {
                        if (crtc) {
                                DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
                                dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
@@ -992,7 +999,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
        struct drm_crtc *crtc, *enabled = NULL;
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               if (to_intel_crtc(crtc)->active && crtc->fb) {
+               if (intel_crtc_active(crtc)) {
                        if (enabled)
                                return NULL;
                        enabled = crtc;
@@ -1086,7 +1093,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
        int entries, tlb_miss;
 
        crtc = intel_get_crtc_for_plane(dev, plane);
-       if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) {
+       if (!intel_crtc_active(crtc)) {
                *cursor_wm = cursor->guard_size;
                *plane_wm = display->guard_size;
                return false;
@@ -1215,7 +1222,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
        int entries;
 
        crtc = intel_get_crtc_for_plane(dev, plane);
-       if (crtc->fb == NULL || !to_intel_crtc(crtc)->active)
+       if (!intel_crtc_active(crtc))
                return false;
 
        clock = crtc->mode.clock;       /* VESA DOT Clock */
@@ -1476,7 +1483,7 @@ static void i9xx_update_wm(struct drm_device *dev)
 
        fifo_size = dev_priv->display.get_fifo_size(dev, 0);
        crtc = intel_get_crtc_for_plane(dev, 0);
-       if (to_intel_crtc(crtc)->active && crtc->fb) {
+       if (intel_crtc_active(crtc)) {
                int cpp = crtc->fb->bits_per_pixel / 8;
                if (IS_GEN2(dev))
                        cpp = 4;
@@ -1490,7 +1497,7 @@ static void i9xx_update_wm(struct drm_device *dev)
 
        fifo_size = dev_priv->display.get_fifo_size(dev, 1);
        crtc = intel_get_crtc_for_plane(dev, 1);
-       if (to_intel_crtc(crtc)->active && crtc->fb) {
+       if (intel_crtc_active(crtc)) {
                int cpp = crtc->fb->bits_per_pixel / 8;
                if (IS_GEN2(dev))
                        cpp = 4;
@@ -2044,7 +2051,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
        int entries, tlb_miss;
 
        crtc = intel_get_crtc_for_plane(dev, plane);
-       if (crtc->fb == NULL || !to_intel_crtc(crtc)->active) {
+       if (!intel_crtc_active(crtc)) {
                *sprite_wm = display->guard_size;
                return false;
        }
@@ -4243,7 +4250,8 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
 {
        I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
-       POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+       /* something from same cacheline, but !FORCEWAKE_MT */
+       POSTING_READ(ECOBUS);
 }
 
 static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
@@ -4260,7 +4268,8 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
                DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
        I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-       POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+       /* something from same cacheline, but !FORCEWAKE_MT */
+       POSTING_READ(ECOBUS);
 
        if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
                            FORCEWAKE_ACK_TIMEOUT_MS))
@@ -4297,14 +4306,16 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
 static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
        I915_WRITE_NOTRACE(FORCEWAKE, 0);
-       /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
+       /* something from same cacheline, but !FORCEWAKE */
+       POSTING_READ(ECOBUS);
        gen6_gt_check_fifodbg(dev_priv);
 }
 
 static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
 {
        I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-       /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
+       /* something from same cacheline, but !FORCEWAKE_MT */
+       POSTING_READ(ECOBUS);
        gen6_gt_check_fifodbg(dev_priv);
 }
 
@@ -4344,6 +4355,8 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
 static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
 {
        I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
+       /* something from same cacheline, but !FORCEWAKE_VLV */
+       POSTING_READ(FORCEWAKE_ACK_VLV);
 }
 
 static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
@@ -4364,7 +4377,8 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
 static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
 {
        I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
-       /* The below doubles as a POSTING_READ */
+       /* something from same cacheline, but !FORCEWAKE_VLV */
+       POSTING_READ(FORCEWAKE_ACK_VLV);
        gen6_gt_check_fifodbg(dev_priv);
 }
 
index ae253e04c39105502fa1b82e05889970139f123f..42ff97d667d2b84ad339b27ab1f59110ba5831e8 100644 (file)
@@ -505,13 +505,25 @@ static int init_render_ring(struct intel_ring_buffer *ring)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret = init_ring_common(ring);
 
-       if (INTEL_INFO(dev)->gen > 3) {
+       if (INTEL_INFO(dev)->gen > 3)
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
-               if (IS_GEN7(dev))
-                       I915_WRITE(GFX_MODE_GEN7,
-                                  _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
-                                  _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
-       }
+
+       /* We need to disable the AsyncFlip performance optimisations in order
+        * to use MI_WAIT_FOR_EVENT within the CS. It should already be
+        * programmed to '1' on all products.
+        */
+       if (INTEL_INFO(dev)->gen >= 6)
+               I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+
+       /* Required for the hardware to program scanline values for waiting */
+       if (INTEL_INFO(dev)->gen == 6)
+               I915_WRITE(GFX_MODE,
+                          _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
+
+       if (IS_GEN7(dev))
+               I915_WRITE(GFX_MODE_GEN7,
+                          _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
+                          _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 
        if (INTEL_INFO(dev)->gen >= 5) {
                ret = init_pipe_control(ring);
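
The init_render_ring() changes above rely on the masked-register write convention: MI_MODE/GFX_MODE style registers take a value whose upper 16 bits select which of the lower 16 bits are actually updated, which is what _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() encode. A small standalone sketch of that encoding, with a hypothetical flag bit standing in for the real register bits:

#include <stdio.h>

/* illustrative reimplementation of the masked-bit helpers */
#define _MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a)  ((a) << 16)

#define EXAMPLE_FLAG (1 << 3)   /* hypothetical bit, standing in for e.g. GFX_REPLAY_MODE */

int main(void)
{
        /* upper halfword = which bits to touch, lower halfword = new value */
        printf("enable  write: %#010x\n", _MASKED_BIT_ENABLE(EXAMPLE_FLAG));
        printf("disable write: %#010x\n", _MASKED_BIT_DISABLE(EXAMPLE_FLAG));
        return 0;
}
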
index 827dcd4edf1c31eff9018c7c9bec37139e93602f..d7b060e0a23199b064c21d30366dd5ff78566e13 100644 (file)
@@ -120,11 +120,10 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
        I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
        I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
 
-       linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+       linear_offset = y * fb->pitches[0] + x * pixel_size;
        sprsurf_offset =
                intel_gen4_compute_offset_xtiled(&x, &y,
-                                                fb->bits_per_pixel / 8,
-                                                fb->pitches[0]);
+                                                pixel_size, fb->pitches[0]);
        linear_offset -= sprsurf_offset;
 
        /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
@@ -286,11 +285,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
        I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
        I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
 
-       linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+       linear_offset = y * fb->pitches[0] + x * pixel_size;
        dvssurf_offset =
                intel_gen4_compute_offset_xtiled(&x, &y,
-                                                fb->bits_per_pixel / 8,
-                                                fb->pitches[0]);
+                                                pixel_size, fb->pitches[0]);
        linear_offset -= dvssurf_offset;
 
        if (obj->tiling_mode != I915_TILING_NONE)
index c617f0480071ae4c07e9d524513149cfb1ac0452..8bbb58f94a193da85a769a5a387bc57b51ef5530 100644 (file)
@@ -66,10 +66,8 @@ nouveau_client_create_(const char *name, u64 devname, const char *cfg,
 
        ret = nouveau_handle_create(nv_object(client), ~0, ~0,
                                    nv_object(client), &client->root);
-       if (ret) {
-               nouveau_namedb_destroy(&client->base);
+       if (ret)
                return ret;
-       }
 
        /* prevent init/fini being called, the OS is in charge of this */
        atomic_set(&nv_object(client)->usecount, 2);
index 6b0843c338775d6455322a099f568a4390d8db64..e05c15777588616f69b753e915c7c8bcb59a922f 100644 (file)
@@ -73,8 +73,11 @@ _nouveau_falcon_init(struct nouveau_object *object)
        nv_debug(falcon, "data limit: %d\n", falcon->data.limit);
 
        /* wait for 'uc halted' to be signalled before continuing */
-       if (falcon->secret) {
-               nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
+       if (falcon->secret && falcon->version < 4) {
+               if (!falcon->version)
+                       nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
+               else
+                       nv_wait(falcon, 0x180, 0x80000000, 0);
                nv_wo32(falcon, 0x004, 0x00000010);
        }
 
index b8d2cbf8a7a7bb9b0f5a2c8fabfb65de9e93142a..264c2b338ac3a23fc82e1f6594d58dffcf8547d8 100644 (file)
@@ -109,7 +109,7 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
        while (!nv_iclass(namedb, NV_NAMEDB_CLASS))
                namedb = namedb->parent;
 
-       handle = *phandle = kzalloc(sizeof(*handle), GFP_KERNEL);
+       handle = kzalloc(sizeof(*handle), GFP_KERNEL);
        if (!handle)
                return -ENOMEM;
 
@@ -146,6 +146,9 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
        }
 
        hprintk(handle, TRACE, "created\n");
+
+       *phandle = handle;
+
        return 0;
 }
 
index f74c30aa33a0e334eda733ae1a0ea5b9a3d10377..48f06378d3f9d5f9e09e03291a11d6e26527f674 100644 (file)
@@ -99,7 +99,7 @@ nouveau_subdev_create_(struct nouveau_object *parent,
        if (ret)
                return ret;
 
-       mutex_init(&subdev->mutex);
+       __mutex_init(&subdev->mutex, subname, &oclass->lock_class_key);
        subdev->name = subname;
 
        if (parent) {
index 0f09af135415582ec7428d14af33351197d8f8e3..ca1a7d76a95b50d87864e95954b20aebd7906f76 100644 (file)
@@ -851,20 +851,23 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
        for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
                ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
 
-       if (nv_device(priv)->chipset  < 0x90 ||
-           nv_device(priv)->chipset == 0x92 ||
-           nv_device(priv)->chipset == 0xa0) {
-               for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
-                       ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
-               i += 3;
-       } else {
-               for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
-                       ctrl = nv_rd32(priv, 0x610798 + (i * 8));
-               i += 3;
+       if (!(ctrl & (1 << head))) {
+               if (nv_device(priv)->chipset  < 0x90 ||
+                   nv_device(priv)->chipset == 0x92 ||
+                   nv_device(priv)->chipset == 0xa0) {
+                       for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+                               ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
+                       i += 4;
+               } else {
+                       for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+                               ctrl = nv_rd32(priv, 0x610798 + (i * 8));
+                       i += 4;
+               }
        }
 
        if (!(ctrl & (1 << head)))
                return false;
+       i--;
 
        data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
        if (data) {
@@ -898,20 +901,23 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
        for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
                ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
 
-       if (nv_device(priv)->chipset  < 0x90 ||
-           nv_device(priv)->chipset == 0x92 ||
-           nv_device(priv)->chipset == 0xa0) {
-               for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
-                       ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
-               i += 3;
-       } else {
-               for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
-                       ctrl = nv_rd32(priv, 0x610794 + (i * 8));
-               i += 3;
+       if (!(ctrl & (1 << head))) {
+               if (nv_device(priv)->chipset  < 0x90 ||
+                   nv_device(priv)->chipset == 0x92 ||
+                   nv_device(priv)->chipset == 0xa0) {
+                       for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+                               ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
+                       i += 4;
+               } else {
+                       for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+                               ctrl = nv_rd32(priv, 0x610794 + (i * 8));
+                       i += 4;
+               }
        }
 
        if (!(ctrl & (1 << head)))
                return 0x0000;
+       i--;
 
        data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1);
        if (!data)
index 0193532ceac97c71cdfc544039eb530ce1806963..63acc0346ff2c437cda76e06df68c709d51bc46e 100644 (file)
@@ -36,6 +36,9 @@ nouveau_client(void *obj)
 
 int  nouveau_client_create_(const char *name, u64 device, const char *cfg,
                            const char *dbg, int, void **);
+#define nouveau_client_destroy(p)                                              \
+       nouveau_namedb_destroy(&(p)->base)
+
 int  nouveau_client_init(struct nouveau_client *);
 int  nouveau_client_fini(struct nouveau_client *, bool suspend);
 
index 5982935ee23a96896fb12d98e2dc5f723ab245e0..106bb19fdd9a65ee12b0c92241ea5045a4b692ac 100644 (file)
@@ -50,10 +50,13 @@ int  nouveau_object_fini(struct nouveau_object *, bool suspend);
 
 extern struct nouveau_ofuncs nouveau_object_ofuncs;
 
+/* Don't allocate dynamically, because lockdep needs lock_class_keys to be in
+ * ".data". */
 struct nouveau_oclass {
        u32 handle;
-       struct nouveau_ofuncs *ofuncs;
-       struct nouveau_omthds *omthds;
+       struct nouveau_ofuncs * const ofuncs;
+       struct nouveau_omthds * const omthds;
+       struct lock_class_key lock_class_key;
 };
 
 #define nv_oclass(o)    nv_object(o)->oclass
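
The comment in the nouveau_oclass hunk above is about lockdep's classing model: lock classes are keyed by the address of a struct lock_class_key, and lockdep expects those keys in static storage rather than inside kmalloc'd objects, hence embedding the key in the statically defined oclass and passing it to __mutex_init(). A kernel-context sketch of the same pattern (not standalone userspace code), with hypothetical names that are not part of this commit:

#include <linux/mutex.h>
#include <linux/lockdep.h>

struct my_subdev {
        struct mutex mutex;
};

/* one key per lock class, kept in static storage so lockdep can key on its address */
static struct lock_class_key my_subdev_lock_key;

static void my_subdev_setup(struct my_subdev *sd, const char *name)
{
        /* register the mutex under its own lockdep class via the static key */
        __mutex_init(&sd->mutex, name, &my_subdev_lock_key);
}
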
index c345097592f20bcf191f1d71db11beddfb044053..b2f3d4d0aa49fab5883234af8be8e3c44fa6c9ee 100644 (file)
@@ -38,6 +38,8 @@ enum nvbios_pll_type {
        PLL_UNK42  = 0x42,
        PLL_VPLL0  = 0x80,
        PLL_VPLL1  = 0x81,
+       PLL_VPLL2  = 0x82,
+       PLL_VPLL3  = 0x83,
        PLL_MAX    = 0xff
 };
 
index 2917d552689bca84e3c3dde8f059abdd9b80ebda..690ed438b2adbcf6c3263d5962e84054a62fef5d 100644 (file)
@@ -1534,7 +1534,6 @@ init_io(struct nvbios_init *init)
                mdelay(10);
                init_wr32(init, 0x614100, 0x10000018);
                init_wr32(init, 0x614900, 0x10000018);
-               return;
        }
 
        value = init_rdport(init, port) & mask;
index f6962c9b6c36b6f00db2715287dedf1c3adb9f4e..7c9626258a467029cf3bc7b63b37429dfa0e4fa8 100644 (file)
@@ -52,6 +52,8 @@ nvc0_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
        switch (info.type) {
        case PLL_VPLL0:
        case PLL_VPLL1:
+       case PLL_VPLL2:
+       case PLL_VPLL3:
                nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100);
                nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M);
                nv_wr32(priv, info.reg + 0x10, fN << 16);
index d6d16007ec1ac08e0e88ca35e8c4e8d20ae4e2f9..d62045f454b265df1c07ebcc9cd77207482b7dea 100644 (file)
@@ -86,8 +86,8 @@ nouveau_fb_preinit(struct nouveau_fb *pfb)
                        return ret;
        }
 
-       if (!nouveau_mm_initialised(&pfb->tags) && tags) {
-               ret = nouveau_mm_init(&pfb->tags, 0, ++tags, 1);
+       if (!nouveau_mm_initialised(&pfb->tags)) {
+               ret = nouveau_mm_init(&pfb->tags, 0, tags ? ++tags : 0, 1);
                if (ret)
                        return ret;
        }
index 487cb8c6c204dda270d1957e869b42beae4fdccd..eac236ed19b251042ea3bfe1ef75d6da6856d188 100644 (file)
@@ -99,7 +99,7 @@ nv50_fb_vram_init(struct nouveau_fb *pfb)
        struct nouveau_bios *bios = nouveau_bios(device);
        const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
        const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-       u32 size;
+       u32 size, tags = 0;
        int ret;
 
        pfb->ram.size = nv_rd32(pfb, 0x10020c);
@@ -140,10 +140,11 @@ nv50_fb_vram_init(struct nouveau_fb *pfb)
                        return ret;
 
                pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
+               tags = nv_rd32(pfb, 0x100320);
                break;
        }
 
-       return nv_rd32(pfb, 0x100320);
+       return tags;
 }
 
 static int
index 306bdf121452e6e9da8d6ddeb7ba6210ddeb0bde..7606ed15b6faf55ea531e6637cc86976b24012c2 100644 (file)
@@ -145,14 +145,14 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
        mem->memtype = type;
        mem->size = size;
 
-       mutex_lock(&mm->mutex);
+       mutex_lock(&pfb->base.mutex);
        do {
                if (back)
                        ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r);
                else
                        ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r);
                if (ret) {
-                       mutex_unlock(&mm->mutex);
+                       mutex_unlock(&pfb->base.mutex);
                        pfb->ram.put(pfb, &mem);
                        return ret;
                }
@@ -160,7 +160,7 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
                list_add_tail(&r->rl_entry, &mem->regions);
                size -= r->length;
        } while (size);
-       mutex_unlock(&mm->mutex);
+       mutex_unlock(&pfb->base.mutex);
 
        r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
        mem->offset = (u64)r->offset << 12;
index 1188227ca6aa909b88f4912608c97d1b3aa2d186..6565f3dbbe04e7e04c0721ad49f3523688f8b729 100644 (file)
@@ -40,15 +40,21 @@ nouveau_instobj_create_(struct nouveau_object *parent,
        if (ret)
                return ret;
 
+       mutex_lock(&imem->base.mutex);
        list_add(&iobj->head, &imem->list);
+       mutex_unlock(&imem->base.mutex);
        return 0;
 }
 
 void
 nouveau_instobj_destroy(struct nouveau_instobj *iobj)
 {
-       if (iobj->head.prev)
-               list_del(&iobj->head);
+       struct nouveau_subdev *subdev = nv_subdev(iobj->base.engine);
+
+       mutex_lock(&subdev->mutex);
+       list_del(&iobj->head);
+       mutex_unlock(&subdev->mutex);
+
        return nouveau_object_destroy(&iobj->base);
 }
 
@@ -88,6 +94,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
        if (ret)
                return ret;
 
+       mutex_lock(&imem->base.mutex);
+
        list_for_each_entry(iobj, &imem->list, head) {
                if (iobj->suspend) {
                        for (i = 0; i < iobj->size; i += 4)
@@ -97,6 +105,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
                }
        }
 
+       mutex_unlock(&imem->base.mutex);
+
        return 0;
 }
 
@@ -104,17 +114,26 @@ int
 nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
 {
        struct nouveau_instobj *iobj;
-       int i;
+       int i, ret = 0;
 
        if (suspend) {
+               mutex_lock(&imem->base.mutex);
+
                list_for_each_entry(iobj, &imem->list, head) {
                        iobj->suspend = vmalloc(iobj->size);
-                       if (iobj->suspend) {
-                               for (i = 0; i < iobj->size; i += 4)
-                                       iobj->suspend[i / 4] = nv_ro32(iobj, i);
-                       } else
-                               return -ENOMEM;
+                       if (!iobj->suspend) {
+                               ret = -ENOMEM;
+                               break;
+                       }
+
+                       for (i = 0; i < iobj->size; i += 4)
+                               iobj->suspend[i / 4] = nv_ro32(iobj, i);
                }
+
+               mutex_unlock(&imem->base.mutex);
+
+               if (ret)
+                       return ret;
        }
 
        return nouveau_subdev_fini(&imem->base, suspend);
index 082c11b75acb8a23a6559ae7a5840b882aa67fbe..77c67fc970e64f187d0b37d533fd3bef3cc8139c 100644 (file)
@@ -352,7 +352,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
        u64 mm_length = (offset + length) - mm_offset;
        int ret;
 
-       vm = *pvm = kzalloc(sizeof(*vm), GFP_KERNEL);
+       vm = kzalloc(sizeof(*vm), GFP_KERNEL);
        if (!vm)
                return -ENOMEM;
 
@@ -376,6 +376,8 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
                return ret;
        }
 
+       *pvm = vm;
+
        return 0;
 }
 
index 69d7b1d0b9d69e2b24f27be89cc7f869669bd4a3..1699a9083a2f86b179d0b4eba8ea1029fd52b97d 100644 (file)
@@ -28,6 +28,7 @@
  */
 
 #include <core/engine.h>
+#include <linux/swiotlb.h>
 
 #include <subdev/fb.h>
 #include <subdev/vm.h>
index ac340ba32017402e4fc19e26c0189961e9b96927..e620ba8271b4bc4e5d71edbd8f787a2c14eecf0b 100644 (file)
@@ -127,12 +127,26 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
                             struct nouveau_encoder **pnv_encoder)
 {
        struct drm_device *dev = connector->dev;
+       struct nouveau_connector *nv_connector = nouveau_connector(connector);
        struct nouveau_drm *drm = nouveau_drm(dev);
+       struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
        struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
-       int i;
+       struct nouveau_i2c_port *port = NULL;
+       int i, panel = -ENODEV;
+
+       /* eDP panels need powering on by us (if the VBIOS doesn't default it
+        * to on) before doing any AUX channel transactions.  LVDS panel power
+        * is handled by the SOR itself, and not required for LVDS DDC.
+        */
+       if (nv_connector->type == DCB_CONNECTOR_eDP) {
+               panel = gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
+               if (panel == 0) {
+                       gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
+                       msleep(300);
+               }
+       }
 
        for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-               struct nouveau_i2c_port *port = NULL;
                struct nouveau_encoder *nv_encoder;
                struct drm_mode_object *obj;
                int id;
@@ -150,11 +164,19 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
                        port = i2c->find(i2c, nv_encoder->dcb->i2c_index);
                if (port && nv_probe_i2c(port, 0x50)) {
                        *pnv_encoder = nv_encoder;
-                       return port;
+                       break;
                }
+
+               port = NULL;
        }
 
-       return NULL;
+       /* eDP panel not detected, restore panel power GPIO to previous
+        * state to avoid confusing the SOR for other output types.
+        */
+       if (!port && panel == 0)
+               gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
+
+       return port;
 }
 
 static struct nouveau_encoder *
index e4188f24fc758a457f26cd447d5b1d62fc763e17..508b00a2ce0de6d48b80e33d1ef1bc515598b3d2 100644 (file)
@@ -225,15 +225,6 @@ nouveau_display_init(struct drm_device *dev)
        if (ret)
                return ret;
 
-       /* power on internal panel if it's not already.  the init tables of
-        * some vbios default this to off for some reason, causing the
-        * panel to not work after resume
-        */
-       if (gpio && gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff) == 0) {
-               gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
-               msleep(300);
-       }
-
        /* enable polling for external displays */
        drm_kms_helper_poll_enable(dev);
 
index 180a45e3b525adefa0a76ae8fa4218add782f3c0..5e7aef23825a0d33f08addfdcb97cc8e663f62b8 100644 (file)
@@ -84,11 +84,16 @@ nouveau_cli_create(struct pci_dev *pdev, const char *name,
        struct nouveau_cli *cli;
        int ret;
 
+       *pcli = NULL;
        ret = nouveau_client_create_(name, nouveau_name(pdev), nouveau_config,
                                     nouveau_debug, size, pcli);
        cli = *pcli;
-       if (ret)
+       if (ret) {
+               if (cli)
+                       nouveau_client_destroy(&cli->base);
+               *pcli = NULL;
                return ret;
+       }
 
        mutex_init(&cli->mutex);
        return 0;
@@ -240,6 +245,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
        return 0;
 }
 
+static struct lock_class_key drm_client_lock_class_key;
+
 static int
 nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 {
@@ -251,6 +258,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
        ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm);
        if (ret)
                return ret;
+       lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key);
 
        dev->dev_private = drm;
        drm->dev = dev;
index bedafd1c9539f5ae0f659c29e1c034dbd36de855..cdb83acdffe264b2144e615f96bc55d346384116 100644 (file)
@@ -60,6 +60,7 @@ u32  nv10_fence_read(struct nouveau_channel *);
 void nv10_fence_context_del(struct nouveau_channel *);
 void nv10_fence_destroy(struct nouveau_drm *);
 int  nv10_fence_create(struct nouveau_drm *);
+void nv17_fence_resume(struct nouveau_drm *drm);
 
 int nv50_fence_create(struct nouveau_drm *);
 int nv84_fence_create(struct nouveau_drm *);
index 184cdf806761c0bc0cefb52af00a8799ae8e6393..39ffc07f906b8f57fcaf2427e3a5be3bbfd76bb5 100644 (file)
@@ -505,7 +505,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
 
 static inline bool is_powersaving_dpms(int mode)
 {
-       return (mode != DRM_MODE_DPMS_ON);
+       return mode != DRM_MODE_DPMS_ON && mode != NV_DPMS_CLEARED;
 }
 
 static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
index 7ae7f97a6d4d84c273853d31bae664f16b7ca453..03017f24d593fa84b569a6c3f370b310b7ab09c2 100644 (file)
@@ -162,6 +162,13 @@ nv10_fence_destroy(struct nouveau_drm *drm)
        kfree(priv);
 }
 
+void nv17_fence_resume(struct nouveau_drm *drm)
+{
+       struct nv10_fence_priv *priv = drm->fence;
+
+       nouveau_bo_wr32(priv->bo, 0, priv->sequence);
+}
+
 int
 nv10_fence_create(struct nouveau_drm *drm)
 {
@@ -197,6 +204,7 @@ nv10_fence_create(struct nouveau_drm *drm)
                if (ret == 0) {
                        nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
                        priv->base.sync = nv17_fence_sync;
+                       priv->base.resume = nv17_fence_resume;
                }
        }
 
index c20f2727ea0ba2d114699aaf2df56416e9794729..d889f3ac0d412d4affac8c1a17428270b6f0a448 100644 (file)
@@ -122,6 +122,7 @@ nv50_fence_create(struct nouveau_drm *drm)
        if (ret == 0) {
                nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
                priv->base.sync = nv17_fence_sync;
+               priv->base.resume = nv17_fence_resume;
        }
 
        if (ret)
index 061fa0a28900612506e8af0b8c44e9d633fdb2d5..a2d478e8692a0ea4e0bf0300164c400045b4c655 100644 (file)
@@ -1313,14 +1313,18 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
                                if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
                                        radeon_wait_for_vblank(rdev, i);
                                        tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+                                       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                                        WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+                                       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                                }
                        } else {
                                tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
                                if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
                                        radeon_wait_for_vblank(rdev, i);
                                        tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+                                       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                                        WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+                                       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                                }
                        }
                        /* wait for the next frame */
@@ -1345,6 +1349,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
                blackout &= ~BLACKOUT_MODE_MASK;
                WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
        }
+       /* wait for the MC to settle */
+       udelay(100);
 }
 
 void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -1378,11 +1384,15 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
                        if (ASIC_IS_DCE6(rdev)) {
                                tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
                                tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+                               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                                WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+                               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                        } else {
                                tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
                                tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+                               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                                WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+                               WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                        }
                        /* wait for the next frame */
                        frame_count = radeon_get_vblank_counter(rdev, i);
@@ -2036,9 +2046,20 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
        WREG32(DMA_TILING_CONFIG, gb_addr_config);
 
-       tmp = gb_addr_config & NUM_PIPES_MASK;
-       tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
-                                       EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+       if ((rdev->config.evergreen.max_backends == 1) &&
+           (rdev->flags & RADEON_IS_IGP)) {
+               if ((disabled_rb_mask & 3) == 1) {
+                       /* RB0 disabled, RB1 enabled */
+                       tmp = 0x11111111;
+               } else {
+                       /* RB1 disabled, RB0 enabled */
+                       tmp = 0x00000000;
+               }
+       } else {
+               tmp = gb_addr_config & NUM_PIPES_MASK;
+               tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
+                                               EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+       }
        WREG32(GB_BACKEND_MAP, tmp);
 
        WREG32(CGTS_SYS_TCC_DISABLE, 0);
@@ -2401,6 +2422,12 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
 {
        struct evergreen_mc_save save;
 
+       if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+               reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+       if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+               reset_mask &= ~RADEON_RESET_DMA;
+
        if (reset_mask == 0)
                return 0;
 
index 7a445666e71f1221ca92cd07bcdea61b29867ded..ee4cff534f100dae84d64bff2122b9b2bc89a9aa 100644 (file)
@@ -2909,14 +2909,14 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                return -EINVAL;
                        }
                        if (tiled) {
-                               dst_offset = ib[idx+1];
+                               dst_offset = radeon_get_ib_value(p, idx+1);
                                dst_offset <<= 8;
 
                                ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
                                p->idx += count + 7;
                        } else {
-                               dst_offset = ib[idx+1];
-                               dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+                               dst_offset = radeon_get_ib_value(p, idx+1);
+                               dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
 
                                ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
                                ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
@@ -2954,12 +2954,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                        DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
                                                        return -EINVAL;
                                                }
-                                               dst_offset = ib[idx+1];
+                                               dst_offset = radeon_get_ib_value(p, idx+1);
                                                dst_offset <<= 8;
-                                               dst2_offset = ib[idx+2];
+                                               dst2_offset = radeon_get_ib_value(p, idx+2);
                                                dst2_offset <<= 8;
-                                               src_offset = ib[idx+8];
-                                               src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+                                               src_offset = radeon_get_ib_value(p, idx+8);
+                                               src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
                                                if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
                                                        dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
                                                                 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3014,12 +3014,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                        DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
                                                        return -EINVAL;
                                                }
-                                               dst_offset = ib[idx+1];
+                                               dst_offset = radeon_get_ib_value(p, idx+1);
                                                dst_offset <<= 8;
-                                               dst2_offset = ib[idx+2];
+                                               dst2_offset = radeon_get_ib_value(p, idx+2);
                                                dst2_offset <<= 8;
-                                               src_offset = ib[idx+8];
-                                               src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+                                               src_offset = radeon_get_ib_value(p, idx+8);
+                                               src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
                                                if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
                                                        dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
                                                                 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3046,22 +3046,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                /* detile bit */
                                                if (idx_value & (1 << 31)) {
                                                        /* tiled src, linear dst */
-                                                       src_offset = ib[idx+1];
+                                                       src_offset = radeon_get_ib_value(p, idx+1);
                                                        src_offset <<= 8;
                                                        ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
 
-                                                       dst_offset = ib[idx+7];
-                                                       dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+                                                       dst_offset = radeon_get_ib_value(p, idx+7);
+                                                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
                                                        ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
                                                        ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
                                                } else {
                                                        /* linear src, tiled dst */
-                                                       src_offset = ib[idx+7];
-                                                       src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+                                                       src_offset = radeon_get_ib_value(p, idx+7);
+                                                       src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
                                                        ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
                                                        ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 
-                                                       dst_offset = ib[idx+1];
+                                                       dst_offset = radeon_get_ib_value(p, idx+1);
                                                        dst_offset <<= 8;
                                                        ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
                                                }
@@ -3098,12 +3098,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                        DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
                                                        return -EINVAL;
                                                }
-                                               dst_offset = ib[idx+1];
+                                               dst_offset = radeon_get_ib_value(p, idx+1);
                                                dst_offset <<= 8;
-                                               dst2_offset = ib[idx+2];
+                                               dst2_offset = radeon_get_ib_value(p, idx+2);
                                                dst2_offset <<= 8;
-                                               src_offset = ib[idx+8];
-                                               src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+                                               src_offset = radeon_get_ib_value(p, idx+8);
+                                               src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
                                                if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
                                                        dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
                                                                 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3135,22 +3135,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                /* detile bit */
                                                if (idx_value & (1 << 31)) {
                                                        /* tiled src, linear dst */
-                                                       src_offset = ib[idx+1];
+                                                       src_offset = radeon_get_ib_value(p, idx+1);
                                                        src_offset <<= 8;
                                                        ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
 
-                                                       dst_offset = ib[idx+7];
-                                                       dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+                                                       dst_offset = radeon_get_ib_value(p, idx+7);
+                                                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
                                                        ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
                                                        ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
                                                } else {
                                                        /* linear src, tiled dst */
-                                                       src_offset = ib[idx+7];
-                                                       src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+                                                       src_offset = radeon_get_ib_value(p, idx+7);
+                                                       src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
                                                        ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
                                                        ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 
-                                                       dst_offset = ib[idx+1];
+                                                       dst_offset = radeon_get_ib_value(p, idx+1);
                                                        dst_offset <<= 8;
                                                        ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
                                                }
@@ -3176,10 +3176,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                        switch (misc) {
                                        case 0:
                                                /* L2L, byte */
-                                               src_offset = ib[idx+2];
-                                               src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-                                               dst_offset = ib[idx+1];
-                                               dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+                                               src_offset = radeon_get_ib_value(p, idx+2);
+                                               src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+                                               dst_offset = radeon_get_ib_value(p, idx+1);
+                                               dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
                                                if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
                                                        dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
                                                                 src_offset + count, radeon_bo_size(src_reloc->robj));
@@ -3216,12 +3216,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                                        DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
                                                        return -EINVAL;
                                                }
-                                               dst_offset = ib[idx+1];
-                                               dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-                                               dst2_offset = ib[idx+2];
-                                               dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32;
-                                               src_offset = ib[idx+3];
-                                               src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+                                               dst_offset = radeon_get_ib_value(p, idx+1);
+                                               dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+                                               dst2_offset = radeon_get_ib_value(p, idx+2);
+                                               dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
+                                               src_offset = radeon_get_ib_value(p, idx+3);
+                                               src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
                                                if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
                                                        dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
                                                                 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3251,10 +3251,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                        }
                                } else {
                                        /* L2L, dw */
-                                       src_offset = ib[idx+2];
-                                       src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-                                       dst_offset = ib[idx+1];
-                                       dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+                                       src_offset = radeon_get_ib_value(p, idx+2);
+                                       src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+                                       dst_offset = radeon_get_ib_value(p, idx+1);
+                                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
                                        if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
                                                dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
                                                         src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3279,8 +3279,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
                                DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
                                return -EINVAL;
                        }
-                       dst_offset = ib[idx+1];
-                       dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+                       dst_offset = radeon_get_ib_value(p, idx+1);
+                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
                        if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
                                dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
                                         dst_offset, radeon_bo_size(dst_reloc->robj));
index 896f1cbc58a5454d1b4fa421e7ed6a6c32d56a52..835992d8d067bf199bdf2ec633e634133ed16a4a 100644 (file)
@@ -1216,7 +1216,7 @@ void cayman_dma_stop(struct radeon_device *rdev)
 int cayman_dma_resume(struct radeon_device *rdev)
 {
        struct radeon_ring *ring;
-       u32 rb_cntl, dma_cntl;
+       u32 rb_cntl, dma_cntl, ib_cntl;
        u32 rb_bufsz;
        u32 reg_offset, wb_offset;
        int i, r;
@@ -1265,7 +1265,11 @@ int cayman_dma_resume(struct radeon_device *rdev)
                WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
 
                /* enable DMA IBs */
-               WREG32(DMA_IB_CNTL + reg_offset, DMA_IB_ENABLE | CMD_VMID_FORCE);
+               ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+#ifdef __BIG_ENDIAN
+               ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+               WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
 
                dma_cntl = RREG32(DMA_CNTL + reg_offset);
                dma_cntl &= ~CTXEMPTY_INT_ENABLE;
@@ -1409,6 +1413,12 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
 {
        struct evergreen_mc_save save;
 
+       if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+               reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+       if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+               reset_mask &= ~RADEON_RESET_DMA;
+
        if (reset_mask == 0)
                return 0;
 
index 537e259b383720d97bbd0b452e74a1c2688eec49..becb03e8b32f6fdcc17630108c00bfc9fb4a973d 100644 (file)
@@ -1378,6 +1378,12 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
 {
        struct rv515_mc_save save;
 
+       if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+               reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+       if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+               reset_mask &= ~RADEON_RESET_DMA;
+
        if (reset_mask == 0)
                return 0;
 
@@ -1456,12 +1462,15 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev,
                              u32 disabled_rb_mask)
 {
        u32 rendering_pipe_num, rb_num_width, req_rb_num;
-       u32 pipe_rb_ratio, pipe_rb_remain;
+       u32 pipe_rb_ratio, pipe_rb_remain, tmp;
        u32 data = 0, mask = 1 << (max_rb_num - 1);
        unsigned i, j;
 
        /* mask out the RBs that don't exist on that asic */
-       disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
+       tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
+       /* make sure at least one RB is available */
+       if ((tmp & 0xff) != 0xff)
+               disabled_rb_mask = tmp;
 
        rendering_pipe_num = 1 << tiling_pipe_num;
        req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
@@ -2307,7 +2316,7 @@ void r600_dma_stop(struct radeon_device *rdev)
 int r600_dma_resume(struct radeon_device *rdev)
 {
        struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
-       u32 rb_cntl, dma_cntl;
+       u32 rb_cntl, dma_cntl, ib_cntl;
        u32 rb_bufsz;
        int r;
 
@@ -2347,7 +2356,11 @@ int r600_dma_resume(struct radeon_device *rdev)
        WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
 
        /* enable DMA IBs */
-       WREG32(DMA_IB_CNTL, DMA_IB_ENABLE);
+       ib_cntl = DMA_IB_ENABLE;
+#ifdef __BIG_ENDIAN
+       ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+       WREG32(DMA_IB_CNTL, ib_cntl);
 
        dma_cntl = RREG32(DMA_CNTL);
        dma_cntl &= ~CTXEMPTY_INT_ENABLE;
index 03191a56eb443424d35ffbaa84b82ffe96fa1d8a..9b2512bf1a46638f870b5967a45877455fc48c0f 100644 (file)
@@ -2476,8 +2476,10 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
        kfree(parser->relocs);
        for (i = 0; i < parser->nchunks; i++) {
                kfree(parser->chunks[i].kdata);
-               kfree(parser->chunks[i].kpage[0]);
-               kfree(parser->chunks[i].kpage[1]);
+               if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
+                       kfree(parser->chunks[i].kpage[0]);
+                       kfree(parser->chunks[i].kpage[1]);
+               }
        }
        kfree(parser->chunks);
        kfree(parser->chunks_array);
@@ -2561,16 +2563,16 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
        struct radeon_cs_chunk *relocs_chunk;
        unsigned idx;
 
+       *cs_reloc = NULL;
        if (p->chunk_relocs_idx == -1) {
                DRM_ERROR("No relocation chunk !\n");
                return -EINVAL;
        }
-       *cs_reloc = NULL;
        relocs_chunk = &p->chunks[p->chunk_relocs_idx];
        idx = p->dma_reloc_idx;
-       if (idx >= relocs_chunk->length_dw) {
+       if (idx >= p->nrelocs) {
                DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
-                         idx, relocs_chunk->length_dw);
+                         idx, p->nrelocs);
                return -EINVAL;
        }
        *cs_reloc = p->relocs_ptr[idx];
@@ -2621,14 +2623,14 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
                                return -EINVAL;
                        }
                        if (tiled) {
-                               dst_offset = ib[idx+1];
+                               dst_offset = radeon_get_ib_value(p, idx+1);
                                dst_offset <<= 8;
 
                                ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
                                p->idx += count + 5;
                        } else {
-                               dst_offset = ib[idx+1];
-                               dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+                               dst_offset = radeon_get_ib_value(p, idx+1);
+                               dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
 
                                ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
                                ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
@@ -2656,32 +2658,32 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
                                /* detile bit */
                                if (idx_value & (1 << 31)) {
                                        /* tiled src, linear dst */
-                                       src_offset = ib[idx+1];
+                                       src_offset = radeon_get_ib_value(p, idx+1);
                                        src_offset <<= 8;
                                        ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
 
-                                       dst_offset = ib[idx+5];
-                                       dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+                                       dst_offset = radeon_get_ib_value(p, idx+5);
+                                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
                                        ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
                                        ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
                                } else {
                                        /* linear src, tiled dst */
-                                       src_offset = ib[idx+5];
-                                       src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+                                       src_offset = radeon_get_ib_value(p, idx+5);
+                                       src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
                                        ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
                                        ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 
-                                       dst_offset = ib[idx+1];
+                                       dst_offset = radeon_get_ib_value(p, idx+1);
                                        dst_offset <<= 8;
                                        ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
                                }
                                p->idx += 7;
                        } else {
                                if (p->family >= CHIP_RV770) {
-                                       src_offset = ib[idx+2];
-                                       src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-                                       dst_offset = ib[idx+1];
-                                       dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+                                       src_offset = radeon_get_ib_value(p, idx+2);
+                                       src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+                                       dst_offset = radeon_get_ib_value(p, idx+1);
+                                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
 
                                        ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
                                        ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
@@ -2689,10 +2691,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
                                        ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
                                        p->idx += 5;
                                } else {
-                                       src_offset = ib[idx+2];
-                                       src_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
-                                       dst_offset = ib[idx+1];
-                                       dst_offset |= ((u64)(ib[idx+3] & 0xff0000)) << 16;
+                                       src_offset = radeon_get_ib_value(p, idx+2);
+                                       src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+                                       dst_offset = radeon_get_ib_value(p, idx+1);
+                                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
 
                                        ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
                                        ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
@@ -2722,8 +2724,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
                                DRM_ERROR("bad DMA_PACKET_WRITE\n");
                                return -EINVAL;
                        }
-                       dst_offset = ib[idx+1];
-                       dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+                       dst_offset = radeon_get_ib_value(p, idx+1);
+                       dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
                        if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
                                dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
                                         dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
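
Throughout r600_dma_cs_parse() the raw ib[idx+N] reads are replaced by radeon_get_ib_value(p, idx+N), apparently so the dwords are fetched from the parser's authoritative copy of the command stream rather than from an indirect-buffer array that may not be fully populated yet. The hypothetical accessor below only shows the general shape of the idea, reading through a bounds-checked helper and reassembling a 40-bit offset from two dwords; it is not the real radeon helper.

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical parser state: dwords live in a staging copy, so reads
     * go through an accessor instead of dereferencing ib[] directly. */
    struct cs_parser {
            const uint32_t *staging;   /* authoritative copy of the IB dwords */
            size_t          ndw;       /* number of dwords available */
    };

    static uint32_t cs_get_dw(const struct cs_parser *p, size_t idx)
    {
            if (idx >= p->ndw) {
                    fprintf(stderr, "IB read out of range: %zu >= %zu\n", idx, p->ndw);
                    return 0;
            }
            return p->staging[idx];
    }

    int main(void)
    {
            uint32_t dwords[4] = { 0x12345678, 0x00000100, 0x000000ff, 0 };
            struct cs_parser p = { dwords, 4 };
            /* 40-bit offset split across two dwords, as in the DMA packets above */
            uint64_t dst = cs_get_dw(&p, 1);
            dst |= ((uint64_t)(cs_get_dw(&p, 2) & 0xff)) << 32;
            printf("dst_offset = 0x%010llx\n", (unsigned long long)dst);
            return 0;
    }
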
index 34e52304a525001de8bba62d71278ae939edfda9..a08f657329a0e821d681678a6e1f74de357b042e 100644 (file)
@@ -324,7 +324,6 @@ struct radeon_bo {
        struct list_head                list;
        /* Protected by tbo.reserved */
        u32                             placements[3];
-       u32                             busy_placements[3];
        struct ttm_placement            placement;
        struct ttm_buffer_object        tbo;
        struct ttm_bo_kmap_obj          kmap;
@@ -654,6 +653,8 @@ struct radeon_ring {
        u32                     ptr_reg_mask;
        u32                     nop;
        u32                     idx;
+       u64                     last_semaphore_signal_addr;
+       u64                     last_semaphore_wait_addr;
 };
 
 /*
index 9056fafb00ea35eda70d2a576726abf677808750..0b202c07fe509075ed9bfedb3c8d9b01feb5d8d8 100644 (file)
@@ -1445,7 +1445,7 @@ static struct radeon_asic cayman_asic = {
        .vm = {
                .init = &cayman_vm_init,
                .fini = &cayman_vm_fini,
-               .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .set_page = &cayman_vm_set_page,
        },
        .ring = {
@@ -1572,7 +1572,7 @@ static struct radeon_asic trinity_asic = {
        .vm = {
                .init = &cayman_vm_init,
                .fini = &cayman_vm_fini,
-               .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .set_page = &cayman_vm_set_page,
        },
        .ring = {
@@ -1699,7 +1699,7 @@ static struct radeon_asic si_asic = {
        .vm = {
                .init = &si_vm_init,
                .fini = &si_vm_fini,
-               .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
                .set_page = &si_vm_set_page,
        },
        .ring = {
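
These three hunks move the per-ASIC vm.pt_ring_index from the async DMA ring to the GFX ring, i.e. page-table updates are routed to a different ring purely by changing one field in the ASIC description table. The sketch below illustrates that style of per-chip configuration table in plain C; the ring numbers and entries are invented for illustration.

    #include <stdio.h>

    /* Hypothetical ring identifiers, loosely mirroring the idea of choosing
     * which ring services page-table updates per ASIC family. */
    enum ring_index { RING_GFX = 0, RING_DMA0 = 3 };

    struct vm_config {
            const char     *name;
            enum ring_index pt_ring_index;
    };

    static const struct vm_config asic_table[] = {
            { "cayman",  RING_GFX },
            { "trinity", RING_GFX },
            { "si",      RING_GFX },
    };

    int main(void)
    {
            for (size_t i = 0; i < sizeof(asic_table) / sizeof(asic_table[0]); i++)
                    printf("%s: page tables updated on ring %d\n",
                           asic_table[i].name, asic_table[i].pt_ring_index);
            return 0;
    }
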
index 33a56a09ff10da2feea83f33388c15fad5041574..3e403bdda58fc8eb69b3583d647770f2b9b41210 100644 (file)
@@ -2470,6 +2470,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                                                                   1),
                                                                  ATOM_DEVICE_CRT1_SUPPORT);
                                }
+                               /* RV100 board with external TDMS bit mis-set.
+                                * Actually uses internal TMDS, clear the bit.
+                                */
+                               if (dev->pdev->device == 0x5159 &&
+                                   dev->pdev->subsystem_vendor == 0x1014 &&
+                                   dev->pdev->subsystem_device == 0x029A) {
+                                       tmp &= ~(1 << 4);
+                               }
                                if ((tmp >> 4) & 0x1) {
                                        devices |= ATOM_DEVICE_DFP2_SUPPORT;
                                        radeon_add_legacy_encoder(dev,
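
The added block is a board-specific quirk: for one RV100 card (device 0x5159, subsystem 0x1014:0x029A) the BIOS reports external TMDS even though the board uses internal TMDS, so the bit is cleared before it is tested. A small self-contained sketch of that kind of subsystem-ID quirk check, with the IDs taken from the hunk and the register value made up:

    #include <stdint.h>
    #include <stdio.h>

    struct pci_ids {
            uint16_t device;
            uint16_t subsystem_vendor;
            uint16_t subsystem_device;
    };

    /* Clear a mis-set "external TMDS" bit for one known-bad board. */
    static uint32_t apply_tmds_quirk(const struct pci_ids *id, uint32_t tmp)
    {
            if (id->device == 0x5159 &&
                id->subsystem_vendor == 0x1014 &&
                id->subsystem_device == 0x029A)
                    tmp &= ~(1u << 4);
            return tmp;
    }

    int main(void)
    {
            struct pci_ids bad = { 0x5159, 0x1014, 0x029A };
            printf("bios word 0x10 -> 0x%02x after quirk\n",
                   apply_tmds_quirk(&bad, 0x10));
            return 0;
    }
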
index 396baba0141a9c2abefbd5e8782a83e5634c4c96..5407459e56d2b5dd9619e698c2ec306c56104f4a 100644 (file)
@@ -279,13 +279,15 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                                  p->chunks[p->chunk_ib_idx].length_dw);
                        return -EINVAL;
                }
-               if ((p->rdev->flags & RADEON_IS_AGP)) {
+               if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
                        p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
                        p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
                        if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
                            p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
-                               kfree(p->chunks[i].kpage[0]);
-                               kfree(p->chunks[i].kpage[1]);
+                               kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
+                               kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
+                               p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
+                               p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
                                return -ENOMEM;
                        }
                }
@@ -583,7 +585,8 @@ static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
        struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
        int i;
        int size = PAGE_SIZE;
-       bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true;
+       bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
+               false : true;
 
        for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
                if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
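
These radeon_cs.c hunks, together with the r600_cs_parser_fini() change earlier, are about not freeing kpage[] twice: the pages are only allocated on AGP systems, the allocation-failure path now frees the slots it actually filled, and the pointers are reset to NULL so a later cleanup pass is harmless. A userspace sketch of the pattern, assuming free(NULL) semantics comparable to kfree(NULL):

    #include <stdlib.h>
    #include <stdio.h>

    struct chunk {
            void *kpage[2];
    };

    /* On allocation failure, free whatever was obtained and reset the
     * pointers so a later cleanup pass cannot free them a second time. */
    static int chunk_alloc_pages(struct chunk *c, size_t page_size)
    {
            c->kpage[0] = malloc(page_size);
            c->kpage[1] = malloc(page_size);
            if (!c->kpage[0] || !c->kpage[1]) {
                    free(c->kpage[0]);
                    free(c->kpage[1]);
                    c->kpage[0] = NULL;
                    c->kpage[1] = NULL;
                    return -1;
            }
            return 0;
    }

    static void chunk_fini(struct chunk *c)
    {
            /* free(NULL) is a no-op, so this is safe after a failed alloc */
            free(c->kpage[0]);
            free(c->kpage[1]);
    }

    int main(void)
    {
            struct chunk c = { { NULL, NULL } };
            if (chunk_alloc_pages(&c, 4096) == 0)
                    puts("pages allocated");
            chunk_fini(&c);
            return 0;
    }
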
index ad6df625e8b8a1cf869ac06bb898503022406a6b..0d67674b64b13e9a4618548fb1ca3d2d2211f797 100644 (file)
@@ -241,7 +241,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
                y = 0;
        }
 
-       if (ASIC_IS_AVIVO(rdev)) {
+       /* fixed on DCE6 and newer */
+       if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {
                int i = 0;
                struct drm_crtc *crtc_p;
 
index edfc54e41842248f4dad035c1ece973ca08837b5..0d6562bb0c93e61e7b57e4fd09f31a802ba34d99 100644 (file)
@@ -429,7 +429,8 @@ bool radeon_card_posted(struct radeon_device *rdev)
 {
        uint32_t reg;
 
-       if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
+       if (efi_enabled(EFI_BOOT) &&
+           rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
                return false;
 
        /* first check CRTCs */
index 1da2386d7cf74a9eac82674a53ac2cee3e8e52a5..05c96fa0b0515a9eab3a89b6bcab97fd1af888dd 100644 (file)
@@ -1115,14 +1115,16 @@ radeon_user_framebuffer_create(struct drm_device *dev,
        }
 
        radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
-       if (radeon_fb == NULL)
+       if (radeon_fb == NULL) {
+               drm_gem_object_unreference_unlocked(obj);
                return ERR_PTR(-ENOMEM);
+       }
 
        ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
        if (ret) {
                kfree(radeon_fb);
                drm_gem_object_unreference_unlocked(obj);
-               return NULL;
+               return ERR_PTR(ret);
        }
 
        return &radeon_fb->base;
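
radeon_user_framebuffer_create() now drops the GEM object reference when the allocation fails and returns ERR_PTR(ret) instead of NULL, so callers testing with IS_ERR() see a real error code on every failure path. A userspace sketch of the ERR_PTR convention, with stand-in err_ptr()/is_err() helpers rather than the kernel's:

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>
    #include <stdint.h>

    /* Userspace stand-ins for the kernel's ERR_PTR()/PTR_ERR()/IS_ERR() helpers. */
    #define MAX_ERRNO 4095
    static inline void *err_ptr(long err)      { return (void *)(intptr_t)err; }
    static inline long  ptr_err(const void *p) { return (long)(intptr_t)p; }
    static inline int   is_err(const void *p)
    {
            return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    struct fb { int dummy; };

    static struct fb *create_fb(int fail_init)
    {
            struct fb *fb = calloc(1, sizeof(*fb));
            if (!fb) {
                    /* in the driver: drop the GEM object reference here */
                    return err_ptr(-ENOMEM);
            }
            if (fail_init) {
                    free(fb);
                    /* ...and here, before returning the error pointer */
                    return err_ptr(-EINVAL);
            }
            return fb;
    }

    int main(void)
    {
            struct fb *fb = create_fb(1);
            if (is_err(fb))
                    printf("create_fb failed: %ld\n", ptr_err(fb));
            else
                    free(fb);
            return 0;
    }
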
index dff6cf77f9532266b7fe587500bab82b53024a6d..d9bf96ee299acc88181f003b1eff5dc226f7ced4 100644 (file)
  *   2.26.0 - r600-eg: fix htile size computation
  *   2.27.0 - r600-SI: Add CS ioctl support for async DMA
  *   2.28.0 - r600-eg: Add MEM_WRITE packet support
+ *   2.29.0 - R500 FP16 color clear registers
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       28
+#define KMS_DRIVER_MINOR       29
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
index f5ba2241dacc6cfd2bb6b55fead51a7ae2c395e8..62cd512f5c8d4161ff3708204fe32f560566402e 100644 (file)
@@ -640,6 +640,14 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
        enum drm_connector_status found = connector_status_disconnected;
        bool color = true;
 
+       /* just don't bother on RN50: those chips are often connected to remote
+        * console hw and load detection often fails on them. So to make
+        * everyone happy, report the encoder as always connected.
+       /* just don't bother on RN50: those chips are often connected to remote
+        * console hw and load detection often fails on them. So to make
+        */
+       if (ASIC_IS_RN50(rdev)) {
+               return connector_status_connected;
+       }
+
        /* save the regs we need */
        vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
        crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
index 883c95d8d90f4559b04b8a6e4ca319a896629681..d3aface2d12d6f3a03ee5f5a2667b42caf52d7d5 100644 (file)
@@ -84,6 +84,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
        rbo->placement.fpfn = 0;
        rbo->placement.lpfn = 0;
        rbo->placement.placement = rbo->placements;
+       rbo->placement.busy_placement = rbo->placements;
        if (domain & RADEON_GEM_DOMAIN_VRAM)
                rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                                        TTM_PL_FLAG_VRAM;
@@ -104,14 +105,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
        if (!c)
                rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        rbo->placement.num_placement = c;
-
-       c = 0;
-       rbo->placement.busy_placement = rbo->busy_placements;
-       if (rbo->rdev->flags & RADEON_IS_AGP) {
-               rbo->busy_placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
-       } else {
-               rbo->busy_placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
-       }
        rbo->placement.num_busy_placement = c;
 }
 
@@ -357,6 +350,7 @@ int radeon_bo_list_validate(struct list_head *head)
 {
        struct radeon_bo_list *lobj;
        struct radeon_bo *bo;
+       u32 domain;
        int r;
 
        r = ttm_eu_reserve_buffers(head);
@@ -366,9 +360,17 @@ int radeon_bo_list_validate(struct list_head *head)
        list_for_each_entry(lobj, head, tv.head) {
                bo = lobj->bo;
                if (!bo->pin_count) {
+                       domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
+                       
+               retry:
+                       radeon_ttm_placement_from_domain(bo, domain);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement,
                                                true, false);
                        if (unlikely(r)) {
+                               if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
+                                       domain |= RADEON_GEM_DOMAIN_GTT;
+                                       goto retry;
+                               }
                                return r;
                        }
                }
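
The new retry loop in radeon_bo_list_validate() recomputes the placement from the buffer's preferred domain and, if VRAM placement fails for any reason other than a signal, widens the domain to VRAM|GTT and validates once more. A toy version of that fallback, with invented domain bits and stand-in error codes in place of -ENOMEM/-ERESTARTSYS:

    #include <stdio.h>

    #define DOMAIN_VRAM (1u << 0)
    #define DOMAIN_GTT  (1u << 1)

    #define ERR_NOMEM       (-12)
    #define ERR_INTERRUPTED (-512)   /* stand-in for -ERESTARTSYS */

    /* Pretend validation that fails whenever VRAM alone is requested,
     * to exercise the fallback path. */
    static int fake_validate(unsigned domain)
    {
            return domain == DOMAIN_VRAM ? ERR_NOMEM : 0;
    }

    /* If placing the buffer in VRAM fails, widen the allowed placement to
     * VRAM|GTT and try once more, mirroring the retry loop in the hunk. */
    static int validate_with_fallback(unsigned domain)
    {
            int r;
    retry:
            r = fake_validate(domain);
            if (r && r != ERR_INTERRUPTED && domain == DOMAIN_VRAM) {
                    domain |= DOMAIN_GTT;
                    goto retry;
            }
            return r;
    }

    int main(void)
    {
            printf("validate: %d\n", validate_with_fallback(DOMAIN_VRAM));
            return 0;
    }
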
index 141f2b6a9cf2ab920ecaac2e1cbae85f57d1b904..cd72062d5a9134bd596c8a6388b78fd45bb02425 100644 (file)
@@ -377,6 +377,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
 {
        int r;
 
+       /* make sure we aren't trying to allocate more space than there is on the ring */
+       if (ndw > (ring->ring_size / 4))
+               return -ENOMEM;
        /* Align requested size with padding so unlock_commit can
         * pad safely */
        ndw = (ndw + ring->align_mask) & ~ring->align_mask;
@@ -784,6 +787,8 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
        }
        seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
        seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
+       seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr);
+       seq_printf(m, "last semaphore wait addr   : 0x%016llx\n", ring->last_semaphore_wait_addr);
        seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
        seq_printf(m, "%u dwords in ring\n", count);
        /* print 8 dw before current rptr as often it's the last executed
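
radeon_ring_alloc() now rejects requests larger than the whole ring before padding the dword count up to the ring's alignment, and the debugfs dump gains the last semaphore signal/wait addresses recorded below. A small sketch of the guard-then-align-up step, assuming a power-of-two alignment:

    #include <stdio.h>

    /* Round a dword count up to the ring's alignment (align must be a power
     * of two) and refuse requests larger than the whole ring, as the
     * radeon_ring_alloc() hunk above does. */
    static int ring_alloc_check(unsigned ring_size_bytes, unsigned align,
                                unsigned ndw, unsigned *out_ndw)
    {
            unsigned mask = align - 1;

            if (ndw > ring_size_bytes / 4)
                    return -1;              /* more dwords than the ring holds */
            *out_ndw = (ndw + mask) & ~mask;
            return 0;
    }

    int main(void)
    {
            unsigned ndw;
            if (ring_alloc_check(4096, 16, 37, &ndw) == 0)
                    printf("request padded to %u dwords\n", ndw);  /* prints 48 */
            return 0;
    }
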
index 97f3ece81cd257bba371c57d126bb54ddbfbee18..8dcc20f53d734ebe12b24f57b175e69988d93309 100644 (file)
@@ -95,6 +95,10 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
        /* we assume caller has already allocated space on waiters ring */
        radeon_semaphore_emit_wait(rdev, waiter, semaphore);
 
+       /* for debugging lockup only, used by sysfs debug files */
+       rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr;
+       rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr;
+
        return 0;
 }
 
index 1d8ff2f850ba0d6d4a4a47c68913bc4cbb4392e7..93f760e27a9200a81b94dfee8ce6d14ac935b49a 100644 (file)
@@ -38,6 +38,7 @@
 #include <drm/radeon_drm.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
+#include <linux/swiotlb.h>
 #include "radeon_reg.h"
 #include "radeon.h"
 
index 0f656b111c15f913f6c09045f045b81bf016c2b2..a072fa8c46b04a00a9c78886b20a29615f7ad86c 100644 (file)
@@ -1,5 +1,6 @@
 cayman 0x9400
 0x0000802C GRBM_GFX_INDEX
+0x00008040 WAIT_UNTIL
 0x000084FC CP_STRMOUT_CNTL
 0x000085F0 CP_COHER_CNTL
 0x000085F4 CP_COHER_SIZE
index 911a8fbd32bb3fcafe37bd0179dd8fd28157e948..78d5e99d759da97426cdc548857971f3820e5427 100644 (file)
@@ -324,6 +324,8 @@ rv515 0x6d40
 0x46AC US_OUT_FMT_2
 0x46B0 US_OUT_FMT_3
 0x46B4 US_W_FMT
+0x46C0 RB3D_COLOR_CLEAR_VALUE_AR
+0x46C4 RB3D_COLOR_CLEAR_VALUE_GB
 0x4BC0 FG_FOG_BLEND
 0x4BC4 FG_FOG_FACTOR
 0x4BC8 FG_FOG_COLOR_R
index 2bb6d0e84b3d94361546d06705c5d3252b7126bf..435ed35513643b868907716b9b4a231de4d0a58c 100644 (file)
@@ -336,6 +336,8 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
                                WREG32(R600_CITF_CNTL, blackout);
                }
        }
+       /* wait for the MC to settle */
+       udelay(100);
 }
 
 void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
index 3240a3d64f3066d610b623c1e5d48269faf419ab..ae8b48205a6c10257de328c0b669628436e8b0a0 100644 (file)
@@ -2215,6 +2215,12 @@ static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
 {
        struct evergreen_mc_save save;
 
+       if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+               reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+
+       if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+               reset_mask &= ~RADEON_RESET_DMA;
+
        if (reset_mask == 0)
                return 0;
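
si_gpu_soft_reset() now trims the requested reset mask based on the status registers: if the GFX block reports no GUI activity the GFX/compute reset bits are dropped, and if the DMA engine is idle the DMA reset bit is dropped, so only stuck engines get reset. An illustrative sketch with made-up bit layouts:

    #include <stdio.h>

    #define RESET_GFX     (1u << 0)
    #define RESET_COMPUTE (1u << 1)
    #define RESET_DMA     (1u << 2)

    #define STATUS_GUI_ACTIVE (1u << 31)
    #define STATUS_DMA_IDLE   (1u << 0)

    /* Drop reset requests for blocks that are already idle, so a soft reset
     * only touches engines that are actually stuck. Bit layouts are invented. */
    static unsigned prune_reset_mask(unsigned mask, unsigned grbm_status,
                                     unsigned dma_status)
    {
            if (!(grbm_status & STATUS_GUI_ACTIVE))
                    mask &= ~(RESET_GFX | RESET_COMPUTE);
            if (dma_status & STATUS_DMA_IDLE)
                    mask &= ~RESET_DMA;
            return mask;
    }

    int main(void)
    {
            unsigned mask = RESET_GFX | RESET_COMPUTE | RESET_DMA;
            printf("pruned mask: 0x%x\n",
                   prune_reset_mask(mask, 0, STATUS_DMA_IDLE));
            return 0;
    }
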
 
index 33d20be87db5f4e6070a5c20fe114816b88e7efe..52b20b12c83a3859da1b5fbbc1449fd3d4e907a4 100644 (file)
@@ -434,6 +434,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                        bo->mem = tmp_mem;
                        bdev->driver->move_notify(bo, mem);
                        bo->mem = *mem;
+                       *mem = tmp_mem;
                }
 
                goto out_err;
index d73d6e3e17b2927c6215213e872e155a3badbfab..8be35c809c7b612c5a33bc5271051273bd58cd92 100644 (file)
@@ -344,8 +344,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 
        if (ttm->state == tt_unpopulated) {
                ret = ttm->bdev->driver->ttm_tt_populate(ttm);
-               if (ret)
+               if (ret) {
+                       /* if we fail here don't nuke the mm node
+                        * as the bo still owns it */
+                       old_copy.mm_node = NULL;
                        goto out1;
+               }
        }
 
        add = 0;
@@ -371,8 +375,11 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                                                   prot);
                } else
                        ret = ttm_copy_io_page(new_iomap, old_iomap, page);
-               if (ret)
+               if (ret) {
+                       /* failing here, means keep old copy as-is */
+                       old_copy.mm_node = NULL;
                        goto out1;
+               }
        }
        mb();
 out2:
@@ -422,7 +429,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
 
-       fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
+       fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
        if (!fbo)
                return -ENOMEM;
 
@@ -441,7 +448,12 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        fbo->vm_node = NULL;
        atomic_set(&fbo->cpu_writers, 0);
 
-       fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+       spin_lock(&bdev->fence_lock);
+       if (bo->sync_obj)
+               fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+       else
+               fbo->sync_obj = NULL;
+       spin_unlock(&bdev->fence_lock);
        kref_init(&fbo->list_kref);
        kref_init(&fbo->kref);
        fbo->destroy = &ttm_transfered_destroy;
@@ -654,13 +666,11 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                 */
 
                set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-
-               /* ttm_buffer_object_transfer accesses bo->sync_obj */
-               ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                spin_unlock(&bdev->fence_lock);
                if (tmp_obj)
                        driver->sync_obj_unref(&tmp_obj);
 
+               ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
                        return ret;
 
index 512f44add89f41fae6706114c5ce497279a4f107..fe5cdbcf263605b7b5ec853b4d5f354f7c754b13 100644 (file)
 static u8 *udl_get_edid(struct udl_device *udl)
 {
        u8 *block;
-       char rbuf[3];
+       char *rbuf;
        int ret, i;
 
        block = kmalloc(EDID_LENGTH, GFP_KERNEL);
        if (block == NULL)
                return NULL;
 
+       rbuf = kmalloc(2, GFP_KERNEL);
+       if (rbuf == NULL)
+               goto error;
+
        for (i = 0; i < EDID_LENGTH; i++) {
                ret = usb_control_msg(udl->ddev->usbdev,
                                      usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02),
@@ -36,16 +40,17 @@ static u8 *udl_get_edid(struct udl_device *udl)
                                      HZ);
                if (ret < 1) {
                        DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
-                       i--;
                        goto error;
                }
                block[i] = rbuf[1];
        }
 
+       kfree(rbuf);
        return block;
 
 error:
        kfree(block);
+       kfree(rbuf);
        return NULL;
 }
 
@@ -57,6 +62,14 @@ static int udl_get_modes(struct drm_connector *connector)
 
        edid = (struct edid *)udl_get_edid(udl);
 
+       /*
+        * We only read the main block, but if the monitor reports extension
+        * blocks then the drm edid code expects them to be present, so patch
+        * the extension count to 0.
+        */
+       edid->checksum += edid->extensions;
+       edid->extensions = 0;
+
        drm_mode_connector_update_edid_property(connector, edid);
        ret = drm_add_edid_modes(connector, edid);
        kfree(edid);
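
The udl change moves the 2-byte transfer buffer off the stack and onto the heap, presumably because buffers handed to usb_control_msg() need to be kmalloc'd (DMA-capable) memory; it also drops a pointless i-- on the error path and zeroes the EDID extension count since only the base block is read. A simplified userspace sketch of the allocate/loop/single-error-label structure, with a fake transfer helper standing in for the USB call:

    #include <stdlib.h>
    #include <stdio.h>

    #define EDID_LENGTH 128

    /* Stand-in for a one-byte-at-a-time register read; in the driver this is
     * a usb_control_msg() call whose buffer must not live on the stack. */
    static int fake_read_byte(unsigned offset, unsigned char *rbuf)
    {
            rbuf[0] = 0;
            rbuf[1] = (unsigned char)offset;        /* pretend EDID data */
            return 2;
    }

    static unsigned char *get_edid(void)
    {
            unsigned char *block, *rbuf;
            int i;

            block = malloc(EDID_LENGTH);
            if (!block)
                    return NULL;
            rbuf = malloc(2);
            if (!rbuf)
                    goto error;

            for (i = 0; i < EDID_LENGTH; i++) {
                    if (fake_read_byte(i, rbuf) < 1)
                            goto error;
                    block[i] = rbuf[1];
            }
            free(rbuf);
            return block;

    error:
            free(block);
            free(rbuf);
            return NULL;
    }

    int main(void)
    {
            unsigned char *edid = get_edid();
            if (edid) {
                    printf("edid[1] = %u\n", edid[1]);
                    free(edid);
            }
            return 0;
    }
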
index 4dfa605e2d14417203e734c4a68dc2447e86f5c2..34e25471aeaaaa547f1e4ea1a1879426d7350444 100644 (file)
 #define USB_VENDOR_ID_EZKEY            0x0518
 #define USB_DEVICE_ID_BTC_8193         0x0002
 
+#define USB_VENDOR_ID_FORMOSA          0x147a
+#define USB_DEVICE_ID_FORMOSA_IR_RECEIVER      0xe03e
+
 #define USB_VENDOR_ID_FREESCALE                0x15A2
 #define USB_DEVICE_ID_FREESCALE_MX28   0x004F
 
index 12e4fdc810bf22c0c3334cb1939b6de1e73ee2d3..e766b5614ef59fcae45aaeff49a98df9de3e9693 100644 (file)
@@ -540,13 +540,24 @@ static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
 {
        struct i2c_client *client = hid->driver_data;
        int report_id = buf[0];
+       int ret;
 
        if (report_type == HID_INPUT_REPORT)
                return -EINVAL;
 
-       return i2c_hid_set_report(client,
+       if (report_id) {
+               buf++;
+               count--;
+       }
+
+       ret = i2c_hid_set_report(client,
                                report_type == HID_FEATURE_REPORT ? 0x03 : 0x02,
                                report_id, buf, count);
+
+       if (report_id && ret >= 0)
+               ret++; /* add report_id to the number of transferred bytes */
+
+       return ret;
 }
 
 static int i2c_hid_parse(struct hid_device *hid)
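
i2c_hid_output_raw_report() now strips the leading report-ID byte from the payload before calling i2c_hid_set_report() (the ID is already passed separately to that helper) and then adds one back to the returned count so callers still see the full number of bytes consumed. A compact sketch of that adjust-then-compensate logic, with a fake transport function in place of the real one:

    #include <stdio.h>

    /* Pretend bus write that returns the number of payload bytes accepted. */
    static int fake_set_report(const unsigned char *buf, int count)
    {
            (void)buf;
            return count;
    }

    /* Skip a non-zero report ID before sending, then add it back to the
     * byte count reported to the caller, as the hunk above does. */
    static int output_report(unsigned char *buf, int count)
    {
            int report_id = buf[0];
            int ret;

            if (report_id) {
                    buf++;
                    count--;
            }
            ret = fake_set_report(buf, count);
            if (report_id && ret >= 0)
                    ret++;
            return ret;
    }

    int main(void)
    {
            unsigned char report[] = { 0x02, 0xaa, 0xbb, 0xcc };
            printf("wrote %d bytes\n", output_report(report, (int)sizeof(report)));
            return 0;
    }
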
index ac9e35228254f0e6a942845520bb542752ca0921..e0e6abf1cd3bd9b3cfa3d7cc73a2247c84c31be4 100644 (file)
@@ -70,6 +70,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
index b38ef6d8d0492d7392814021f7ac30c7813ab361..64630f15f1816231ddc83feb88eab58e872cca3d 100644 (file)
@@ -2,7 +2,7 @@ menu "Microsoft Hyper-V guest support"
 
 config HYPERV
        tristate "Microsoft Hyper-V client drivers"
-       depends on X86 && ACPI && PCI
+       depends on X86 && ACPI && PCI && X86_LOCAL_APIC
        help
          Select this option to run Linux as a Hyper-V client operating
          system.
index f6c0011a0337e53ff783837a0970a2002675ddb5..dd289fd179ca8e3953b3d8f2d0b65c94fb5b878e 100644 (file)
@@ -403,7 +403,7 @@ struct dm_info_header {
  */
 
 struct dm_info_msg {
-       struct dm_info_header header;
+       struct dm_header hdr;
        __u32 reserved;
        __u32 info_size;
        __u8  info[];
@@ -503,13 +503,17 @@ static void hot_add_req(struct hv_dynmem_device *dm, struct dm_hot_add *msg)
 
 static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
 {
-       switch (msg->header.type) {
+       struct dm_info_header *info_hdr;
+
+       info_hdr = (struct dm_info_header *)msg->info;
+
+       switch (info_hdr->type) {
        case INFO_TYPE_MAX_PAGE_CNT:
                pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
-               pr_info("Data Size is %d\n", msg->header.data_size);
+               pr_info("Data Size is %d\n", info_hdr->data_size);
                break;
        default:
-               pr_info("Received Unknown type: %d\n", msg->header.type);
+               pr_info("Received Unknown type: %d\n", info_hdr->type);
        }
 }
 
@@ -879,7 +883,7 @@ static int balloon_probe(struct hv_device *dev,
                        balloon_onchannelcallback, dev);
 
        if (ret)
-               return ret;
+               goto probe_error0;
 
        dm_device.dev = dev;
        dm_device.state = DM_INITIALIZING;
@@ -891,7 +895,7 @@ static int balloon_probe(struct hv_device *dev,
                 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
        if (IS_ERR(dm_device.thread)) {
                ret = PTR_ERR(dm_device.thread);
-               goto probe_error0;
+               goto probe_error1;
        }
 
        hv_set_drvdata(dev, &dm_device);
@@ -914,12 +918,12 @@ static int balloon_probe(struct hv_device *dev,
                                VM_PKT_DATA_INBAND,
                                VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret)
-               goto probe_error1;
+               goto probe_error2;
 
        t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
-               goto probe_error1;
+               goto probe_error2;
        }
 
        /*
@@ -928,7 +932,7 @@ static int balloon_probe(struct hv_device *dev,
         */
        if (dm_device.state == DM_INIT_ERROR) {
                ret = -ETIMEDOUT;
-               goto probe_error1;
+               goto probe_error2;
        }
        /*
         * Now submit our capabilities to the host.
@@ -961,12 +965,12 @@ static int balloon_probe(struct hv_device *dev,
                                VM_PKT_DATA_INBAND,
                                VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret)
-               goto probe_error1;
+               goto probe_error2;
 
        t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
-               goto probe_error1;
+               goto probe_error2;
        }
 
        /*
@@ -975,18 +979,20 @@ static int balloon_probe(struct hv_device *dev,
         */
        if (dm_device.state == DM_INIT_ERROR) {
                ret = -ETIMEDOUT;
-               goto probe_error1;
+               goto probe_error2;
        }
 
        dm_device.state = DM_INITIALIZED;
 
        return 0;
 
-probe_error1:
+probe_error2:
        kthread_stop(dm_device.thread);
 
-probe_error0:
+probe_error1:
        vmbus_close(dev->channel);
+probe_error0:
+       kfree(send_buffer);
        return ret;
 }
 
@@ -999,6 +1005,7 @@ static int balloon_remove(struct hv_device *dev)
 
        vmbus_close(dev->channel);
        kthread_stop(dm->thread);
+       kfree(send_buffer);
 
        return 0;
 }
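
The balloon_probe() rework renumbers the error labels so that each failure point unwinds exactly what has been set up so far, and both the probe error path and balloon_remove() now free send_buffer. A generic sketch of that goto-based unwind ordering (the helpers and the buffer here are stand-ins, not the Hyper-V API):

    #include <stdio.h>
    #include <stdlib.h>

    static int  fake_open_channel(void)  { return 0; }
    static void fake_close_channel(void) { puts("channel closed"); }
    static int  fake_start_thread(void)  { return -1; }   /* force a failure */

    static int probe(void)
    {
            void *send_buffer;
            int ret;

            send_buffer = malloc(4096);
            if (!send_buffer)
                    return -1;

            ret = fake_open_channel();
            if (ret)
                    goto err_free;

            ret = fake_start_thread();
            if (ret)
                    goto err_close;

            return 0;

            /* unwind in strict reverse order of acquisition */
    err_close:
            fake_close_channel();
    err_free:
            free(send_buffer);
            return ret;
    }

    int main(void)
    {
            printf("probe: %d\n", probe());
            return 0;
    }
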
index 32f238f3caea852ad116fa9eab465c945fa0d6e3..89ac1cb26f24e6649b40ec253436bc4ee96437a5 100644 (file)
@@ -180,11 +180,11 @@ config SENSORS_ADM9240
          will be called adm9240.
 
 config SENSORS_ADT7410
-       tristate "Analog Devices ADT7410"
+       tristate "Analog Devices ADT7410/ADT7420"
        depends on I2C
        help
          If you say yes here you get support for the Analog Devices
-         ADT7410 temperature monitoring chip.
+         ADT7410 and ADT7420 temperature monitoring chips.
 
          This driver can also be built as a module. If so, the module
          will be called adt7410.
@@ -506,7 +506,8 @@ config SENSORS_IT87
        help
          If you say yes here you get support for ITE IT8705F, IT8712F,
          IT8716F, IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8758E,
-         IT8782F, and IT8783E/F sensor chips, and the SiS950 clone.
+         IT8771E, IT8772E, IT8782F, and IT8783E/F sensor chips, and the
+         SiS950 clone.
 
          This driver can also be built as a module.  If so, the module
          will be called it87.
@@ -529,8 +530,8 @@ config SENSORS_JC42
          temperature sensors, which are used on many DDR3 memory modules for
          mobile devices and servers.  Support will include, but not be limited
          to, ADT7408, AT30TS00, CAT34TS02, CAT6095, MAX6604, MCP9804, MCP9805,
-         MCP98242, MCP98243, MCP9843, SE97, SE98, STTS424(E), STTS2002,
-         STTS3000, TSE2002B3, TSE2002GB2, TS3000B3, and TS3000GB2.
+         MCP98242, MCP98243, MCP98244, MCP9843, SE97, SE98, STTS424(E),
+         STTS2002, STTS3000, TSE2002B3, TSE2002GB2, TS3000B3, and TS3000GB2.
 
          This driver can also be built as a module.  If so, the module
          will be called jc42.
@@ -854,6 +855,17 @@ config SENSORS_MAX6650
          This driver can also be built as a module.  If so, the module
          will be called max6650.
 
+config SENSORS_MAX6697
+       tristate "Maxim MAX6697 and compatibles"
+       depends on I2C
+       help
+         If you say yes here you get support for MAX6581, MAX6602, MAX6622,
+         MAX6636, MAX6689, MAX6693, MAX6694, MAX6697, MAX6698, and MAX6699
+         temperature sensor chips.
+
+         This driver can also be built as a module.  If so, the module
+         will be called max6697.
+
 config SENSORS_MCP3021
        tristate "Microchip MCP3021 and compatibles"
        depends on I2C
@@ -1145,6 +1157,16 @@ config SENSORS_AMC6821
          This driver can also be build as a module.  If so, the module
          will be called amc6821.
 
+config SENSORS_INA209
+       tristate "TI / Burr Brown INA209"
+       depends on I2C
+       help
+         If you say yes here you get support for the TI / Burr Brown INA209
+         voltage / current / power monitor I2C interface.
+
+         This driver can also be built as a module. If so, the module will
+         be called ina209.
+
 config SENSORS_INA2XX
        tristate "Texas Instruments INA219 and compatibles"
        depends on I2C
index 5da287443f6c3d007ae96b4e7a9d5d912fe3d3d7..8d6d97ea7c1e3d9e161f425cb236c79fce18c295 100644 (file)
@@ -65,6 +65,7 @@ obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o
 obj-$(CONFIG_SENSORS_I5K_AMB)  += i5k_amb.o
 obj-$(CONFIG_SENSORS_IBMAEM)   += ibmaem.o
 obj-$(CONFIG_SENSORS_IBMPEX)   += ibmpex.o
+obj-$(CONFIG_SENSORS_INA209)   += ina209.o
 obj-$(CONFIG_SENSORS_INA2XX)   += ina2xx.o
 obj-$(CONFIG_SENSORS_IT87)     += it87.o
 obj-$(CONFIG_SENSORS_JC42)     += jc42.o
@@ -99,6 +100,7 @@ obj-$(CONFIG_SENSORS_MAX197) += max197.o
 obj-$(CONFIG_SENSORS_MAX6639)  += max6639.o
 obj-$(CONFIG_SENSORS_MAX6642)  += max6642.o
 obj-$(CONFIG_SENSORS_MAX6650)  += max6650.o
+obj-$(CONFIG_SENSORS_MAX6697)  += max6697.o
 obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
 obj-$(CONFIG_SENSORS_MCP3021)  += mcp3021.o
 obj-$(CONFIG_SENSORS_NTC_THERMISTOR)   += ntc_thermistor.o
index f3a5d4764eb95be740066d7aaf94ea2a5db3c0a2..5d501adc3e54b3c477ffe125319e6473bdf10467 100644 (file)
@@ -137,7 +137,7 @@ static ssize_t set_max_min(struct device *dev,
        if (ret < 0)
                return ret;
 
-       temp = SENSORS_LIMIT(temp, -40000, 85000);
+       temp = clamp_val(temp, -40000, 85000);
        temp = (temp + (temp < 0 ? -500 : 500)) / 1000;
 
        mutex_lock(&data->lock);
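
From here on, a long series of hwmon hunks mechanically replaces SENSORS_LIMIT() with the generic clamp_val() helper; the semantics are unchanged, clamping a value into an inclusive range before it is rounded and written to a register, as the hunk above does with a -40..+85 degree limit. A simplified stand-in for clamp_val() (the kernel macro uses typeof so the comparison happens in the value's own type, and avoids double evaluation concerns differently):

    #include <stdio.h>

    /* Simplified stand-in for the kernel's clamp_val(); note the arguments
     * are evaluated more than once in this illustrative macro. */
    #define clamp_val(val, lo, hi) \
            ((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

    int main(void)
    {
            long temp = 91000;      /* millidegree input from sysfs */

            temp = clamp_val(temp, -40000, 85000);
            /* round to the nearest degree, as the hunk above does */
            temp = (temp + (temp < 0 ? -500 : 500)) / 1000;
            printf("limit stored as %ld degC\n", temp);     /* prints 85 */
            return 0;
    }
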
index fd1d1b15854ee6bedfab249ff31c0036b57a789a..71bcba8abfc0a927ffe88403ea2d2ce3339913d7 100644 (file)
@@ -193,7 +193,7 @@ static ssize_t set_temp_max(struct device *dev,
        temp /= 1000;
 
        mutex_lock(&data->update_lock);
-       data->temp_max[index] = SENSORS_LIMIT(temp, -128, 127);
+       data->temp_max[index] = clamp_val(temp, -128, 127);
        if (!read_only)
                i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index),
                                          data->temp_max[index]);
@@ -218,7 +218,7 @@ static ssize_t set_temp_min(struct device *dev,
        temp /= 1000;
 
        mutex_lock(&data->update_lock);
-       data->temp_min[index] = SENSORS_LIMIT(temp, -128, 127);
+       data->temp_min[index] = clamp_val(temp, -128, 127);
        if (!read_only)
                i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index),
                                          data->temp_min[index]);
index 0f068e7297ee10c8c2e8ab11b5fbc1eefb32fe32..ea09046e651d19d8585bb26b07bc7ba2339f9e3d 100644 (file)
@@ -197,7 +197,7 @@ static int adm1026_scaling[] = { /* .001 Volts */
        };
 #define NEG12_OFFSET  16000
 #define SCALE(val, from, to) (((val)*(to) + ((from)/2))/(from))
-#define INS_TO_REG(n, val)  (SENSORS_LIMIT(SCALE(val, adm1026_scaling[n], 192),\
+#define INS_TO_REG(n, val)  (clamp_val(SCALE(val, adm1026_scaling[n], 192),\
        0, 255))
 #define INS_FROM_REG(n, val) (SCALE(val, 192, adm1026_scaling[n]))
 
@@ -207,7 +207,7 @@ static int adm1026_scaling[] = { /* .001 Volts */
  *      22500 kHz * 60 (sec/min) * 2 (pulse) / 2 (pulse/rev) == 1350000
  */
 #define FAN_TO_REG(val, div)  ((val) <= 0 ? 0xff : \
-                               SENSORS_LIMIT(1350000 / ((val) * (div)), \
+                               clamp_val(1350000 / ((val) * (div)), \
                                              1, 254))
 #define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : (val) == 0xff ? 0 : \
                                1350000 / ((val) * (div)))
@@ -215,14 +215,14 @@ static int adm1026_scaling[] = { /* .001 Volts */
 #define DIV_TO_REG(val) ((val) >= 8 ? 3 : (val) >= 4 ? 2 : (val) >= 2 ? 1 : 0)
 
 /* Temperature is reported in 1 degC increments */
-#define TEMP_TO_REG(val) (SENSORS_LIMIT(((val) + ((val) < 0 ? -500 : 500)) \
+#define TEMP_TO_REG(val) (clamp_val(((val) + ((val) < 0 ? -500 : 500)) \
                                        / 1000, -127, 127))
 #define TEMP_FROM_REG(val) ((val) * 1000)
-#define OFFSET_TO_REG(val) (SENSORS_LIMIT(((val) + ((val) < 0 ? -500 : 500)) \
+#define OFFSET_TO_REG(val) (clamp_val(((val) + ((val) < 0 ? -500 : 500)) \
                                          / 1000, -127, 127))
 #define OFFSET_FROM_REG(val) ((val) * 1000)
 
-#define PWM_TO_REG(val) (SENSORS_LIMIT(val, 0, 255))
+#define PWM_TO_REG(val) (clamp_val(val, 0, 255))
 #define PWM_FROM_REG(val) (val)
 
 #define PWM_MIN_TO_REG(val) ((val) & 0xf0)
@@ -233,7 +233,7 @@ static int adm1026_scaling[] = { /* .001 Volts */
  *   indicates that the DAC could be used to drive the fans, but in our
  *   example board (Arima HDAMA) it isn't connected to the fans at all.
  */
-#define DAC_TO_REG(val) (SENSORS_LIMIT(((((val) * 255) + 500) / 2500), 0, 255))
+#define DAC_TO_REG(val) (clamp_val(((((val) * 255) + 500) / 2500), 0, 255))
 #define DAC_FROM_REG(val) (((val) * 2500) / 255)
 
 /*
@@ -933,7 +933,7 @@ static void fixup_fan_min(struct device *dev, int fan, int old_div)
                return;
 
        new_min = data->fan_min[fan] * old_div / new_div;
-       new_min = SENSORS_LIMIT(new_min, 1, 254);
+       new_min = clamp_val(new_min, 1, 254);
        data->fan_min[fan] = new_min;
        adm1026_write_value(client, ADM1026_REG_FAN_MIN(fan), new_min);
 }
@@ -1527,7 +1527,7 @@ static ssize_t set_auto_pwm_min(struct device *dev,
                return err;
 
        mutex_lock(&data->update_lock);
-       data->pwm1.auto_pwm_min = SENSORS_LIMIT(val, 0, 255);
+       data->pwm1.auto_pwm_min = clamp_val(val, 0, 255);
        if (data->pwm1.enable == 2) { /* apply immediately */
                data->pwm1.pwm = PWM_TO_REG((data->pwm1.pwm & 0x0f) |
                        PWM_MIN_TO_REG(data->pwm1.auto_pwm_min));
index c6a4631e833f78a1143fa7b81ad1bc8f8b237d6f..253ea396106db45e18ae6b1a54022029d5b7afc9 100644 (file)
@@ -162,13 +162,13 @@ adm1031_write_value(struct i2c_client *client, u8 reg, unsigned int value)
 static int FAN_TO_REG(int reg, int div)
 {
        int tmp;
-       tmp = FAN_FROM_REG(SENSORS_LIMIT(reg, 0, 65535), div);
+       tmp = FAN_FROM_REG(clamp_val(reg, 0, 65535), div);
        return tmp > 255 ? 255 : tmp;
 }
 
 #define FAN_DIV_FROM_REG(reg)          (1<<(((reg)&0xc0)>>6))
 
-#define PWM_TO_REG(val)                        (SENSORS_LIMIT((val), 0, 255) >> 4)
+#define PWM_TO_REG(val)                        (clamp_val((val), 0, 255) >> 4)
 #define PWM_FROM_REG(val)              ((val) << 4)
 
 #define FAN_CHAN_FROM_REG(reg)         (((reg) >> 5) & 7)
@@ -675,7 +675,7 @@ static ssize_t set_temp_offset(struct device *dev,
        if (ret)
                return ret;
 
-       val = SENSORS_LIMIT(val, -15000, 15000);
+       val = clamp_val(val, -15000, 15000);
        mutex_lock(&data->update_lock);
        data->temp_offset[nr] = TEMP_OFFSET_TO_REG(val);
        adm1031_write_value(client, ADM1031_REG_TEMP_OFFSET(nr),
@@ -696,7 +696,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
        if (ret)
                return ret;
 
-       val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875);
+       val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
        mutex_lock(&data->update_lock);
        data->temp_min[nr] = TEMP_TO_REG(val);
        adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr),
@@ -717,7 +717,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
        if (ret)
                return ret;
 
-       val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875);
+       val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
        mutex_lock(&data->update_lock);
        data->temp_max[nr] = TEMP_TO_REG(val);
        adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr),
@@ -738,7 +738,7 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
        if (ret)
                return ret;
 
-       val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875);
+       val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
        mutex_lock(&data->update_lock);
        data->temp_crit[nr] = TEMP_TO_REG(val);
        adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr),
index dafa477715e327fcb324c86e51dba46e1afeb450..2416628e0ab1d78a264a4c8bc6a606a0057ed3fb 100644 (file)
@@ -98,13 +98,13 @@ static inline unsigned int IN_FROM_REG(u8 reg, int n)
 
 static inline u8 IN_TO_REG(unsigned long val, int n)
 {
-       return SENSORS_LIMIT(SCALE(val, 192, nom_mv[n]), 0, 255);
+       return clamp_val(SCALE(val, 192, nom_mv[n]), 0, 255);
 }
 
 /* temperature range: -40..125, 127 disables temperature alarm */
 static inline s8 TEMP_TO_REG(long val)
 {
-       return SENSORS_LIMIT(SCALE(val, 1, 1000), -40, 127);
+       return clamp_val(SCALE(val, 1, 1000), -40, 127);
 }
 
 /* two fans, each with low fan speed limit */
@@ -122,7 +122,7 @@ static inline unsigned int FAN_FROM_REG(u8 reg, u8 div)
 /* analog out 0..1250mV */
 static inline u8 AOUT_TO_REG(unsigned long val)
 {
-       return SENSORS_LIMIT(SCALE(val, 255, 1250), 0, 255);
+       return clamp_val(SCALE(val, 255, 1250), 0, 255);
 }
 
 static inline unsigned int AOUT_FROM_REG(u8 reg)
index 409b5c16defb1c0169c3871695c3b7b7c3d08fc0..ba962ac4b81f2e63519ee12138e1b2e298befe90 100644 (file)
@@ -163,9 +163,9 @@ static int ads7828_probe(struct i2c_client *client,
 
        /* Bound Vref with min/max values if it was provided */
        if (data->vref_mv)
-               data->vref_mv = SENSORS_LIMIT(data->vref_mv,
-                                             ADS7828_EXT_VREF_MV_MIN,
-                                             ADS7828_EXT_VREF_MV_MAX);
+               data->vref_mv = clamp_val(data->vref_mv,
+                                         ADS7828_EXT_VREF_MV_MIN,
+                                         ADS7828_EXT_VREF_MV_MAX);
        else
                data->vref_mv = ADS7828_INT_VREF_MV;
 
index 030c8d7c33a57b8f03463c4fbb220e5daf81b5d9..99a7290da0a343c4e0e970c57eebfeca86486589 100644 (file)
@@ -78,10 +78,6 @@ enum adt7410_type {          /* keep sorted in alphabetical order */
        adt7410,
 };
 
-/* Addresses scanned */
-static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
-                                       I2C_CLIENT_END };
-
 static const u8 ADT7410_REG_TEMP[4] = {
        ADT7410_TEMPERATURE,            /* input */
        ADT7410_T_ALARM_HIGH,           /* high */
@@ -173,8 +169,8 @@ abort:
 
 static s16 ADT7410_TEMP_TO_REG(long temp)
 {
-       return DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, ADT7410_TEMP_MIN,
-                                              ADT7410_TEMP_MAX) * 128, 1000);
+       return DIV_ROUND_CLOSEST(clamp_val(temp, ADT7410_TEMP_MIN,
+                                          ADT7410_TEMP_MAX) * 128, 1000);
 }
 
 static int ADT7410_REG_TO_TEMP(struct adt7410_data *data, s16 reg)
@@ -269,9 +265,9 @@ static ssize_t adt7410_set_t_hyst(struct device *dev,
                return ret;
        /* convert absolute hysteresis value to a 4 bit delta value */
        limit = ADT7410_REG_TO_TEMP(data, data->temp[1]);
-       hyst = SENSORS_LIMIT(hyst, ADT7410_TEMP_MIN, ADT7410_TEMP_MAX);
-       data->hyst = SENSORS_LIMIT(DIV_ROUND_CLOSEST(limit - hyst, 1000),
-                                  0, ADT7410_T_HYST_MASK);
+       hyst = clamp_val(hyst, ADT7410_TEMP_MIN, ADT7410_TEMP_MAX);
+       data->hyst = clamp_val(DIV_ROUND_CLOSEST(limit - hyst, 1000), 0,
+                              ADT7410_T_HYST_MASK);
        ret = i2c_smbus_write_byte_data(client, ADT7410_T_HYST, data->hyst);
        if (ret)
                return ret;
@@ -364,6 +360,7 @@ static int adt7410_probe(struct i2c_client *client,
        /*
         * Set to 16 bit resolution, continous conversion and comparator mode.
         */
+       ret &= ~ADT7410_MODE_MASK;
        data->config = ret | ADT7410_FULL | ADT7410_RESOLUTION |
                        ADT7410_EVENT_MODE;
        if (data->config != data->oldconfig) {
@@ -410,11 +407,12 @@ static int adt7410_remove(struct i2c_client *client)
 
 static const struct i2c_device_id adt7410_ids[] = {
        { "adt7410", adt7410, },
+       { "adt7420", adt7410, },
        { /* LIST END */ }
 };
 MODULE_DEVICE_TABLE(i2c, adt7410_ids);
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int adt7410_suspend(struct device *dev)
 {
        int ret;
@@ -436,10 +434,8 @@ static int adt7410_resume(struct device *dev)
        return ret;
 }
 
-static const struct dev_pm_ops adt7410_dev_pm_ops = {
-       .suspend        = adt7410_suspend,
-       .resume         = adt7410_resume,
-};
+static SIMPLE_DEV_PM_OPS(adt7410_dev_pm_ops, adt7410_suspend, adt7410_resume);
+
 #define ADT7410_DEV_PM_OPS (&adt7410_dev_pm_ops)
 #else
 #define ADT7410_DEV_PM_OPS NULL
@@ -454,11 +450,11 @@ static struct i2c_driver adt7410_driver = {
        .probe          = adt7410_probe,
        .remove         = adt7410_remove,
        .id_table       = adt7410_ids,
-       .address_list   = normal_i2c,
+       .address_list   = I2C_ADDRS(0x48, 0x49, 0x4a, 0x4b),
 };
 
 module_i2c_driver(adt7410_driver);
 
 MODULE_AUTHOR("Hartmut Knaack");
-MODULE_DESCRIPTION("ADT7410 driver");
+MODULE_DESCRIPTION("ADT7410/ADT7420 driver");
 MODULE_LICENSE("GPL");
index 98a7d81e25c5f18de577c31db321481c0b0d225d..69481d3a3d231fec48e1f43f0489e20879613daf 100644 (file)
@@ -836,7 +836,7 @@ static ssize_t set_temp_min(struct device *dev,
                return -EINVAL;
 
        temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
-       temp = SENSORS_LIMIT(temp, 0, 255);
+       temp = clamp_val(temp, 0, 255);
 
        mutex_lock(&data->lock);
        data->temp_min[attr->index] = temp;
@@ -874,7 +874,7 @@ static ssize_t set_temp_max(struct device *dev,
                return -EINVAL;
 
        temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
-       temp = SENSORS_LIMIT(temp, 0, 255);
+       temp = clamp_val(temp, 0, 255);
 
        mutex_lock(&data->lock);
        data->temp_max[attr->index] = temp;
@@ -939,7 +939,7 @@ static ssize_t set_volt_max(struct device *dev,
 
        temp *= 1000; /* convert mV to uV */
        temp = DIV_ROUND_CLOSEST(temp, x);
-       temp = SENSORS_LIMIT(temp, 0, 255);
+       temp = clamp_val(temp, 0, 255);
 
        mutex_lock(&data->lock);
        data->volt_max[attr->index] = temp;
@@ -981,7 +981,7 @@ static ssize_t set_volt_min(struct device *dev,
 
        temp *= 1000; /* convert mV to uV */
        temp = DIV_ROUND_CLOSEST(temp, x);
-       temp = SENSORS_LIMIT(temp, 0, 255);
+       temp = clamp_val(temp, 0, 255);
 
        mutex_lock(&data->lock);
        data->volt_min[attr->index] = temp;
@@ -1071,7 +1071,7 @@ static ssize_t set_fan_min(struct device *dev,
 
        temp = FAN_RPM_TO_PERIOD(temp);
        temp >>= 8;
-       temp = SENSORS_LIMIT(temp, 1, 255);
+       temp = clamp_val(temp, 1, 255);
 
        mutex_lock(&data->lock);
        data->fan_min[attr->index] = temp;
@@ -1149,7 +1149,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
        if (kstrtol(buf, 10, &temp))
                return -EINVAL;
 
-       temp = SENSORS_LIMIT(temp, 0, 255);
+       temp = clamp_val(temp, 0, 255);
 
        mutex_lock(&data->lock);
        data->pwm[attr->index] = temp;
@@ -1179,7 +1179,7 @@ static ssize_t set_pwm_max(struct device *dev,
        if (kstrtol(buf, 10, &temp))
                return -EINVAL;
 
-       temp = SENSORS_LIMIT(temp, 0, 255);
+       temp = clamp_val(temp, 0, 255);
 
        mutex_lock(&data->lock);
        data->pwm_max = temp;
@@ -1211,7 +1211,7 @@ static ssize_t set_pwm_min(struct device *dev,
        if (kstrtol(buf, 10, &temp))
                return -EINVAL;
 
-       temp = SENSORS_LIMIT(temp, 0, 255);
+       temp = clamp_val(temp, 0, 255);
 
        mutex_lock(&data->lock);
        data->pwm_min[attr->index] = temp;
@@ -1246,7 +1246,7 @@ static ssize_t set_pwm_hyst(struct device *dev,
                return -EINVAL;
 
        temp = DIV_ROUND_CLOSEST(temp, 1000);
-       temp = SENSORS_LIMIT(temp, 0, 15);
+       temp = clamp_val(temp, 0, 15);
 
        /* package things up */
        temp &= ADT7462_PWM_HYST_MASK;
@@ -1333,7 +1333,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
                return -EINVAL;
 
        temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
-       temp = SENSORS_LIMIT(temp, 0, 255);
+       temp = clamp_val(temp, 0, 255);
 
        mutex_lock(&data->lock);
        data->pwm_tmin[attr->index] = temp;
index 39ecb1a3b9ef61982b0e81e9fbac5585da35dfac..b83bf4bb95eba3489e861d7f2d51e24e6a42d15f 100644 (file)
@@ -452,7 +452,7 @@ static ssize_t set_auto_update_interval(struct device *dev,
        if (kstrtol(buf, 10, &temp))
                return -EINVAL;
 
-       temp = SENSORS_LIMIT(temp, 0, 60000);
+       temp = clamp_val(temp, 0, 60000);
 
        mutex_lock(&data->lock);
        data->auto_update_interval = temp;
@@ -481,7 +481,7 @@ static ssize_t set_num_temp_sensors(struct device *dev,
        if (kstrtol(buf, 10, &temp))
                return -EINVAL;
 
-       temp = SENSORS_LIMIT(temp, -1, 10);
+       temp = clamp_val(temp, -1, 10);
 
        mutex_lock(&data->lock);
        data->num_temp_sensors = temp;
@@ -515,7 +515,7 @@ static ssize_t set_temp_min(struct device *dev,
                return -EINVAL;
 
        temp = DIV_ROUND_CLOSEST(temp, 1000);
-       temp = SENSORS_LIMIT(temp, 0, 255);
+       temp = clamp_val(temp, 0, 255);
 
        mutex_lock(&data->lock);
        data->temp_min[attr->index] = temp;
@@ -549,7 +549,7 @@ static ssize_t set_temp_max(struct device *dev,
                return -EINVAL;
 
        temp = DIV_ROUND_CLOSEST(temp, 1000);
-       temp = SENSORS_LIMIT(temp, 0, 255);
+       temp = clamp_val(temp, 0, 255);
 
        mutex_lock(&data->lock);
        data->temp_max[attr->index] = temp;
@@ -604,7 +604,7 @@ static ssize_t set_fan_max(struct device *dev,
                return -EINVAL;
 
        temp = FAN_RPM_TO_PERIOD(temp);
-       temp = SENSORS_LIMIT(temp, 1, 65534);
+       temp = clamp_val(temp, 1, 65534);
 
        mutex_lock(&data->lock);
        data->fan_max[attr->index] = temp;
@@ -641,7 +641,7 @@ static ssize_t set_fan_min(struct device *dev,
                return -EINVAL;
 
        temp = FAN_RPM_TO_PERIOD(temp);
-       temp = SENSORS_LIMIT(temp, 1, 65534);
+       temp = clamp_val(temp, 1, 65534);
 
        mutex_lock(&data->lock);
        data->fan_min[attr->index] = temp;
@@ -717,7 +717,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
        if (kstrtol(buf, 10, &temp))
                return -EINVAL;
 
-       temp = SENSORS_LIMIT(temp, 0, 255);
+       temp = clamp_val(temp, 0, 255);
 
        mutex_lock(&data->lock);
        data->pwm[attr->index] = temp;
@@ -749,7 +749,7 @@ static ssize_t set_pwm_max(struct device *dev,
        if (kstrtol(buf, 10, &temp))
                return -EINVAL;
 
-       temp = SENSORS_LIMIT(temp, 0, 255);
+       temp = clamp_val(temp, 0, 255);
 
        mutex_lock(&data->lock);
        data->pwm_max[attr->index] = temp;
@@ -782,7 +782,7 @@ static ssize_t set_pwm_min(struct device *dev,
        if (kstrtol(buf, 10, &temp))
                return -EINVAL;
 
-       temp = SENSORS_LIMIT(temp, 0, 255);
+       temp = clamp_val(temp, 0, 255);
 
        mutex_lock(&data->lock);
        data->pwm_min[attr->index] = temp;
@@ -826,7 +826,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
                return -EINVAL;
 
        temp = DIV_ROUND_CLOSEST(temp, 1000);
-       temp = SENSORS_LIMIT(temp, 0, 255);
+       temp = clamp_val(temp, 0, 255);
 
        mutex_lock(&data->lock);
        data->pwm_tmin[attr->index] = temp;
index 989e54c3925224a84bbf8bc55ac900a53fdf49ad..22d008bbdc1011d16919e70170f2f9d09b394235 100644 (file)
@@ -201,10 +201,10 @@ static inline u16 temp2reg(struct adt7475_data *data, long val)
        u16 ret;
 
        if (!(data->config5 & CONFIG5_TWOSCOMP)) {
-               val = SENSORS_LIMIT(val, -64000, 191000);
+               val = clamp_val(val, -64000, 191000);
                ret = (val + 64500) / 1000;
        } else {
-               val = SENSORS_LIMIT(val, -128000, 127000);
+               val = clamp_val(val, -128000, 127000);
                if (val < -500)
                        ret = (256500 + val) / 1000;
                else
@@ -240,7 +240,7 @@ static inline u16 rpm2tach(unsigned long rpm)
        if (rpm == 0)
                return 0;
 
-       return SENSORS_LIMIT((90000 * 60) / rpm, 1, 0xFFFF);
+       return clamp_val((90000 * 60) / rpm, 1, 0xFFFF);
 }
 
 /* Scaling factors for voltage inputs, taken from the ADT7490 datasheet */
@@ -271,7 +271,7 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
                reg = (volt * 1024) / 2250;
        else
                reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250);
-       return SENSORS_LIMIT(reg, 0, 1023) & (0xff << 2);
+       return clamp_val(reg, 0, 1023) & (0xff << 2);
 }
 
 static u16 adt7475_read_word(struct i2c_client *client, int reg)
@@ -451,10 +451,10 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
        switch (sattr->nr) {
        case OFFSET:
                if (data->config5 & CONFIG5_TEMPOFFSET) {
-                       val = SENSORS_LIMIT(val, -63000, 127000);
+                       val = clamp_val(val, -63000, 127000);
                        out = data->temp[OFFSET][sattr->index] = val / 1000;
                } else {
-                       val = SENSORS_LIMIT(val, -63000, 64000);
+                       val = clamp_val(val, -63000, 64000);
                        out = data->temp[OFFSET][sattr->index] = val / 500;
                }
                break;
@@ -471,7 +471,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
                adt7475_read_hystersis(client);
 
                temp = reg2temp(data, data->temp[THERM][sattr->index]);
-               val = SENSORS_LIMIT(val, temp - 15000, temp);
+               val = clamp_val(val, temp - 15000, temp);
                val = (temp - val) / 1000;
 
                if (sattr->index != 1) {
@@ -577,7 +577,7 @@ static ssize_t set_point2(struct device *dev, struct device_attribute *attr,
         * to figure the range
         */
        temp = reg2temp(data, data->temp[AUTOMIN][sattr->index]);
-       val = SENSORS_LIMIT(val, temp + autorange_table[0],
+       val = clamp_val(val, temp + autorange_table[0],
                temp + autorange_table[ARRAY_SIZE(autorange_table) - 1]);
        val -= temp;
 
@@ -701,7 +701,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
                break;
        }
 
-       data->pwm[sattr->nr][sattr->index] = SENSORS_LIMIT(val, 0, 0xFF);
+       data->pwm[sattr->nr][sattr->index] = clamp_val(val, 0, 0xFF);
        i2c_smbus_write_byte_data(client, reg,
                                  data->pwm[sattr->nr][sattr->index]);
 
index ae482e3afdac8c135eb3d501a6f20773a1624db5..4fe49d2bfe1de5c44b61b6bb3cbcfa3acecec41e 100644 (file)
@@ -241,7 +241,7 @@ static ssize_t set_temp(
        int ret = kstrtol(buf, 10, &val);
        if (ret)
                return ret;
-       val = SENSORS_LIMIT(val / 1000, -128, 127);
+       val = clamp_val(val / 1000, -128, 127);
 
        mutex_lock(&data->update_lock);
        data->temp[ix] = val;
@@ -332,7 +332,7 @@ static ssize_t set_pwm1(
                return ret;
 
        mutex_lock(&data->update_lock);
-       data->pwm1 = SENSORS_LIMIT(val , 0, 255);
+       data->pwm1 = clamp_val(val , 0, 255);
        i2c_smbus_write_byte_data(client, AMC6821_REG_DCY, data->pwm1);
        mutex_unlock(&data->update_lock);
        return count;
@@ -499,11 +499,11 @@ static ssize_t set_temp_auto_point_temp(
        mutex_lock(&data->update_lock);
        switch (ix) {
        case 0:
-               ptemp[0] = SENSORS_LIMIT(val / 1000, 0,
-                               data->temp1_auto_point_temp[1]);
-               ptemp[0] = SENSORS_LIMIT(ptemp[0], 0,
-                               data->temp2_auto_point_temp[1]);
-               ptemp[0] = SENSORS_LIMIT(ptemp[0], 0, 63);
+               ptemp[0] = clamp_val(val / 1000, 0,
+                                    data->temp1_auto_point_temp[1]);
+               ptemp[0] = clamp_val(ptemp[0], 0,
+                                    data->temp2_auto_point_temp[1]);
+               ptemp[0] = clamp_val(ptemp[0], 0, 63);
                if (i2c_smbus_write_byte_data(
                                        client,
                                        AMC6821_REG_PSV_TEMP,
@@ -515,20 +515,12 @@ static ssize_t set_temp_auto_point_temp(
                goto EXIT;
                break;
        case 1:
-               ptemp[1] = SENSORS_LIMIT(
-                                       val / 1000,
-                                       (ptemp[0] & 0x7C) + 4,
-                                       124);
+               ptemp[1] = clamp_val(val / 1000, (ptemp[0] & 0x7C) + 4, 124);
                ptemp[1] &= 0x7C;
-               ptemp[2] = SENSORS_LIMIT(
-                                       ptemp[2], ptemp[1] + 1,
-                                       255);
+               ptemp[2] = clamp_val(ptemp[2], ptemp[1] + 1, 255);
                break;
        case 2:
-               ptemp[2] = SENSORS_LIMIT(
-                                       val / 1000,
-                                       ptemp[1]+1,
-                                       255);
+               ptemp[2] = clamp_val(val / 1000, ptemp[1]+1, 255);
                break;
        default:
                dev_dbg(dev, "Unknown attr->index (%d).\n", ix);
@@ -561,7 +553,7 @@ static ssize_t set_pwm1_auto_point_pwm(
                return ret;
 
        mutex_lock(&data->update_lock);
-       data->pwm1_auto_point_pwm[1] = SENSORS_LIMIT(val, 0, 254);
+       data->pwm1_auto_point_pwm[1] = clamp_val(val, 0, 254);
        if (i2c_smbus_write_byte_data(client, AMC6821_REG_DCY_LOW_TEMP,
                        data->pwm1_auto_point_pwm[1])) {
                dev_err(&client->dev, "Register write error, aborting.\n");
@@ -629,7 +621,7 @@ static ssize_t set_fan(
        val = 1 > val ? 0xFFFF : 6000000/val;
 
        mutex_lock(&data->update_lock);
-       data->fan[ix] = (u16) SENSORS_LIMIT(val, 1, 0xFFFF);
+       data->fan[ix] = (u16) clamp_val(val, 1, 0xFFFF);
        if (i2c_smbus_write_byte_data(client, fan_reg_low[ix],
                        data->fan[ix] & 0xFF)) {
                dev_err(&client->dev, "Register write error, aborting.\n");
index 520e5bf4f76d9f8cd1b16edf5610554fde103181..6ac612cabda1e1acace7382c74a553346468097f 100644 (file)
@@ -114,7 +114,7 @@ static const u16 asb100_reg_temp_hyst[]     = {0, 0x3a, 0x153, 0x253, 0x19};
  */
 static u8 IN_TO_REG(unsigned val)
 {
-       unsigned nval = SENSORS_LIMIT(val, ASB100_IN_MIN, ASB100_IN_MAX);
+       unsigned nval = clamp_val(val, ASB100_IN_MIN, ASB100_IN_MAX);
        return (nval + 8) / 16;
 }
 
@@ -129,8 +129,8 @@ static u8 FAN_TO_REG(long rpm, int div)
                return 0;
        if (rpm == 0)
                return 255;
-       rpm = SENSORS_LIMIT(rpm, 1, 1000000);
-       return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+       rpm = clamp_val(rpm, 1, 1000000);
+       return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
 }
 
 static int FAN_FROM_REG(u8 val, int div)
@@ -148,7 +148,7 @@ static int FAN_FROM_REG(u8 val, int div)
  */
 static u8 TEMP_TO_REG(long temp)
 {
-       int ntemp = SENSORS_LIMIT(temp, ASB100_TEMP_MIN, ASB100_TEMP_MAX);
+       int ntemp = clamp_val(temp, ASB100_TEMP_MIN, ASB100_TEMP_MAX);
        ntemp += (ntemp < 0 ? -500 : 500);
        return (u8)(ntemp / 1000);
 }
@@ -164,7 +164,7 @@ static int TEMP_FROM_REG(u8 reg)
  */
 static u8 ASB100_PWM_TO_REG(int pwm)
 {
-       pwm = SENSORS_LIMIT(pwm, 0, 255);
+       pwm = clamp_val(pwm, 0, 255);
        return (u8)(pwm / 16);
 }
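
The change in this file (and throughout the series) is a mechanical rename: SENSORS_LIMIT(v, lo, hi) becomes clamp_val(v, lo, hi) with identical clamping behaviour. As an illustration of the fan arithmetic above, here is a small userspace sketch; the CLAMP macro is a local stand-in for the kernel's clamp_val(), and the decode expression 1350000 / (reg * div) is the conventional inverse used by this driver family (not shown in the hunk above).

    #include <stdio.h>

    /* local stand-in for the kernel's clamp_val(): clamp v to [lo, hi] */
    #define CLAMP(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

    int main(void)
    {
            long rpm = 3000;
            int div = 2;

            /* same arithmetic as FAN_TO_REG() above, rounded to the nearest count */
            long reg = CLAMP((1350000 + rpm * div / 2) / (rpm * div), 1, 254);

            /* reg is 225; 1350000 / (225 * 2) recovers 3000 RPM */
            printf("reg=%ld\n", reg);
            return 0;
    }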
 
index b867aab7804975f91fc53f7cc76382f4af39694f..da7f5b5d5db5a2e36ca1295fc9052651f35ab0c2 100644 (file)
@@ -191,7 +191,7 @@ static ssize_t store_u8(struct device *dev, struct device_attribute *attr,
        if (kstrtol(buf, 10, &reqval))
                return -EINVAL;
 
-       reqval = SENSORS_LIMIT(reqval, 0, 255);
+       reqval = clamp_val(reqval, 0, 255);
 
        mutex_lock(&data->update_lock);
        data->reg[param->msb[0]] = reqval;
@@ -224,7 +224,7 @@ static ssize_t store_bitmask(struct device *dev,
        if (kstrtol(buf, 10, &reqval))
                return -EINVAL;
 
-       reqval = SENSORS_LIMIT(reqval, 0, param->mask[0]);
+       reqval = clamp_val(reqval, 0, param->mask[0]);
 
        reqval = (reqval & param->mask[0]) << param->shift[0];
 
@@ -274,7 +274,7 @@ static ssize_t store_fan16(struct device *dev,
         * generating an alarm.
         */
        reqval =
-           (reqval <= 0 ? 0xffff : SENSORS_LIMIT(5400000 / reqval, 0, 0xfffe));
+           (reqval <= 0 ? 0xffff : clamp_val(5400000 / reqval, 0, 0xfffe));
 
        mutex_lock(&data->update_lock);
        data->reg[param->msb[0]] = (reqval >> 8) & 0xff;
@@ -343,11 +343,11 @@ static ssize_t store_in8(struct device *dev, struct device_attribute *attr,
        if (kstrtol(buf, 10, &reqval))
                return -EINVAL;
 
-       reqval = SENSORS_LIMIT(reqval, 0, 0xffff);
+       reqval = clamp_val(reqval, 0, 0xffff);
 
        reqval = reqval * 0xc0 / asc7621_in_scaling[nr];
 
-       reqval = SENSORS_LIMIT(reqval, 0, 0xff);
+       reqval = clamp_val(reqval, 0, 0xff);
 
        mutex_lock(&data->update_lock);
        data->reg[param->msb[0]] = reqval;
@@ -376,7 +376,7 @@ static ssize_t store_temp8(struct device *dev,
        if (kstrtol(buf, 10, &reqval))
                return -EINVAL;
 
-       reqval = SENSORS_LIMIT(reqval, -127000, 127000);
+       reqval = clamp_val(reqval, -127000, 127000);
 
        temp = reqval / 1000;
 
@@ -432,7 +432,7 @@ static ssize_t store_temp62(struct device *dev,
        if (kstrtol(buf, 10, &reqval))
                return -EINVAL;
 
-       reqval = SENSORS_LIMIT(reqval, -32000, 31750);
+       reqval = clamp_val(reqval, -32000, 31750);
        i = reqval / 1000;
        f = reqval - (i * 1000);
        temp = i << 2;
@@ -468,7 +468,7 @@ static ssize_t show_ap2_temp(struct device *dev,
        auto_point1 = ((s8) data->reg[param->msb[1]]) * 1000;
        regval =
            ((data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0]);
-       temp = auto_point1 + asc7621_range_map[SENSORS_LIMIT(regval, 0, 15)];
+       temp = auto_point1 + asc7621_range_map[clamp_val(regval, 0, 15)];
        mutex_unlock(&data->update_lock);
 
        return sprintf(buf, "%d\n", temp);
@@ -489,7 +489,7 @@ static ssize_t store_ap2_temp(struct device *dev,
 
        mutex_lock(&data->update_lock);
        auto_point1 = data->reg[param->msb[1]] * 1000;
-       reqval = SENSORS_LIMIT(reqval, auto_point1 + 2000, auto_point1 + 80000);
+       reqval = clamp_val(reqval, auto_point1 + 2000, auto_point1 + 80000);
 
        for (i = ARRAY_SIZE(asc7621_range_map) - 1; i >= 0; i--) {
                if (reqval >= auto_point1 + asc7621_range_map[i]) {
@@ -523,7 +523,7 @@ static ssize_t show_pwm_ac(struct device *dev,
        regval = config | (altbit << 3);
        mutex_unlock(&data->update_lock);
 
-       return sprintf(buf, "%u\n", map[SENSORS_LIMIT(regval, 0, 15)]);
+       return sprintf(buf, "%u\n", map[clamp_val(regval, 0, 15)]);
 }
 
 static ssize_t store_pwm_ac(struct device *dev,
@@ -663,7 +663,7 @@ static ssize_t show_pwm_freq(struct device *dev,
        u8 regval =
            (data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0];
 
-       regval = SENSORS_LIMIT(regval, 0, 15);
+       regval = clamp_val(regval, 0, 15);
 
        return sprintf(buf, "%u\n", asc7621_pwm_freq_map[regval]);
 }
@@ -711,7 +711,7 @@ static ssize_t show_pwm_ast(struct device *dev,
        u8 regval =
            (data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0];
 
-       regval = SENSORS_LIMIT(regval, 0, 7);
+       regval = clamp_val(regval, 0, 7);
 
        return sprintf(buf, "%u\n", asc7621_pwm_auto_spinup_map[regval]);
 
@@ -759,7 +759,7 @@ static ssize_t show_temp_st(struct device *dev,
        SETUP_SHOW_data_param(dev, attr);
        u8 regval =
            (data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0];
-       regval = SENSORS_LIMIT(regval, 0, 7);
+       regval = clamp_val(regval, 0, 7);
 
        return sprintf(buf, "%u\n", asc7621_temp_smoothing_time_map[regval]);
 }
index d64923d6353741ba8a6e9c233712bd448849478c..3f1e297663ada0fefa412ce2e0fd4e362d84af50 100644 (file)
@@ -198,7 +198,7 @@ struct tjmax {
 static const struct tjmax __cpuinitconst tjmax_table[] = {
        { "CPU  230", 100000 },         /* Model 0x1c, stepping 2       */
        { "CPU  330", 125000 },         /* Model 0x1c, stepping 2       */
-       { "CPU CE4110", 110000 },       /* Model 0x1c, stepping 10      */
+       { "CPU CE4110", 110000 },       /* Model 0x1c, stepping 10 Sodaville */
        { "CPU CE4150", 110000 },       /* Model 0x1c, stepping 10      */
        { "CPU CE4170", 110000 },       /* Model 0x1c, stepping 10      */
 };
@@ -212,7 +212,7 @@ struct tjmax_model {
 #define ANY 0xff
 
 static const struct tjmax_model __cpuinitconst tjmax_model_table[] = {
-       { 0x1c, 10, 100000 },   /* D4xx, N4xx, D5xx, N5xx */
+       { 0x1c, 10, 100000 },   /* D4xx, K4xx, N4xx, D5xx, K5xx, N5xx */
        { 0x1c, ANY, 90000 },   /* Z5xx, N2xx, possibly others
                                 * Note: Also matches 230 and 330,
                                 * which are covered by tjmax_table
@@ -222,6 +222,7 @@ static const struct tjmax_model __cpuinitconst tjmax_model_table[] = {
                                 * is undetectable by software
                                 */
        { 0x27, ANY, 90000 },   /* Atom Medfield (Z2460) */
+       { 0x35, ANY, 90000 },   /* Atom Clover Trail/Cloverview (Z2760) */
        { 0x36, ANY, 100000 },  /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx) */
 };
 
index 7430f70ae452958b5a1bdbd8be308fdbf5a2e405..c347c94f2f7336dbc4b86f3e841f7ebf1a44954b 100644 (file)
@@ -277,7 +277,7 @@ static inline int IN_FROM_REG(int reg, int nominal, int res)
 
 static inline int IN_TO_REG(int val, int nominal)
 {
-       return SENSORS_LIMIT((val * 192 + nominal / 2) / nominal, 0, 255);
+       return clamp_val((val * 192 + nominal / 2) / nominal, 0, 255);
 }
 
 /*
@@ -293,8 +293,7 @@ static inline int TEMP_FROM_REG(int reg, int res)
 
 static inline int TEMP_TO_REG(int val)
 {
-       return SENSORS_LIMIT((val < 0 ? val - 500 : val + 500) / 1000,
-                            -128, 127);
+       return clamp_val((val < 0 ? val - 500 : val + 500) / 1000, -128, 127);
 }
 
 /* Temperature range */
@@ -332,7 +331,7 @@ static inline int TEMP_HYST_FROM_REG(int reg, int ix)
 
 static inline int TEMP_HYST_TO_REG(int val, int ix, int reg)
 {
-       int hyst = SENSORS_LIMIT((val + 500) / 1000, 0, 15);
+       int hyst = clamp_val((val + 500) / 1000, 0, 15);
 
        return (ix == 1) ? (reg & 0xf0) | hyst : (reg & 0x0f) | (hyst << 4);
 }
@@ -349,10 +348,10 @@ static inline int FAN_FROM_REG(int reg, int tpc)
 static inline int FAN_TO_REG(int val, int tpc)
 {
        if (tpc) {
-               return SENSORS_LIMIT(val / tpc, 0, 0xffff);
+               return clamp_val(val / tpc, 0, 0xffff);
        } else {
                return (val <= 0) ? 0xffff :
-                       SENSORS_LIMIT(90000 * 60 / val, 0, 0xfffe);
+                       clamp_val(90000 * 60 / val, 0, 0xfffe);
        }
 }
 
@@ -1282,7 +1281,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
        mutex_lock(&data->update_lock);
        switch (fn) {
        case SYS_PWM:
-               data->pwm[ix] = SENSORS_LIMIT(val, 0, 255);
+               data->pwm[ix] = clamp_val(val, 0, 255);
                dme1737_write(data, DME1737_REG_PWM(ix), data->pwm[ix]);
                break;
        case SYS_PWM_FREQ:
@@ -1450,7 +1449,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
                break;
        case SYS_PWM_AUTO_POINT1_PWM:
                /* Only valid for pwm[1-3] */
-               data->pwm_min[ix] = SENSORS_LIMIT(val, 0, 255);
+               data->pwm_min[ix] = clamp_val(val, 0, 255);
                dme1737_write(data, DME1737_REG_PWM_MIN(ix),
                              data->pwm_min[ix]);
                break;
index 77f434c5823690930f0cfa0d2e41932d91eb3223..b073056220873d4e147150b4e376b0f9c40c363d 100644 (file)
@@ -405,7 +405,7 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
        if (rpm_target == 0)
                data->fan_target = 0x1fff;
        else
-               data->fan_target = SENSORS_LIMIT(
+               data->fan_target = clamp_val(
                        (FAN_RPM_FACTOR * data->fan_multiplier) / rpm_target,
                        0, 0x1fff);
 
index 789bd4fb329b9b2eeca82b07751ab538b5c19e9a..936898f82f94d140ab00d0b58d78bc195d23e480 100644 (file)
@@ -220,7 +220,7 @@ static ssize_t set_in(struct device *dev, struct device_attribute *devattr,
                          : EMC6W201_REG_IN_HIGH(nr);
 
        mutex_lock(&data->update_lock);
-       data->in[sf][nr] = SENSORS_LIMIT(val, 0, 255);
+       data->in[sf][nr] = clamp_val(val, 0, 255);
        err = emc6w201_write8(client, reg, data->in[sf][nr]);
        mutex_unlock(&data->update_lock);
 
@@ -257,7 +257,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
                          : EMC6W201_REG_TEMP_HIGH(nr);
 
        mutex_lock(&data->update_lock);
-       data->temp[sf][nr] = SENSORS_LIMIT(val, -127, 128);
+       data->temp[sf][nr] = clamp_val(val, -127, 128);
        err = emc6w201_write8(client, reg, data->temp[sf][nr]);
        mutex_unlock(&data->update_lock);
 
@@ -298,7 +298,7 @@ static ssize_t set_fan(struct device *dev, struct device_attribute *devattr,
                val = 0xFFFF;
        } else {
                val = DIV_ROUND_CLOSEST(5400000U, val);
-               val = SENSORS_LIMIT(val, 0, 0xFFFE);
+               val = clamp_val(val, 0, 0xFFFE);
        }
 
        mutex_lock(&data->update_lock);
index bb7275cc47f30d8055f871cf3904bfde33f862cb..cfb02dd91aadda144e5986e19e3f57e0fd65743e 100644 (file)
@@ -1350,7 +1350,7 @@ static ssize_t store_fan_full_speed(struct device *dev,
        if (err)
                return err;
 
-       val = SENSORS_LIMIT(val, 23, 1500000);
+       val = clamp_val(val, 23, 1500000);
        val = fan_to_reg(val);
 
        mutex_lock(&data->update_lock);
@@ -1438,7 +1438,7 @@ static ssize_t store_in_max(struct device *dev, struct device_attribute
                return err;
 
        val /= 8;
-       val = SENSORS_LIMIT(val, 0, 255);
+       val = clamp_val(val, 0, 255);
 
        mutex_lock(&data->update_lock);
        f71882fg_write8(data, F71882FG_REG_IN1_HIGH, val);
@@ -1542,7 +1542,7 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute
                return err;
 
        val /= 1000;
-       val = SENSORS_LIMIT(val, 0, 255);
+       val = clamp_val(val, 0, 255);
 
        mutex_lock(&data->update_lock);
        f71882fg_write8(data, F71882FG_REG_TEMP_HIGH(nr), val);
@@ -1589,8 +1589,7 @@ static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute
 
        /* convert abs to relative and check */
        data->temp_high[nr] = f71882fg_read8(data, F71882FG_REG_TEMP_HIGH(nr));
-       val = SENSORS_LIMIT(val, data->temp_high[nr] - 15,
-                           data->temp_high[nr]);
+       val = clamp_val(val, data->temp_high[nr] - 15, data->temp_high[nr]);
        val = data->temp_high[nr] - val;
 
        /* convert value to register contents */
@@ -1627,7 +1626,7 @@ static ssize_t store_temp_crit(struct device *dev, struct device_attribute
                return err;
 
        val /= 1000;
-       val = SENSORS_LIMIT(val, 0, 255);
+       val = clamp_val(val, 0, 255);
 
        mutex_lock(&data->update_lock);
        f71882fg_write8(data, F71882FG_REG_TEMP_OVT(nr), val);
@@ -1754,7 +1753,7 @@ static ssize_t store_pwm(struct device *dev,
        if (err)
                return err;
 
-       val = SENSORS_LIMIT(val, 0, 255);
+       val = clamp_val(val, 0, 255);
 
        mutex_lock(&data->update_lock);
        data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
@@ -1805,7 +1804,7 @@ static ssize_t store_simple_pwm(struct device *dev,
        if (err)
                return err;
 
-       val = SENSORS_LIMIT(val, 0, 255);
+       val = clamp_val(val, 0, 255);
 
        mutex_lock(&data->update_lock);
        f71882fg_write8(data, F71882FG_REG_PWM(nr), val);
@@ -1932,7 +1931,7 @@ static ssize_t store_pwm_auto_point_pwm(struct device *dev,
        if (err)
                return err;
 
-       val = SENSORS_LIMIT(val, 0, 255);
+       val = clamp_val(val, 0, 255);
 
        mutex_lock(&data->update_lock);
        data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
@@ -1991,8 +1990,8 @@ static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev,
        mutex_lock(&data->update_lock);
        data->pwm_auto_point_temp[nr][point] =
                f71882fg_read8(data, F71882FG_REG_POINT_TEMP(nr, point));
-       val = SENSORS_LIMIT(val, data->pwm_auto_point_temp[nr][point] - 15,
-                               data->pwm_auto_point_temp[nr][point]);
+       val = clamp_val(val, data->pwm_auto_point_temp[nr][point] - 15,
+                       data->pwm_auto_point_temp[nr][point]);
        val = data->pwm_auto_point_temp[nr][point] - val;
 
        reg = f71882fg_read8(data, F71882FG_REG_FAN_HYST(nr / 2));
@@ -2126,9 +2125,9 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
        val /= 1000;
 
        if (data->auto_point_temp_signed)
-               val = SENSORS_LIMIT(val, -128, 127);
+               val = clamp_val(val, -128, 127);
        else
-               val = SENSORS_LIMIT(val, 0, 127);
+               val = clamp_val(val, 0, 127);
 
        mutex_lock(&data->update_lock);
        f71882fg_write8(data, F71882FG_REG_POINT_TEMP(pwm, point), val);
index f7dba229395f9a844da9a8418ac9c5b2918d75dd..9e300e567f15c6b0cc22d6245e9a1cccbc1d8e0e 100644 (file)
@@ -359,7 +359,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
                return -EINVAL;
 
        mutex_lock(&data->update_lock);
-       data->pwm[nr] = SENSORS_LIMIT(val, 0, 255);
+       data->pwm[nr] = clamp_val(val, 0, 255);
        f75375_write_pwm(client, nr);
        mutex_unlock(&data->update_lock);
        return count;
@@ -556,7 +556,7 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
        if (err < 0)
                return err;
 
-       val = SENSORS_LIMIT(VOLT_TO_REG(val), 0, 0xff);
+       val = clamp_val(VOLT_TO_REG(val), 0, 0xff);
        mutex_lock(&data->update_lock);
        data->in_max[nr] = val;
        f75375_write8(client, F75375_REG_VOLT_HIGH(nr), data->in_max[nr]);
@@ -577,7 +577,7 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
        if (err < 0)
                return err;
 
-       val = SENSORS_LIMIT(VOLT_TO_REG(val), 0, 0xff);
+       val = clamp_val(VOLT_TO_REG(val), 0, 0xff);
        mutex_lock(&data->update_lock);
        data->in_min[nr] = val;
        f75375_write8(client, F75375_REG_VOLT_LOW(nr), data->in_min[nr]);
@@ -625,7 +625,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
        if (err < 0)
                return err;
 
-       val = SENSORS_LIMIT(TEMP_TO_REG(val), 0, 127);
+       val = clamp_val(TEMP_TO_REG(val), 0, 127);
        mutex_lock(&data->update_lock);
        data->temp_high[nr] = val;
        f75375_write8(client, F75375_REG_TEMP_HIGH(nr), data->temp_high[nr]);
@@ -646,7 +646,7 @@ static ssize_t set_temp_max_hyst(struct device *dev,
        if (err < 0)
                return err;
 
-       val = SENSORS_LIMIT(TEMP_TO_REG(val), 0, 127);
+       val = clamp_val(TEMP_TO_REG(val), 0, 127);
        mutex_lock(&data->update_lock);
        data->temp_max_hyst[nr] = val;
        f75375_write8(client, F75375_REG_TEMP_HYST(nr),
@@ -822,7 +822,7 @@ static void f75375_init(struct i2c_client *client, struct f75375_data *data,
                if (auto_mode_enabled(f75375s_pdata->pwm_enable[nr]) ||
                    !duty_mode_enabled(f75375s_pdata->pwm_enable[nr]))
                        continue;
-               data->pwm[nr] = SENSORS_LIMIT(f75375s_pdata->pwm[nr], 0, 255);
+               data->pwm[nr] = clamp_val(f75375s_pdata->pwm[nr], 0, 255);
                f75375_write_pwm(client, nr);
        }
 
index 519ce8b9c1427af878d495cb89addf42b20db1ef..8af2755cdb871201fc56f6a0ab4ad149b21634c6 100644 (file)
@@ -379,7 +379,7 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute
        if (err)
                return err;
 
-       v = SENSORS_LIMIT(v / 1000, -128, 127) + 128;
+       v = clamp_val(v / 1000, -128, 127) + 128;
 
        mutex_lock(&data->update_lock);
        i2c_smbus_write_byte_data(to_i2c_client(dev),
@@ -540,7 +540,7 @@ static ssize_t store_pwm_auto_point1_pwm(struct device *dev,
 
        /* reg: 0 = allow turning off (except on the syl), 1-255 = 50-100% */
        if (v || data->kind == fscsyl) {
-               v = SENSORS_LIMIT(v, 128, 255);
+               v = clamp_val(v, 128, 255);
                v = (v - 128) * 2 + 1;
        }
 
index 8b2106f60edac7e91c4eb6eaabfb2cc6585636a0..ea6480b80e7fcd3df07eb80c91a52e9620b9019f 100644 (file)
@@ -171,7 +171,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
                return -EINVAL;
 
        mutex_lock(&data->update_lock);
-       data->set_cnt = PWM_TO_CNT(SENSORS_LIMIT(val, 0, 255));
+       data->set_cnt = PWM_TO_CNT(clamp_val(val, 0, 255));
        g760a_write_value(client, G760A_REG_SET_CNT, data->set_cnt);
        mutex_unlock(&data->update_lock);
 
index 2c74673f48e5b770caa5478191c0574310d038ab..e2e5909a34df05fba91e7ac71f33905b9d0d91c0 100644 (file)
@@ -86,7 +86,7 @@ enum chips { gl518sm_r00, gl518sm_r80 };
 #define BOOL_FROM_REG(val)     ((val) ? 0 : 1)
 #define BOOL_TO_REG(val)       ((val) ? 0 : 1)
 
-#define TEMP_TO_REG(val)       SENSORS_LIMIT(((((val) < 0 ? \
+#define TEMP_TO_REG(val)       clamp_val(((((val) < 0 ? \
                                (val) - 500 : \
                                (val) + 500) / 1000) + 119), 0, 255)
 #define TEMP_FROM_REG(val)     (((val) - 119) * 1000)
@@ -96,15 +96,15 @@ static inline u8 FAN_TO_REG(long rpm, int div)
        long rpmdiv;
        if (rpm == 0)
                return 0;
-       rpmdiv = SENSORS_LIMIT(rpm, 1, 960000) * div;
-       return SENSORS_LIMIT((480000 + rpmdiv / 2) / rpmdiv, 1, 255);
+       rpmdiv = clamp_val(rpm, 1, 960000) * div;
+       return clamp_val((480000 + rpmdiv / 2) / rpmdiv, 1, 255);
 }
 #define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (480000 / ((val) * (div))))
 
-#define IN_TO_REG(val)         SENSORS_LIMIT((((val) + 9) / 19), 0, 255)
+#define IN_TO_REG(val)         clamp_val((((val) + 9) / 19), 0, 255)
 #define IN_FROM_REG(val)       ((val) * 19)
 
-#define VDD_TO_REG(val)                SENSORS_LIMIT((((val) * 4 + 47) / 95), 0, 255)
+#define VDD_TO_REG(val)                clamp_val((((val) * 4 + 47) / 95), 0, 255)
 #define VDD_FROM_REG(val)      (((val) * 95 + 2) / 4)
 
 #define DIV_FROM_REG(val)      (1 << (val))
index a21ff252f2f1b8cd6a14a898bf72fd36f9ce3465..ed56e09c3dd7147b4f209fb0a79244300692c341 100644 (file)
@@ -144,10 +144,10 @@ static ssize_t get_cpu_vid(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR(cpu0_vid, S_IRUGO, get_cpu_vid, NULL);
 
 #define VDD_FROM_REG(val) (((val) * 95 + 2) / 4)
-#define VDD_TO_REG(val) SENSORS_LIMIT((((val) * 4 + 47) / 95), 0, 255)
+#define VDD_TO_REG(val) clamp_val((((val) * 4 + 47) / 95), 0, 255)
 
 #define IN_FROM_REG(val) ((val) * 19)
-#define IN_TO_REG(val) SENSORS_LIMIT((((val) + 9) / 19), 0, 255)
+#define IN_TO_REG(val) clamp_val((((val) + 9) / 19), 0, 255)
 
 static ssize_t get_in_input(struct device *dev, struct device_attribute *attr,
                            char *buf)
@@ -285,8 +285,7 @@ static SENSOR_DEVICE_ATTR(in4_max, S_IRUGO | S_IWUSR,
 #define DIV_FROM_REG(val) (1 << (val))
 #define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (480000 / ((val) << (div))))
 #define FAN_TO_REG(val, div) ((val) <= 0 ? 0 : \
-       SENSORS_LIMIT((480000 + ((val) << ((div)-1))) / ((val) << (div)), 1, \
-                     255))
+       clamp_val((480000 + ((val) << ((div)-1))) / ((val) << (div)), 1, 255))
 
 static ssize_t get_fan_input(struct device *dev, struct device_attribute *attr,
                             char *buf)
@@ -450,7 +449,7 @@ static DEVICE_ATTR(fan1_off, S_IRUGO | S_IWUSR,
                get_fan_off, set_fan_off);
 
 #define TEMP_FROM_REG(val) (((val) - 130) * 1000)
-#define TEMP_TO_REG(val) SENSORS_LIMIT(((((val) < 0 ? \
+#define TEMP_TO_REG(val) clamp_val(((((val) < 0 ? \
                        (val) - 500 : (val) + 500) / 1000) + 130), 0, 255)
 
 static ssize_t get_temp_input(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/hwmon/ina209.c b/drivers/hwmon/ina209.c
new file mode 100644 (file)
index 0000000..c6fdd5b
--- /dev/null
@@ -0,0 +1,636 @@
+/*
+ * Driver for the Texas Instruments / Burr Brown INA209
+ * Bidirectional Current/Power Monitor
+ *
+ * Copyright (C) 2012 Guenter Roeck <linux@roeck-us.net>
+ *
+ * Derived from Ira W. Snyder's original driver submission
+ *     Copyright (C) 2008 Paul Hays <Paul.Hays@cattail.ca>
+ *     Copyright (C) 2008-2009 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * Aligned with ina2xx driver
+ *     Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ *     Thanks to Jan Volkering
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * Datasheet:
+ * http://www.ti.com/lit/gpn/ina209
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/bug.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+#include <linux/platform_data/ina2xx.h>
+
+/* register definitions */
+#define INA209_CONFIGURATION           0x00
+#define INA209_STATUS                  0x01
+#define INA209_STATUS_MASK             0x02
+#define INA209_SHUNT_VOLTAGE           0x03
+#define INA209_BUS_VOLTAGE             0x04
+#define INA209_POWER                   0x05
+#define INA209_CURRENT                 0x06
+#define INA209_SHUNT_VOLTAGE_POS_PEAK  0x07
+#define INA209_SHUNT_VOLTAGE_NEG_PEAK  0x08
+#define INA209_BUS_VOLTAGE_MAX_PEAK    0x09
+#define INA209_BUS_VOLTAGE_MIN_PEAK    0x0a
+#define INA209_POWER_PEAK              0x0b
+#define INA209_SHUNT_VOLTAGE_POS_WARN  0x0c
+#define INA209_SHUNT_VOLTAGE_NEG_WARN  0x0d
+#define INA209_POWER_WARN              0x0e
+#define INA209_BUS_VOLTAGE_OVER_WARN   0x0f
+#define INA209_BUS_VOLTAGE_UNDER_WARN  0x10
+#define INA209_POWER_OVER_LIMIT                0x11
+#define INA209_BUS_VOLTAGE_OVER_LIMIT  0x12
+#define INA209_BUS_VOLTAGE_UNDER_LIMIT 0x13
+#define INA209_CRITICAL_DAC_POS                0x14
+#define INA209_CRITICAL_DAC_NEG                0x15
+#define INA209_CALIBRATION             0x16
+
+#define INA209_REGISTERS               0x17
+
+#define INA209_CONFIG_DEFAULT          0x3c47  /* PGA=8, full range */
+#define INA209_SHUNT_DEFAULT           10000   /* uOhm */
+
+struct ina209_data {
+       struct device *hwmon_dev;
+
+       struct mutex update_lock;
+       bool valid;
+       unsigned long last_updated;     /* in jiffies */
+
+       u16 regs[INA209_REGISTERS];     /* All chip registers */
+
+       u16 config_orig;                /* Original configuration */
+       u16 calibration_orig;           /* Original calibration */
+       u16 update_interval;
+};
+
+static struct ina209_data *ina209_update_device(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct ina209_data *data = i2c_get_clientdata(client);
+       struct ina209_data *ret = data;
+       s32 val;
+       int i;
+
+       mutex_lock(&data->update_lock);
+
+       if (!data->valid ||
+           time_after(jiffies, data->last_updated + data->update_interval)) {
+               for (i = 0; i < ARRAY_SIZE(data->regs); i++) {
+                       val = i2c_smbus_read_word_swapped(client, i);
+                       if (val < 0) {
+                               ret = ERR_PTR(val);
+                               goto abort;
+                       }
+                       data->regs[i] = val;
+               }
+               data->last_updated = jiffies;
+               data->valid = true;
+       }
+abort:
+       mutex_unlock(&data->update_lock);
+       return ret;
+}
+
+/*
+ * Read a value from a device register and convert it to the
+ * appropriate sysfs units
+ */
+static long ina209_from_reg(const u8 reg, const u16 val)
+{
+       switch (reg) {
+       case INA209_SHUNT_VOLTAGE:
+       case INA209_SHUNT_VOLTAGE_POS_PEAK:
+       case INA209_SHUNT_VOLTAGE_NEG_PEAK:
+       case INA209_SHUNT_VOLTAGE_POS_WARN:
+       case INA209_SHUNT_VOLTAGE_NEG_WARN:
+               /* LSB=10 uV. Convert to mV. */
+               return DIV_ROUND_CLOSEST(val, 100);
+
+       case INA209_BUS_VOLTAGE:
+       case INA209_BUS_VOLTAGE_MAX_PEAK:
+       case INA209_BUS_VOLTAGE_MIN_PEAK:
+       case INA209_BUS_VOLTAGE_OVER_WARN:
+       case INA209_BUS_VOLTAGE_UNDER_WARN:
+       case INA209_BUS_VOLTAGE_OVER_LIMIT:
+       case INA209_BUS_VOLTAGE_UNDER_LIMIT:
+               /* LSB=4 mV, last 3 bits unused */
+               return (val >> 3) * 4;
+
+       case INA209_CRITICAL_DAC_POS:
+               /* LSB=1 mV, in the upper 8 bits */
+               return val >> 8;
+
+       case INA209_CRITICAL_DAC_NEG:
+               /* LSB=1 mV, in the upper 8 bits */
+               return -1 * (val >> 8);
+
+       case INA209_POWER:
+       case INA209_POWER_PEAK:
+       case INA209_POWER_WARN:
+       case INA209_POWER_OVER_LIMIT:
+               /* LSB=20 mW. Convert to uW */
+               return val * 20 * 1000L;
+
+       case INA209_CURRENT:
+               /* LSB=1 mA (selected). Is in mA */
+               return val;
+       }
+
+       /* programmer goofed */
+       WARN_ON_ONCE(1);
+       return 0;
+}
+
+/*
+ * Take a value and convert it to register format, clamping the value
+ * to the appropriate range.
+ */
+static int ina209_to_reg(u8 reg, u16 old, long val)
+{
+       switch (reg) {
+       case INA209_SHUNT_VOLTAGE_POS_WARN:
+       case INA209_SHUNT_VOLTAGE_NEG_WARN:
+               /* Limit to +- 320 mV, 10 uV LSB */
+               return clamp_val(val, -320, 320) * 100;
+
+       case INA209_BUS_VOLTAGE_OVER_WARN:
+       case INA209_BUS_VOLTAGE_UNDER_WARN:
+       case INA209_BUS_VOLTAGE_OVER_LIMIT:
+       case INA209_BUS_VOLTAGE_UNDER_LIMIT:
+               /*
+                * Limit to 0-32000 mV, 4 mV LSB
+                *
+                * The last three bits aren't part of the value, but we'll
+                * preserve them in their original state.
+                */
+               return (DIV_ROUND_CLOSEST(clamp_val(val, 0, 32000), 4) << 3)
+                 | (old & 0x7);
+
+       case INA209_CRITICAL_DAC_NEG:
+               /*
+                * Limit to -255-0 mV, 1 mV LSB
+                * Convert the value to a positive value for the register
+                *
+                * The value lives in the top 8 bits only, be careful
+                * and keep original value of other bits.
+                */
+               return (clamp_val(-val, 0, 255) << 8) | (old & 0xff);
+
+       case INA209_CRITICAL_DAC_POS:
+               /*
+                * Limit to 0-255 mV, 1 mV LSB
+                *
+                * The value lives in the top 8 bits only, be careful
+                * and keep original value of other bits.
+                */
+               return (clamp_val(val, 0, 255) << 8) | (old & 0xff);
+
+       case INA209_POWER_WARN:
+       case INA209_POWER_OVER_LIMIT:
+               /* 20 mW LSB */
+               return DIV_ROUND_CLOSEST(val, 20 * 1000);
+       }
+
+       /* Other registers are read-only, return access error */
+       return -EACCES;
+}
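
To see the two helpers above working together, the shunt-voltage path (10 uV LSB) in isolation: a warning limit written in millivolts is clamped to +/-320 mV and stored as a count of 10 uV steps, and a raw register value is rounded back down to millivolts. This is a userspace sketch with stand-ins for clamp_val() and DIV_ROUND_CLOSEST(); only the arithmetic mirrors ina209_to_reg()/ina209_from_reg().

    #include <stdio.h>

    #define CLAMP(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))
    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))   /* positive values only */

    int main(void)
    {
            long limit_mv = 250;                           /* requested warning limit */
            long reg = CLAMP(limit_mv, -320, 320) * 100;   /* 10 uV units, as in ina209_to_reg() */
            long back_mv = DIV_ROUND_CLOSEST(reg, 100);    /* as in ina209_from_reg() */

            printf("reg=%ld back=%ld mV\n", reg, back_mv); /* reg=25000, back=250 mV */
            return 0;
    }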
+
+static int ina209_interval_from_reg(u16 reg)
+{
+       return 68 >> (15 - ((reg >> 3) & 0x0f));
+}
+
+static u16 ina209_reg_from_interval(u16 config, long interval)
+{
+       int i, adc;
+
+       if (interval <= 0) {
+               adc = 8;
+       } else {
+               adc = 15;
+               for (i = 34 + 34 / 2; i; i >>= 1) {
+                       if (i < interval)
+                               break;
+                       adc--;
+               }
+       }
+       return (config & 0xf807) | (adc << 3) | (adc << 7);
+}
+
+static ssize_t ina209_set_interval(struct device *dev,
+                                  struct device_attribute *da,
+                                  const char *buf, size_t count)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct ina209_data *data = ina209_update_device(dev);
+       long val;
+       u16 regval;
+       int ret;
+
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       ret = kstrtol(buf, 10, &val);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&data->update_lock);
+       regval = ina209_reg_from_interval(data->regs[INA209_CONFIGURATION],
+                                         val);
+       i2c_smbus_write_word_swapped(client, INA209_CONFIGURATION, regval);
+       data->regs[INA209_CONFIGURATION] = regval;
+       data->update_interval = ina209_interval_from_reg(regval);
+       mutex_unlock(&data->update_lock);
+       return count;
+}
+
+static ssize_t ina209_show_interval(struct device *dev,
+                                   struct device_attribute *da, char *buf)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct ina209_data *data = i2c_get_clientdata(client);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", data->update_interval);
+}
+
+/*
+ * History is reset by writing 1 into bit 0 of the respective peak register.
+ * Since more than one peak register may be affected by the scope of a
+ * reset_history attribute write, use a bit mask in attr->index to identify
+ * which registers are affected.
+ */
+static u16 ina209_reset_history_regs[] = {
+       INA209_SHUNT_VOLTAGE_POS_PEAK,
+       INA209_SHUNT_VOLTAGE_NEG_PEAK,
+       INA209_BUS_VOLTAGE_MAX_PEAK,
+       INA209_BUS_VOLTAGE_MIN_PEAK,
+       INA209_POWER_PEAK
+};
+
+static ssize_t ina209_reset_history(struct device *dev,
+                                   struct device_attribute *da,
+                                   const char *buf,
+                                   size_t count)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct ina209_data *data = i2c_get_clientdata(client);
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+       u32 mask = attr->index;
+       long val;
+       int i, ret;
+
+       ret = kstrtol(buf, 10, &val);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&data->update_lock);
+       for (i = 0; i < ARRAY_SIZE(ina209_reset_history_regs); i++) {
+               if (mask & (1 << i))
+                       i2c_smbus_write_word_swapped(client,
+                                       ina209_reset_history_regs[i], 1);
+       }
+       data->valid = false;
+       mutex_unlock(&data->update_lock);
+       return count;
+}
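
As the comment above explains, each *_reset_history attribute passes a bit mask in attr->index selecting which peak registers to clear; the attribute definitions further down use (1 << 0) | (1 << 1) for in0, (1 << 2) | (1 << 3) for in1 and 1 << 4 for power1. A standalone sketch of that selection, using the peak register offsets defined at the top of the file:

    #include <stdio.h>

    /* INA209_SHUNT_VOLTAGE_POS/NEG_PEAK, BUS_VOLTAGE_MAX/MIN_PEAK, POWER_PEAK */
    static const unsigned char peak_regs[] = { 0x07, 0x08, 0x09, 0x0a, 0x0b };

    int main(void)
    {
            unsigned int mask = (1 << 0) | (1 << 1);   /* in0_reset_history */
            unsigned int i;

            /* same walk as ina209_reset_history(): write 1 to each selected register */
            for (i = 0; i < sizeof(peak_regs) / sizeof(peak_regs[0]); i++)
                    if (mask & (1u << i))
                            printf("write 1 to register 0x%02x\n", peak_regs[i]);
            return 0;
    }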
+
+static ssize_t ina209_set_value(struct device *dev,
+                               struct device_attribute *da,
+                               const char *buf,
+                               size_t count)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct ina209_data *data = ina209_update_device(dev);
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+       int reg = attr->index;
+       long val;
+       int ret;
+
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       ret = kstrtol(buf, 10, &val);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&data->update_lock);
+       ret = ina209_to_reg(reg, data->regs[reg], val);
+       if (ret < 0) {
+               count = ret;
+               goto abort;
+       }
+       i2c_smbus_write_word_swapped(client, reg, ret);
+       data->regs[reg] = ret;
+abort:
+       mutex_unlock(&data->update_lock);
+       return count;
+}
+
+static ssize_t ina209_show_value(struct device *dev,
+                                struct device_attribute *da,
+                                char *buf)
+{
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+       struct ina209_data *data = ina209_update_device(dev);
+       long val;
+
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       val = ina209_from_reg(attr->index, data->regs[attr->index]);
+       return snprintf(buf, PAGE_SIZE, "%ld\n", val);
+}
+
+static ssize_t ina209_show_alarm(struct device *dev,
+                                struct device_attribute *da,
+                                char *buf)
+{
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+       struct ina209_data *data = ina209_update_device(dev);
+       const unsigned int mask = attr->index;
+       u16 status;
+
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       status = data->regs[INA209_STATUS];
+
+       /*
+        * All alarms are in the INA209_STATUS register. To avoid a long
+        * switch statement, the mask is passed in attr->index
+        */
+       return snprintf(buf, PAGE_SIZE, "%u\n", !!(status & mask));
+}
+
+/* Shunt voltage, history, limits, alarms */
+static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ina209_show_value, NULL,
+                         INA209_SHUNT_VOLTAGE);
+static SENSOR_DEVICE_ATTR(in0_input_highest, S_IRUGO, ina209_show_value, NULL,
+                         INA209_SHUNT_VOLTAGE_POS_PEAK);
+static SENSOR_DEVICE_ATTR(in0_input_lowest, S_IRUGO, ina209_show_value, NULL,
+                         INA209_SHUNT_VOLTAGE_NEG_PEAK);
+static SENSOR_DEVICE_ATTR(in0_reset_history, S_IWUSR, NULL,
+                         ina209_reset_history, (1 << 0) | (1 << 1));
+static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO | S_IWUSR, ina209_show_value,
+                         ina209_set_value, INA209_SHUNT_VOLTAGE_POS_WARN);
+static SENSOR_DEVICE_ATTR(in0_min, S_IRUGO | S_IWUSR, ina209_show_value,
+                         ina209_set_value, INA209_SHUNT_VOLTAGE_NEG_WARN);
+static SENSOR_DEVICE_ATTR(in0_crit_max, S_IRUGO | S_IWUSR, ina209_show_value,
+                         ina209_set_value, INA209_CRITICAL_DAC_POS);
+static SENSOR_DEVICE_ATTR(in0_crit_min, S_IRUGO | S_IWUSR, ina209_show_value,
+                         ina209_set_value, INA209_CRITICAL_DAC_NEG);
+
+static SENSOR_DEVICE_ATTR(in0_min_alarm,  S_IRUGO, ina209_show_alarm, NULL,
+                         1 << 11);
+static SENSOR_DEVICE_ATTR(in0_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
+                         1 << 12);
+static SENSOR_DEVICE_ATTR(in0_crit_min_alarm, S_IRUGO, ina209_show_alarm, NULL,
+                         1 << 6);
+static SENSOR_DEVICE_ATTR(in0_crit_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
+                         1 << 7);
+
+/* Bus voltage, history, limits, alarms */
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, ina209_show_value, NULL,
+                         INA209_BUS_VOLTAGE);
+static SENSOR_DEVICE_ATTR(in1_input_highest, S_IRUGO, ina209_show_value, NULL,
+                         INA209_BUS_VOLTAGE_MAX_PEAK);
+static SENSOR_DEVICE_ATTR(in1_input_lowest, S_IRUGO, ina209_show_value, NULL,
+                         INA209_BUS_VOLTAGE_MIN_PEAK);
+static SENSOR_DEVICE_ATTR(in1_reset_history, S_IWUSR, NULL,
+                         ina209_reset_history, (1 << 2) | (1 << 3));
+static SENSOR_DEVICE_ATTR(in1_max, S_IRUGO | S_IWUSR, ina209_show_value,
+                         ina209_set_value, INA209_BUS_VOLTAGE_OVER_WARN);
+static SENSOR_DEVICE_ATTR(in1_min, S_IRUGO | S_IWUSR, ina209_show_value,
+                         ina209_set_value, INA209_BUS_VOLTAGE_UNDER_WARN);
+static SENSOR_DEVICE_ATTR(in1_crit_max, S_IRUGO | S_IWUSR, ina209_show_value,
+                         ina209_set_value, INA209_BUS_VOLTAGE_OVER_LIMIT);
+static SENSOR_DEVICE_ATTR(in1_crit_min, S_IRUGO | S_IWUSR, ina209_show_value,
+                         ina209_set_value, INA209_BUS_VOLTAGE_UNDER_LIMIT);
+
+static SENSOR_DEVICE_ATTR(in1_min_alarm, S_IRUGO, ina209_show_alarm, NULL,
+                         1 << 14);
+static SENSOR_DEVICE_ATTR(in1_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
+                         1 << 15);
+static SENSOR_DEVICE_ATTR(in1_crit_min_alarm, S_IRUGO, ina209_show_alarm, NULL,
+                         1 << 9);
+static SENSOR_DEVICE_ATTR(in1_crit_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
+                         1 << 10);
+
+/* Power */
+static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina209_show_value, NULL,
+                         INA209_POWER);
+static SENSOR_DEVICE_ATTR(power1_input_highest, S_IRUGO, ina209_show_value,
+                         NULL, INA209_POWER_PEAK);
+static SENSOR_DEVICE_ATTR(power1_reset_history, S_IWUSR, NULL,
+                         ina209_reset_history, 1 << 4);
+static SENSOR_DEVICE_ATTR(power1_max, S_IRUGO | S_IWUSR, ina209_show_value,
+                         ina209_set_value, INA209_POWER_WARN);
+static SENSOR_DEVICE_ATTR(power1_crit, S_IRUGO | S_IWUSR, ina209_show_value,
+                         ina209_set_value, INA209_POWER_OVER_LIMIT);
+
+static SENSOR_DEVICE_ATTR(power1_max_alarm, S_IRUGO, ina209_show_alarm, NULL,
+                         1 << 13);
+static SENSOR_DEVICE_ATTR(power1_crit_alarm, S_IRUGO, ina209_show_alarm, NULL,
+                         1 << 8);
+
+/* Current */
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ina209_show_value, NULL,
+                         INA209_CURRENT);
+
+static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR,
+                         ina209_show_interval, ina209_set_interval, 0);
+
+/*
+ * Finally, construct an array of pointers to members of the above objects,
+ * as required for sysfs_create_group()
+ */
+static struct attribute *ina209_attributes[] = {
+       &sensor_dev_attr_in0_input.dev_attr.attr,
+       &sensor_dev_attr_in0_input_highest.dev_attr.attr,
+       &sensor_dev_attr_in0_input_lowest.dev_attr.attr,
+       &sensor_dev_attr_in0_reset_history.dev_attr.attr,
+       &sensor_dev_attr_in0_max.dev_attr.attr,
+       &sensor_dev_attr_in0_min.dev_attr.attr,
+       &sensor_dev_attr_in0_crit_max.dev_attr.attr,
+       &sensor_dev_attr_in0_crit_min.dev_attr.attr,
+       &sensor_dev_attr_in0_max_alarm.dev_attr.attr,
+       &sensor_dev_attr_in0_min_alarm.dev_attr.attr,
+       &sensor_dev_attr_in0_crit_max_alarm.dev_attr.attr,
+       &sensor_dev_attr_in0_crit_min_alarm.dev_attr.attr,
+
+       &sensor_dev_attr_in1_input.dev_attr.attr,
+       &sensor_dev_attr_in1_input_highest.dev_attr.attr,
+       &sensor_dev_attr_in1_input_lowest.dev_attr.attr,
+       &sensor_dev_attr_in1_reset_history.dev_attr.attr,
+       &sensor_dev_attr_in1_max.dev_attr.attr,
+       &sensor_dev_attr_in1_min.dev_attr.attr,
+       &sensor_dev_attr_in1_crit_max.dev_attr.attr,
+       &sensor_dev_attr_in1_crit_min.dev_attr.attr,
+       &sensor_dev_attr_in1_max_alarm.dev_attr.attr,
+       &sensor_dev_attr_in1_min_alarm.dev_attr.attr,
+       &sensor_dev_attr_in1_crit_max_alarm.dev_attr.attr,
+       &sensor_dev_attr_in1_crit_min_alarm.dev_attr.attr,
+
+       &sensor_dev_attr_power1_input.dev_attr.attr,
+       &sensor_dev_attr_power1_input_highest.dev_attr.attr,
+       &sensor_dev_attr_power1_reset_history.dev_attr.attr,
+       &sensor_dev_attr_power1_max.dev_attr.attr,
+       &sensor_dev_attr_power1_crit.dev_attr.attr,
+       &sensor_dev_attr_power1_max_alarm.dev_attr.attr,
+       &sensor_dev_attr_power1_crit_alarm.dev_attr.attr,
+
+       &sensor_dev_attr_curr1_input.dev_attr.attr,
+
+       &sensor_dev_attr_update_interval.dev_attr.attr,
+
+       NULL,
+};
+
+static const struct attribute_group ina209_group = {
+       .attrs = ina209_attributes,
+};
+
+static void ina209_restore_conf(struct i2c_client *client,
+                               struct ina209_data *data)
+{
+       /* Restore initial configuration */
+       i2c_smbus_write_word_swapped(client, INA209_CONFIGURATION,
+                                    data->config_orig);
+       i2c_smbus_write_word_swapped(client, INA209_CALIBRATION,
+                                    data->calibration_orig);
+}
+
+static int ina209_init_client(struct i2c_client *client,
+                             struct ina209_data *data)
+{
+       struct ina2xx_platform_data *pdata = dev_get_platdata(&client->dev);
+       u32 shunt;
+       int reg;
+
+       reg = i2c_smbus_read_word_swapped(client, INA209_CALIBRATION);
+       if (reg < 0)
+               return reg;
+       data->calibration_orig = reg;
+
+       reg = i2c_smbus_read_word_swapped(client, INA209_CONFIGURATION);
+       if (reg < 0)
+               return reg;
+       data->config_orig = reg;
+
+       if (pdata) {
+               if (pdata->shunt_uohms <= 0)
+                       return -EINVAL;
+               shunt = pdata->shunt_uohms;
+       } else if (!of_property_read_u32(client->dev.of_node, "shunt-resistor",
+                                        &shunt)) {
+               if (shunt == 0)
+                       return -EINVAL;
+       } else {
+               shunt = data->calibration_orig ?
+                 40960000 / data->calibration_orig : INA209_SHUNT_DEFAULT;
+       }
+
+       i2c_smbus_write_word_swapped(client, INA209_CONFIGURATION,
+                                    INA209_CONFIG_DEFAULT);
+       data->update_interval = ina209_interval_from_reg(INA209_CONFIG_DEFAULT);
+
+       /*
+        * Calibrate current LSB to 1mA. Shunt is in uOhms.
+        * See equation 13 in datasheet.
+        */
+       i2c_smbus_write_word_swapped(client, INA209_CALIBRATION,
+                                    clamp_val(40960000 / shunt, 1, 65535));
+
+       /* Clear status register */
+       i2c_smbus_read_word_swapped(client, INA209_STATUS);
+
+       return 0;
+}
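
The calibration value written above comes from the datasheet equation cited in the comment: cal = 0.04096 / (current_LSB * R_shunt). With the driver's fixed 1 mA current LSB and the shunt resistance in micro-ohms this reduces to 40960000 / shunt_uohms, which is exactly the expression clamped to 1..65535 above; a quick check with the 10 mOhm default:

    #include <stdio.h>

    int main(void)
    {
            unsigned int shunt_uohm = 10000;   /* INA209_SHUNT_DEFAULT, i.e. 10 mOhm */

            /* 0.04096 / (0.001 A * shunt_uohm * 1e-6 Ohm) == 40960000 / shunt_uohm */
            unsigned int cal = 40960000 / shunt_uohm;

            printf("calibration register = %u\n", cal);   /* 4096 */
            return 0;
    }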
+
+static int ina209_probe(struct i2c_client *client,
+                       const struct i2c_device_id *id)
+{
+       struct i2c_adapter *adapter = client->adapter;
+       struct ina209_data *data;
+       int ret;
+
+       if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
+               return -ENODEV;
+
+       data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       i2c_set_clientdata(client, data);
+       mutex_init(&data->update_lock);
+
+       ret = ina209_init_client(client, data);
+       if (ret)
+               return ret;
+
+       /* Register sysfs hooks */
+       ret = sysfs_create_group(&client->dev.kobj, &ina209_group);
+       if (ret)
+               goto out_restore_conf;
+
+       data->hwmon_dev = hwmon_device_register(&client->dev);
+       if (IS_ERR(data->hwmon_dev)) {
+               ret = PTR_ERR(data->hwmon_dev);
+               goto out_hwmon_device_register;
+       }
+
+       return 0;
+
+out_hwmon_device_register:
+       sysfs_remove_group(&client->dev.kobj, &ina209_group);
+out_restore_conf:
+       ina209_restore_conf(client, data);
+       return ret;
+}
+
+static int ina209_remove(struct i2c_client *client)
+{
+       struct ina209_data *data = i2c_get_clientdata(client);
+
+       hwmon_device_unregister(data->hwmon_dev);
+       sysfs_remove_group(&client->dev.kobj, &ina209_group);
+       ina209_restore_conf(client, data);
+
+       return 0;
+}
+
+static const struct i2c_device_id ina209_id[] = {
+       { "ina209", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, ina209_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver ina209_driver = {
+       .class          = I2C_CLASS_HWMON,
+       .driver = {
+               .name   = "ina209",
+       },
+       .probe          = ina209_probe,
+       .remove         = ina209_remove,
+       .id_table       = ina209_id,
+};
+
+module_i2c_driver(ina209_driver);
+
+MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>, Paul Hays <Paul.Hays@cattail.ca>, Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("INA209 driver");
+MODULE_LICENSE("GPL");
index 117d66fcded6ae9fb9d6d28ac0d05aff67e480cb..37fc980fde240d7a7dd023cf27fc12aa8577a3f8 100644 (file)
@@ -19,6 +19,8 @@
  *            IT8726F  Super I/O chip w/LPC interface
  *            IT8728F  Super I/O chip w/LPC interface
  *            IT8758E  Super I/O chip w/LPC interface
+ *            IT8771E  Super I/O chip w/LPC interface
+ *            IT8772E  Super I/O chip w/LPC interface
  *            IT8782F  Super I/O chip w/LPC interface
  *            IT8783E/F Super I/O chip w/LPC interface
  *            Sis950   A clone of the IT8705F
@@ -61,8 +63,8 @@
 
 #define DRVNAME "it87"
 
-enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8782,
-            it8783 };
+enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8771,
+            it8772, it8782, it8783 };
 
 static unsigned short force_id;
 module_param(force_id, ushort, 0);
@@ -140,6 +142,8 @@ static inline void superio_exit(void)
 #define IT8721F_DEVID 0x8721
 #define IT8726F_DEVID 0x8726
 #define IT8728F_DEVID 0x8728
+#define IT8771E_DEVID 0x8771
+#define IT8772E_DEVID 0x8772
 #define IT8782F_DEVID 0x8782
 #define IT8783E_DEVID 0x8783
 #define IT87_ACT_REG  0x30
@@ -281,6 +285,24 @@ static const struct it87_devices it87_devices[] = {
                  | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI,
                .peci_mask = 0x07,
        },
+       [it8771] = {
+               .name = "it8771",
+               .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
+                 | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI,
+                                       /* PECI: guesswork */
+                                       /* 12mV ADC (OHM) */
+                                       /* 16 bit fans (OHM) */
+               .peci_mask = 0x07,
+       },
+       [it8772] = {
+               .name = "it8772",
+               .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
+                 | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI,
+                                       /* PECI (coreboot) */
+                                       /* 12mV ADC (HWSensors4, OHM) */
+                                       /* 16 bit fans (HWSensors4, OHM) */
+               .peci_mask = 0x07,
+       },
        [it8782] = {
                .name = "it8782",
                .features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
@@ -384,7 +406,7 @@ static int adc_lsb(const struct it87_data *data, int nr)
 static u8 in_to_reg(const struct it87_data *data, int nr, long val)
 {
        val = DIV_ROUND_CLOSEST(val, adc_lsb(data, nr));
-       return SENSORS_LIMIT(val, 0, 255);
+       return clamp_val(val, 0, 255);
 }
 
 static int in_from_reg(const struct it87_data *data, int nr, int val)
@@ -396,16 +418,15 @@ static inline u8 FAN_TO_REG(long rpm, int div)
 {
        if (rpm == 0)
                return 255;
-       rpm = SENSORS_LIMIT(rpm, 1, 1000000);
-       return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1,
-                            254);
+       rpm = clamp_val(rpm, 1, 1000000);
+       return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
 }
 
 static inline u16 FAN16_TO_REG(long rpm)
 {
        if (rpm == 0)
                return 0xffff;
-       return SENSORS_LIMIT((1350000 + rpm) / (rpm * 2), 1, 0xfffe);
+       return clamp_val((1350000 + rpm) / (rpm * 2), 1, 0xfffe);
 }
 
 #define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : (val) == 255 ? 0 : \
@@ -414,8 +435,8 @@ static inline u16 FAN16_TO_REG(long rpm)
 #define FAN16_FROM_REG(val) ((val) == 0 ? -1 : (val) == 0xffff ? 0 : \
                             1350000 / ((val) * 2))
 
-#define TEMP_TO_REG(val) (SENSORS_LIMIT(((val) < 0 ? (((val) - 500) / 1000) : \
-                                       ((val) + 500) / 1000), -128, 127))
+#define TEMP_TO_REG(val) (clamp_val(((val) < 0 ? (((val) - 500) / 1000) : \
+                                   ((val) + 500) / 1000), -128, 127))
 #define TEMP_FROM_REG(val) ((val) * 1000)
 
 static u8 pwm_to_reg(const struct it87_data *data, long val)
@@ -1709,6 +1730,12 @@ static int __init it87_find(unsigned short *address,
        case IT8728F_DEVID:
                sio_data->type = it8728;
                break;
+       case IT8771E_DEVID:
+               sio_data->type = it8771;
+               break;
+       case IT8772E_DEVID:
+               sio_data->type = it8772;
+               break;
        case IT8782F_DEVID:
                sio_data->type = it8782;
                break;
@@ -1826,10 +1853,11 @@ static int __init it87_find(unsigned short *address,
 
                reg = superio_inb(IT87_SIO_GPIO3_REG);
                if (sio_data->type == it8721 || sio_data->type == it8728 ||
+                   sio_data->type == it8771 || sio_data->type == it8772 ||
                    sio_data->type == it8782) {
                        /*
                         * IT8721F/IT8758E, and IT8782F don't have VID pins
-                        * at all, not sure about the IT8728F.
+                        * at all, not sure about the IT8728F and compatibles.
                         */
                        sio_data->skip_vid = 1;
                } else {
@@ -1883,7 +1911,9 @@ static int __init it87_find(unsigned short *address,
                if (reg & (1 << 0))
                        sio_data->internal |= (1 << 0);
                if ((reg & (1 << 1)) || sio_data->type == it8721 ||
-                   sio_data->type == it8728)
+                   sio_data->type == it8728 ||
+                   sio_data->type == it8771 ||
+                   sio_data->type == it8772)
                        sio_data->internal |= (1 << 1);
 
                /*
index e21e43c13156911989267fb555d24e5b98ddb67e..4a58f130fd4e622cf840969920f5dfd62a393a2b 100644 (file)
@@ -103,6 +103,9 @@ static const unsigned short normal_i2c[] = {
 #define MCP98243_DEVID         0x2100
 #define MCP98243_DEVID_MASK    0xfffc
 
+#define MCP98244_DEVID         0x2200
+#define MCP98244_DEVID_MASK    0xfffc
+
 #define MCP9843_DEVID          0x0000  /* Also matches mcp9805 */
 #define MCP9843_DEVID_MASK     0xfffe
 
@@ -147,6 +150,7 @@ static struct jc42_chips jc42_chips[] = {
        { MCP_MANID, MCP9804_DEVID, MCP9804_DEVID_MASK },
        { MCP_MANID, MCP98242_DEVID, MCP98242_DEVID_MASK },
        { MCP_MANID, MCP98243_DEVID, MCP98243_DEVID_MASK },
+       { MCP_MANID, MCP98244_DEVID, MCP98244_DEVID_MASK },
        { MCP_MANID, MCP9843_DEVID, MCP9843_DEVID_MASK },
        { NXP_MANID, SE97_DEVID, SE97_DEVID_MASK },
        { ONS_MANID, CAT6095_DEVID, CAT6095_DEVID_MASK },
@@ -237,9 +241,9 @@ static struct i2c_driver jc42_driver = {
 
 static u16 jc42_temp_to_reg(int temp, bool extended)
 {
-       int ntemp = SENSORS_LIMIT(temp,
-                                 extended ? JC42_TEMP_MIN_EXTENDED :
-                                 JC42_TEMP_MIN, JC42_TEMP_MAX);
+       int ntemp = clamp_val(temp,
+                             extended ? JC42_TEMP_MIN_EXTENDED :
+                             JC42_TEMP_MIN, JC42_TEMP_MAX);
 
        /* convert from 0.001 to 0.0625 resolution */
        return (ntemp * 2 / 125) & 0x1fff;
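
The arithmetic above converts millidegrees to the register's 0.0625 degC steps: 16 steps per degree, so ntemp * 16 / 1000, which the driver writes as the reduced fraction ntemp * 2 / 125. A standalone check with an arbitrary positive value:

    #include <stdio.h>

    int main(void)
    {
            int ntemp = 25500;                      /* 25.5 degC, in millidegrees */
            int reg = (ntemp * 2 / 125) & 0x1fff;   /* as in jc42_temp_to_reg() */

            /* reg is 408; 408 * 0.0625 degC == 25.5 degC */
            printf("reg=%d\n", reg);
            return 0;
    }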
index eed4d94017886626db4e08d0a231a15fcf1d96f0..f644a2e5759942a514696f0bbaeb89666a7524ea 100644 (file)
@@ -209,9 +209,9 @@ static inline int lut_temp_to_reg(struct lm63_data *data, long val)
 {
        val -= data->temp2_offset;
        if (data->lut_temp_highres)
-               return DIV_ROUND_CLOSEST(SENSORS_LIMIT(val, 0, 127500), 500);
+               return DIV_ROUND_CLOSEST(clamp_val(val, 0, 127500), 500);
        else
-               return DIV_ROUND_CLOSEST(SENSORS_LIMIT(val, 0, 127000), 1000);
+               return DIV_ROUND_CLOSEST(clamp_val(val, 0, 127000), 1000);
 }
 
 /*
@@ -415,7 +415,7 @@ static ssize_t set_pwm1(struct device *dev, struct device_attribute *devattr,
                return err;
 
        reg = nr ? LM63_REG_LUT_PWM(nr - 1) : LM63_REG_PWM_VALUE;
-       val = SENSORS_LIMIT(val, 0, 255);
+       val = clamp_val(val, 0, 255);
 
        mutex_lock(&data->update_lock);
        data->pwm1[nr] = data->pwm_highres ? val :
@@ -700,7 +700,7 @@ static ssize_t set_update_interval(struct device *dev,
                return err;
 
        mutex_lock(&data->update_lock);
-       lm63_set_convrate(client, data, SENSORS_LIMIT(val, 0, 100000));
+       lm63_set_convrate(client, data, clamp_val(val, 0, 100000));
        mutex_unlock(&data->update_lock);
 
        return count;
index 7272176a9ec786c397b6427d43ac42b10ff80fb2..9bde9644b102d9bcdade74ce20db444b832f9270 100644 (file)
@@ -8,6 +8,7 @@
  * Guillaume Ligneul <guillaume.ligneul@gmail.com>
  * Adrien Demarez <adrien.demarez@bolloretelecom.eu>
  * Jeremy Laine <jeremy.laine@bolloretelecom.eu>
+ * Chris Verges <kg4ysn@gmail.com>
  *
  * This software program is licensed subject to the GNU General Public License
  * (GPL).Version 2,June 1991, available at
@@ -36,11 +37,30 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c,
 
 #define LM73_ID                        0x9001  /* 0x0190, byte-swapped */
 #define DRVNAME                        "lm73"
-#define LM73_TEMP_MIN          (-40)
-#define LM73_TEMP_MAX          150
+#define LM73_TEMP_MIN          (-256000 / 250)
+#define LM73_TEMP_MAX          (255750 / 250)
 
-/*-----------------------------------------------------------------------*/
+#define LM73_CTRL_RES_SHIFT    5
+#define LM73_CTRL_RES_MASK     (BIT(5) | BIT(6))
+#define LM73_CTRL_TO_MASK      BIT(7)
+
+#define LM73_CTRL_HI_SHIFT     2
+#define LM73_CTRL_LO_SHIFT     1
+
+static const unsigned short lm73_convrates[] = {
+       14,     /* 11-bits (0.25000 C/LSB): RES1 Bit = 0, RES0 Bit = 0 */
+       28,     /* 12-bits (0.12500 C/LSB): RES1 Bit = 0, RES0 Bit = 1 */
+       56,     /* 13-bits (0.06250 C/LSB): RES1 Bit = 1, RES0 Bit = 0 */
+       112,    /* 14-bits (0.03125 C/LSB): RES1 Bit = 1, RES0 Bit = 1 */
+};
 
+struct lm73_data {
+       struct device *hwmon_dev;
+       struct mutex lock;
+       u8 ctrl;                        /* control register value */
+};
+
+/*-----------------------------------------------------------------------*/
 
 static ssize_t set_temp(struct device *dev, struct device_attribute *da,
                        const char *buf, size_t count)
@@ -56,8 +76,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
                return status;
 
        /* Write value */
-       value = (short) SENSORS_LIMIT(temp/250, (LM73_TEMP_MIN*4),
-               (LM73_TEMP_MAX*4)) << 5;
+       value = clamp_val(temp / 250, LM73_TEMP_MIN, LM73_TEMP_MAX) << 5;
        err = i2c_smbus_write_word_swapped(client, attr->index, value);
        return (err < 0) ? err : count;
 }
@@ -79,6 +98,73 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
        return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
 }
 
+static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
+                           const char *buf, size_t count)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct lm73_data *data = i2c_get_clientdata(client);
+       unsigned long convrate;
+       s32 err;
+       int res = 0;
+
+       err = kstrtoul(buf, 10, &convrate);
+       if (err < 0)
+               return err;
+
+       /*
+        * Convert the desired conversion rate into register bits.
+        * res is already initialized, and everything past the second-to-last
+        * value in the array is treated as belonging to the last value
+        * in the array.
+        */
+       while (res < (ARRAY_SIZE(lm73_convrates) - 1) &&
+                       convrate > lm73_convrates[res])
+               res++;
+
+       mutex_lock(&data->lock);
+       data->ctrl &= LM73_CTRL_TO_MASK;
+       data->ctrl |= res << LM73_CTRL_RES_SHIFT;
+       err = i2c_smbus_write_byte_data(client, LM73_REG_CTRL, data->ctrl);
+       mutex_unlock(&data->lock);
+
+       if (err < 0)
+               return err;
+
+       return count;
+}
+
+static ssize_t show_convrate(struct device *dev, struct device_attribute *da,
+                            char *buf)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct lm73_data *data = i2c_get_clientdata(client);
+       int res;
+
+       res = (data->ctrl & LM73_CTRL_RES_MASK) >> LM73_CTRL_RES_SHIFT;
+       return scnprintf(buf, PAGE_SIZE, "%hu\n", lm73_convrates[res]);
+}
+
+static ssize_t show_maxmin_alarm(struct device *dev,
+                                struct device_attribute *da, char *buf)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+       struct lm73_data *data = i2c_get_clientdata(client);
+       s32 ctrl;
+
+       mutex_lock(&data->lock);
+       ctrl = i2c_smbus_read_byte_data(client, LM73_REG_CTRL);
+       if (ctrl < 0)
+               goto abort;
+       data->ctrl = ctrl;
+       mutex_unlock(&data->lock);
+
+       return scnprintf(buf, PAGE_SIZE, "%d\n", (ctrl >> attr->index) & 1);
+
+abort:
+       mutex_unlock(&data->lock);
+       return ctrl;
+}
 
 /*-----------------------------------------------------------------------*/
 
@@ -90,13 +176,20 @@ static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO,
                        show_temp, set_temp, LM73_REG_MIN);
 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
                        show_temp, NULL, LM73_REG_INPUT);
-
+static SENSOR_DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO,
+                       show_convrate, set_convrate, 0);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
+                       show_maxmin_alarm, NULL, LM73_CTRL_HI_SHIFT);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO,
+                       show_maxmin_alarm, NULL, LM73_CTRL_LO_SHIFT);
 
 static struct attribute *lm73_attributes[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp1_max.dev_attr.attr,
        &sensor_dev_attr_temp1_min.dev_attr.attr,
-
+       &sensor_dev_attr_update_interval.dev_attr.attr,
+       &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
        NULL
 };
 
@@ -111,23 +204,36 @@ static const struct attribute_group lm73_group = {
 static int
 lm73_probe(struct i2c_client *client, const struct i2c_device_id *id)
 {
-       struct device *hwmon_dev;
        int status;
+       struct lm73_data *data;
+       int ctrl;
+
+       data = devm_kzalloc(&client->dev, sizeof(struct lm73_data),
+                           GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       i2c_set_clientdata(client, data);
+       mutex_init(&data->lock);
+
+       ctrl = i2c_smbus_read_byte_data(client, LM73_REG_CTRL);
+       if (ctrl < 0)
+               return ctrl;
+       data->ctrl = ctrl;
 
        /* Register sysfs hooks */
        status = sysfs_create_group(&client->dev.kobj, &lm73_group);
        if (status)
                return status;
 
-       hwmon_dev = hwmon_device_register(&client->dev);
-       if (IS_ERR(hwmon_dev)) {
-               status = PTR_ERR(hwmon_dev);
+       data->hwmon_dev = hwmon_device_register(&client->dev);
+       if (IS_ERR(data->hwmon_dev)) {
+               status = PTR_ERR(data->hwmon_dev);
                goto exit_remove;
        }
-       i2c_set_clientdata(client, hwmon_dev);
 
        dev_info(&client->dev, "%s: sensor '%s'\n",
-                dev_name(hwmon_dev), client->name);
+                dev_name(data->hwmon_dev), client->name);
 
        return 0;
 
@@ -138,9 +244,9 @@ exit_remove:
 
 static int lm73_remove(struct i2c_client *client)
 {
-       struct device *hwmon_dev = i2c_get_clientdata(client);
+       struct lm73_data *data = i2c_get_clientdata(client);
 
-       hwmon_device_unregister(hwmon_dev);
+       hwmon_device_unregister(data->hwmon_dev);
        sysfs_remove_group(&client->dev.kobj, &lm73_group);
        return 0;
 }
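
Two of the conversions above are easier to see with numbers: the limit registers clamp in 0.25 degC units and left-justify the result by five bits, and the new update_interval attribute walks lm73_convrates[] to round the requested interval up to the next supported conversion time. A userspace sketch using the same table values:

#include <stdio.h>

/* same limit values as the driver, in 0.25 degC units */
#define DEMO_TEMP_MIN	(-256000 / 250)
#define DEMO_TEMP_MAX	(255750 / 250)

static const unsigned short demo_convrates[] = { 14, 28, 56, 112 };	/* ms */

static long demo_clamp(long v, long lo, long hi)
{
        return v < lo ? lo : v > hi ? hi : v;
}

/* limit registers: 0.25 degC per LSB, left-justified by 5 bits */
static int demo_temp_to_reg(long temp)          /* temp in millidegrees */
{
        return demo_clamp(temp / 250, DEMO_TEMP_MIN, DEMO_TEMP_MAX) << 5;
}

/* round the requested interval up to the next supported conversion time */
static int demo_convrate_to_res(unsigned long convrate)        /* in ms */
{
        int res = 0;

        while (res < 3 && convrate > demo_convrates[res])
                res++;
        return res;     /* written into CTRL bits 6:5 by the driver */
}

int main(void)
{
        printf("0x%04x\n", demo_temp_to_reg(25500) & 0xffff);  /* 0x0cc0 = 25.5 degC */
        printf("%d\n", demo_convrate_to_res(30));              /* 2 -> 13-bit, 56 ms */
        return 0;
}
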
index 89aa9098ba5b9ef1759c904752cff6f9cddf00e7..668ff4721323a22842b94823d17505da6bf68b27 100644 (file)
@@ -36,7 +36,7 @@
    REG: (0.5C/bit, two's complement) << 7 */
 static inline u16 LM75_TEMP_TO_REG(long temp)
 {
-       int ntemp = SENSORS_LIMIT(temp, LM75_TEMP_MIN, LM75_TEMP_MAX);
+       int ntemp = clamp_val(temp, LM75_TEMP_MIN, LM75_TEMP_MAX);
        ntemp += (ntemp < 0 ? -250 : 250);
        return (u16)((ntemp / 500) << 7);
 }
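
The conversion adds half an LSB (250 millidegrees) toward the value's sign before truncating to 0.5 degC steps and left-justifying by seven bits. A minimal check, with the clamp limits written out as assumed example values rather than the driver's LM75_TEMP_MIN/MAX:

#include <stdio.h>

static unsigned short demo_lm75_temp_to_reg(long temp)
{
        long ntemp = temp < -55000 ? -55000 : temp > 125000 ? 125000 : temp;

        ntemp += (ntemp < 0 ? -250 : 250);      /* round to nearest 0.5 degC */
        return (unsigned short)((ntemp / 500) << 7);
}

int main(void)
{
        printf("0x%04x\n", demo_lm75_temp_to_reg(25250));  /* 0x1980 = 25.5 degC */
        return 0;
}
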
index f82acf67acf54c7180a35cd45b263615fc87a024..f17beb5e6dd63bd7055f35492546c452c2fedcb1 100644 (file)
@@ -101,7 +101,7 @@ static struct i2c_driver lm77_driver = {
  */
 static inline s16 LM77_TEMP_TO_REG(int temp)
 {
-       int ntemp = SENSORS_LIMIT(temp, LM77_TEMP_MIN, LM77_TEMP_MAX);
+       int ntemp = clamp_val(temp, LM77_TEMP_MIN, LM77_TEMP_MAX);
        return (ntemp / 500) * 8;
 }
 
index 53d6ee8ffa3370e9cfc1e60049d9704e2eb5a7f2..483538fa1bd58644b68607039be7ef8ca0cb8ed5 100644 (file)
@@ -85,7 +85,7 @@ enum chips { lm78, lm79 };
  */
 static inline u8 IN_TO_REG(unsigned long val)
 {
-       unsigned long nval = SENSORS_LIMIT(val, 0, 4080);
+       unsigned long nval = clamp_val(val, 0, 4080);
        return (nval + 8) / 16;
 }
 #define IN_FROM_REG(val) ((val) *  16)
@@ -94,7 +94,7 @@ static inline u8 FAN_TO_REG(long rpm, int div)
 {
        if (rpm <= 0)
                return 255;
-       return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+       return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
 }
 
 static inline int FAN_FROM_REG(u8 val, int div)
@@ -108,7 +108,7 @@ static inline int FAN_FROM_REG(u8 val, int div)
  */
 static inline s8 TEMP_TO_REG(int val)
 {
-       int nval = SENSORS_LIMIT(val, -128000, 127000) ;
+       int nval = clamp_val(val, -128000, 127000) ;
        return nval < 0 ? (nval - 500) / 1000 : (nval + 500) / 1000;
 }
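
FAN_TO_REG() stores 1350000 / (rpm * div), rounded to nearest and clamped to 1..254; 255 is used when no speed is requested (rpm <= 0). A userspace round-trip, where the decode step is an illustrative inverse rather than a copy of the driver's FAN_FROM_REG():

#include <stdio.h>

static unsigned char demo_fan_to_reg(long rpm, int div)
{
        long reg;

        if (rpm <= 0)
                return 255;
        reg = (1350000 + rpm * div / 2) / (rpm * div);  /* round to nearest */
        return reg < 1 ? 1 : reg > 254 ? 254 : reg;
}

static long demo_fan_from_reg(unsigned char val, int div)
{
        /* illustrative inverse; 0 and 255 are treated as "no reading" */
        return val == 0 || val == 255 ? -1 : 1350000 / ((long)val * div);
}

int main(void)
{
        unsigned char reg = demo_fan_to_reg(3000, 2);

        printf("reg=%u rpm=%ld\n", reg, demo_fan_from_reg(reg, 2)); /* 225, 3000 */
        return 0;
}
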
 
index 28a8b71f45717a7a9d7ba68e34c3623426948bdb..357fbb9987284cf87fa3faf9bf05235a5427e5a1 100644 (file)
@@ -72,15 +72,15 @@ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
  * Fixing this is just not worth it.
  */
 
-#define IN_TO_REG(val)         (SENSORS_LIMIT(((val) + 5) / 10, 0, 255))
+#define IN_TO_REG(val)         (clamp_val(((val) + 5) / 10, 0, 255))
 #define IN_FROM_REG(val)       ((val) * 10)
 
 static inline unsigned char FAN_TO_REG(unsigned rpm, unsigned div)
 {
        if (rpm == 0)
                return 255;
-       rpm = SENSORS_LIMIT(rpm, 1, 1000000);
-       return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+       rpm = clamp_val(rpm, 1, 1000000);
+       return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
 }
 
 #define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \
@@ -102,7 +102,7 @@ static inline long TEMP_FROM_REG(u16 temp)
 #define TEMP_LIMIT_FROM_REG(val)       (((val) > 0x80 ? \
        (val) - 0x100 : (val)) * 1000)
 
-#define TEMP_LIMIT_TO_REG(val)         SENSORS_LIMIT((val) < 0 ? \
+#define TEMP_LIMIT_TO_REG(val)         clamp_val((val) < 0 ? \
        ((val) - 500) / 1000 : ((val) + 500) / 1000, 0, 255)
 
 #define DIV_FROM_REG(val)              (1 << (val))
index 9f2dd77e1e0e9b0025246156a68412ffb469bffb..47ade8ba152d5c0484e3927f4a1f416e20abaf64 100644 (file)
@@ -139,7 +139,7 @@ static const int lm85_scaling[] = {  /* .001 Volts */
 #define SCALE(val, from, to)   (((val) * (to) + ((from) / 2)) / (from))
 
 #define INS_TO_REG(n, val)     \
-               SENSORS_LIMIT(SCALE(val, lm85_scaling[n], 192), 0, 255)
+               clamp_val(SCALE(val, lm85_scaling[n], 192), 0, 255)
 
 #define INSEXT_FROM_REG(n, val, ext)   \
                SCALE(((val) << 4) + (ext), 192 << 4, lm85_scaling[n])
@@ -151,19 +151,19 @@ static inline u16 FAN_TO_REG(unsigned long val)
 {
        if (!val)
                return 0xffff;
-       return SENSORS_LIMIT(5400000 / val, 1, 0xfffe);
+       return clamp_val(5400000 / val, 1, 0xfffe);
 }
 #define FAN_FROM_REG(val)      ((val) == 0 ? -1 : (val) == 0xffff ? 0 : \
                                 5400000 / (val))
 
 /* Temperature is reported in .001 degC increments */
 #define TEMP_TO_REG(val)       \
-               SENSORS_LIMIT(SCALE(val, 1000, 1), -127, 127)
+               clamp_val(SCALE(val, 1000, 1), -127, 127)
 #define TEMPEXT_FROM_REG(val, ext)     \
                SCALE(((val) << 4) + (ext), 16, 1000)
 #define TEMP_FROM_REG(val)     ((val) * 1000)
 
-#define PWM_TO_REG(val)                        SENSORS_LIMIT(val, 0, 255)
+#define PWM_TO_REG(val)                        clamp_val(val, 0, 255)
 #define PWM_FROM_REG(val)              (val)
 
 
@@ -258,7 +258,7 @@ static int ZONE_TO_REG(int zone)
        return i << 5;
 }
 
-#define HYST_TO_REG(val)       SENSORS_LIMIT(((val) + 500) / 1000, 0, 15)
+#define HYST_TO_REG(val)       clamp_val(((val) + 500) / 1000, 0, 15)
 #define HYST_FROM_REG(val)     ((val) * 1000)
 
 /*
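
SCALE() is a rounded rescale, out = val * to / from with half of "from" added for round-to-nearest; the voltage, temperature and hysteresis macros above are all instances of it plus a clamp. A sketch, where the 2500 mV full-scale value stands in for an lm85_scaling[] entry that is not part of this hunk:

#include <stdio.h>

#define SCALE(val, from, to)	(((val) * (to) + ((from) / 2)) / (from))

static int demo_ins_to_reg(int scaling_mv, int val_mv)
{
        int reg = SCALE(val_mv, scaling_mv, 192);

        return reg < 0 ? 0 : reg > 255 ? 255 : reg;
}

int main(void)
{
        /* half of full scale maps to half of 192: 1250 mV -> 96 */
        printf("%d\n", demo_ins_to_reg(2500, 1250));
        /* the temperature case is the same macro: millidegrees -> degrees */
        printf("%d\n", SCALE(25499, 1000, 1));  /* 25 */
        return 0;
}
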
index 863412a02bdd1484434583625d8670fa1d8b6079..8eeb141c85acdad04d23305cb581e76cef3d53fd 100644 (file)
@@ -931,7 +931,7 @@ static ssize_t set_update_interval(struct device *dev,
                return err;
 
        mutex_lock(&data->update_lock);
-       lm90_set_convrate(client, data, SENSORS_LIMIT(val, 0, 100000));
+       lm90_set_convrate(client, data, clamp_val(val, 0, 100000));
        mutex_unlock(&data->update_lock);
 
        return count;
index 1a003f73e4e4a8b96d4a87aa0dacecfb7ed15eff..b40f34cdb3caf34d9a72d3c7e90c334740c5c229 100644 (file)
@@ -371,8 +371,8 @@ static unsigned LM93_IN_FROM_REG(int nr, u8 reg)
 static u8 LM93_IN_TO_REG(int nr, unsigned val)
 {
        /* range limit */
-       const long mV = SENSORS_LIMIT(val,
-               lm93_vin_val_min[nr], lm93_vin_val_max[nr]);
+       const long mV = clamp_val(val,
+                                 lm93_vin_val_min[nr], lm93_vin_val_max[nr]);
 
        /* try not to lose too much precision here */
        const long uV = mV * 1000;
@@ -385,8 +385,8 @@ static u8 LM93_IN_TO_REG(int nr, unsigned val)
        const long intercept = uV_min - slope * lm93_vin_reg_min[nr];
 
        u8 result = ((uV - intercept + (slope/2)) / slope);
-       result = SENSORS_LIMIT(result,
-                       lm93_vin_reg_min[nr], lm93_vin_reg_max[nr]);
+       result = clamp_val(result,
+                          lm93_vin_reg_min[nr], lm93_vin_reg_max[nr]);
        return result;
 }
 
@@ -411,10 +411,10 @@ static u8 LM93_IN_REL_TO_REG(unsigned val, int upper, int vid)
 {
        long uV_offset = vid * 1000 - val * 10000;
        if (upper) {
-               uV_offset = SENSORS_LIMIT(uV_offset, 12500, 200000);
+               uV_offset = clamp_val(uV_offset, 12500, 200000);
                return (u8)((uV_offset /  12500 - 1) << 4);
        } else {
-               uV_offset = SENSORS_LIMIT(uV_offset, -400000, -25000);
+               uV_offset = clamp_val(uV_offset, -400000, -25000);
                return (u8)((uV_offset / -25000 - 1) << 0);
        }
 }
@@ -437,7 +437,7 @@ static int LM93_TEMP_FROM_REG(u8 reg)
  */
 static u8 LM93_TEMP_TO_REG(long temp)
 {
-       int ntemp = SENSORS_LIMIT(temp, LM93_TEMP_MIN, LM93_TEMP_MAX);
+       int ntemp = clamp_val(temp, LM93_TEMP_MIN, LM93_TEMP_MAX);
        ntemp += (ntemp < 0 ? -500 : 500);
        return (u8)(ntemp / 1000);
 }
@@ -472,7 +472,7 @@ static u8 LM93_TEMP_OFFSET_TO_REG(int off, int mode)
 {
        int factor = mode ? 5 : 10;
 
-       off = SENSORS_LIMIT(off, LM93_TEMP_OFFSET_MIN,
+       off = clamp_val(off, LM93_TEMP_OFFSET_MIN,
                mode ? LM93_TEMP_OFFSET_MAX1 : LM93_TEMP_OFFSET_MAX0);
        return (u8)((off + factor/2) / factor);
 }
@@ -620,8 +620,8 @@ static u16 LM93_FAN_TO_REG(long rpm)
        if (rpm == 0) {
                count = 0x3fff;
        } else {
-               rpm = SENSORS_LIMIT(rpm, 1, 1000000);
-               count = SENSORS_LIMIT((1350000 + rpm) / rpm, 1, 0x3ffe);
+               rpm = clamp_val(rpm, 1, 1000000);
+               count = clamp_val((1350000 + rpm) / rpm, 1, 0x3ffe);
        }
 
        regs = count << 2;
@@ -692,7 +692,7 @@ static int LM93_RAMP_FROM_REG(u8 reg)
  */
 static u8 LM93_RAMP_TO_REG(int ramp)
 {
-       ramp = SENSORS_LIMIT(ramp, LM93_RAMP_MIN, LM93_RAMP_MAX);
+       ramp = clamp_val(ramp, LM93_RAMP_MIN, LM93_RAMP_MAX);
        return (u8)((ramp + 2) / 5);
 }
 
@@ -702,7 +702,7 @@ static u8 LM93_RAMP_TO_REG(int ramp)
  */
 static u8 LM93_PROCHOT_TO_REG(long prochot)
 {
-       prochot = SENSORS_LIMIT(prochot, 0, 255);
+       prochot = clamp_val(prochot, 0, 255);
        return (u8)prochot;
 }
 
@@ -2052,7 +2052,7 @@ static ssize_t store_pwm_auto_channels(struct device *dev,
                return err;
 
        mutex_lock(&data->update_lock);
-       data->block9[nr][LM93_PWM_CTL1] = SENSORS_LIMIT(val, 0, 255);
+       data->block9[nr][LM93_PWM_CTL1] = clamp_val(val, 0, 255);
        lm93_write_byte(client, LM93_REG_PWM_CTL(nr, LM93_PWM_CTL1),
                                data->block9[nr][LM93_PWM_CTL1]);
        mutex_unlock(&data->update_lock);
@@ -2397,7 +2397,7 @@ static ssize_t store_prochot_override_duty_cycle(struct device *dev,
 
        mutex_lock(&data->update_lock);
        data->prochot_override = (data->prochot_override & 0xf0) |
-                                       SENSORS_LIMIT(val, 0, 15);
+                                       clamp_val(val, 0, 15);
        lm93_write_byte(client, LM93_REG_PROCHOT_OVERRIDE,
                        data->prochot_override);
        mutex_unlock(&data->update_lock);
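
LM93_IN_TO_REG() fits a straight line between the (register, microvolt) endpoints of the channel's table and rounds to the nearest register step. The endpoints below (0..3000 mV over register codes 0..255) are assumed demo values, not the real lm93_vin_* entries:

#include <stdio.h>

static unsigned char demo_lm93_in_to_reg(long mV,
                                         long val_min, long val_max,
                                         int reg_min, int reg_max)
{
        long uV, uV_min, uV_max, slope, intercept, result;

        mV = mV < val_min ? val_min : mV > val_max ? val_max : mV;
        uV = mV * 1000;
        uV_min = val_min * 1000;
        uV_max = val_max * 1000;
        slope = (uV_max - uV_min) / (reg_max - reg_min);
        intercept = uV_min - slope * reg_min;
        result = (uV - intercept + slope / 2) / slope;  /* round to nearest */
        if (result < reg_min)
                result = reg_min;
        if (result > reg_max)
                result = reg_max;
        return (unsigned char)result;
}

int main(void)
{
        /* a mid-scale input lands at (roughly) a mid-scale register code */
        printf("%u\n", demo_lm93_in_to_reg(1500, 0, 3000, 0, 255));  /* 128 */
        return 0;
}
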
index 2915fd90836495c16cb4e89485daf5b457b6289b..a6c85f0ff8f3e5a9ebf099a8d251b95a7fe636d6 100644 (file)
@@ -259,7 +259,7 @@ static ssize_t set_limit(struct device *dev, struct device_attribute *attr,
 
        val /= 1000;
 
-       val = SENSORS_LIMIT(val, 0, (index == 6 ? 127 : 255));
+       val = clamp_val(val, 0, (index == 6 ? 127 : 255));
 
        mutex_lock(&data->update_lock);
 
@@ -284,7 +284,7 @@ static ssize_t set_crit_hyst(struct device *dev, struct device_attribute *attr,
 
        val /= 1000;
 
-       val = SENSORS_LIMIT(val, 0, 31);
+       val = clamp_val(val, 0, 31);
 
        mutex_lock(&data->update_lock);
 
index e0019c69d1bbcd19b209446146356a82ba7926bb..2fa2c02f5569c5af563ac5a90e865917e648fdf2 100644 (file)
@@ -118,7 +118,7 @@ static inline int LIMIT_TO_MV(int limit, int range)
 
 static inline int MV_TO_LIMIT(int mv, int range)
 {
-       return SENSORS_LIMIT(DIV_ROUND_CLOSEST(mv * 256, range), 0, 255);
+       return clamp_val(DIV_ROUND_CLOSEST(mv * 256, range), 0, 255);
 }
 
 static inline int ADC_TO_CURR(int adc, int gain)
index 666d9f6263eb462e41a5ec319ca5d29dc67eb540..a7626358c95df29c2ccb62fa89ed23251acfb979 100644 (file)
@@ -215,7 +215,7 @@ static ssize_t set_temp_max(struct device *dev,
                return ret;
 
        mutex_lock(&data->update_lock);
-       data->temp_max[index] = SENSORS_LIMIT(temp/1000, -128, 127);
+       data->temp_max[index] = clamp_val(temp/1000, -128, 127);
        if (i2c_smbus_write_byte_data(client,
                                        MAX1668_REG_LIMH_WR(index),
                                        data->temp_max[index]))
@@ -240,7 +240,7 @@ static ssize_t set_temp_min(struct device *dev,
                return ret;
 
        mutex_lock(&data->update_lock);
-       data->temp_min[index] = SENSORS_LIMIT(temp/1000, -128, 127);
+       data->temp_min[index] = clamp_val(temp/1000, -128, 127);
        if (i2c_smbus_write_byte_data(client,
                                        MAX1668_REG_LIML_WR(index),
                                        data->temp_max[index]))
index 6e60036abfa724a8ea4773faf9f2dab18c902fce..3e7b4269f5b9df67658e5e3f855aaec6d95b3c29 100644 (file)
@@ -74,7 +74,7 @@ static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };
 
 #define FAN_FROM_REG(val, rpm_range)   ((val) == 0 || (val) == 255 ? \
                                0 : (rpm_ranges[rpm_range] * 30) / (val))
-#define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) / 1000, 0, 255)
+#define TEMP_LIMIT_TO_REG(val) clamp_val((val) / 1000, 0, 255)
 
 /*
  * Client data (each client gets its own)
@@ -312,7 +312,7 @@ static ssize_t set_pwm(struct device *dev,
        if (res)
                return res;
 
-       val = SENSORS_LIMIT(val, 0, 255);
+       val = clamp_val(val, 0, 255);
 
        mutex_lock(&data->update_lock);
        data->pwm[attr->index] = (u8)(val * 120 / 255);
index 223461a6d70f974ebeec806bc4c3cc8cb780088f..57d58cd3220682030fcb3f0d32b62e4e77648c04 100644 (file)
@@ -239,7 +239,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
                return err;
 
        mutex_lock(&data->update_lock);
-       data->temp_high[attr2->nr] = SENSORS_LIMIT(temp_to_reg(val), 0, 255);
+       data->temp_high[attr2->nr] = clamp_val(temp_to_reg(val), 0, 255);
        i2c_smbus_write_byte_data(client, attr2->index,
                                  data->temp_high[attr2->nr]);
        mutex_unlock(&data->update_lock);
index f739f83bafb9d9e23c4d47f94655cb12d1a63ca1..3c16cbd4c00286d465bc2aed825330c0c89e9661 100644 (file)
@@ -245,7 +245,7 @@ static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
        if (err)
                return err;
 
-       rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX);
+       rpm = clamp_val(rpm, FAN_RPM_MIN, FAN_RPM_MAX);
 
        /*
         * Divide the required speed by 60 to get from rpm to rps, then
@@ -313,7 +313,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
        if (err)
                return err;
 
-       pwm = SENSORS_LIMIT(pwm, 0, 255);
+       pwm = clamp_val(pwm, 0, 255);
 
        mutex_lock(&data->update_lock);
 
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
new file mode 100644 (file)
index 0000000..bf4aa37
--- /dev/null
@@ -0,0 +1,726 @@
+/*
+ * Copyright (c) 2012 Guenter Roeck <linux@roeck-us.net>
+ *
+ * based on max1668.c
+ * Copyright (c) 2011 David George <david.george@ska.ac.za>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+
+#include <linux/platform_data/max6697.h>
+
+enum chips { max6581, max6602, max6622, max6636, max6689, max6693, max6694,
+            max6697, max6698, max6699 };
+
+/* Report local sensor as temp1 */
+
+static const u8 MAX6697_REG_TEMP[] = {
+                       0x07, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08 };
+static const u8 MAX6697_REG_TEMP_EXT[] = {
+                       0x57, 0x09, 0x52, 0x53, 0x54, 0x55, 0x56, 0 };
+static const u8 MAX6697_REG_MAX[] = {
+                       0x17, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x18 };
+static const u8 MAX6697_REG_CRIT[] = {
+                       0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27 };
+
+/*
+ * Map device tree / platform data register bit map to chip bit map.
+ * Applies to alert register and over-temperature register.
+ */
+#define MAX6697_MAP_BITS(reg)  ((((reg) & 0x7e) >> 1) | \
+                                (((reg) & 0x01) << 6) | ((reg) & 0x80))
+
+#define MAX6697_REG_STAT(n)            (0x44 + (n))
+
+#define MAX6697_REG_CONFIG             0x41
+#define MAX6581_CONF_EXTENDED          (1 << 1)
+#define MAX6693_CONF_BETA              (1 << 2)
+#define MAX6697_CONF_RESISTANCE                (1 << 3)
+#define MAX6697_CONF_TIMEOUT           (1 << 5)
+#define MAX6697_REG_ALERT_MASK         0x42
+#define MAX6697_REG_OVERT_MASK         0x43
+
+#define MAX6581_REG_RESISTANCE         0x4a
+#define MAX6581_REG_IDEALITY           0x4b
+#define MAX6581_REG_IDEALITY_SELECT    0x4c
+#define MAX6581_REG_OFFSET             0x4d
+#define MAX6581_REG_OFFSET_SELECT      0x4e
+
+#define MAX6697_CONV_TIME              156     /* ms per channel, worst case */
+
+struct max6697_chip_data {
+       int channels;
+       u32 have_ext;
+       u32 have_crit;
+       u32 have_fault;
+       u8 valid_conf;
+       const u8 *alarm_map;
+};
+
+struct max6697_data {
+       struct device *hwmon_dev;
+
+       enum chips type;
+       const struct max6697_chip_data *chip;
+
+       int update_interval;    /* in milli-seconds */
+       int temp_offset;        /* in degrees C */
+
+       struct mutex update_lock;
+       unsigned long last_updated;     /* In jiffies */
+       bool valid;             /* true if following fields are valid */
+
+       /* 1x local and up to 7x remote */
+       u8 temp[8][4];          /* [nr][0]=temp [1]=ext [2]=max [3]=crit */
+#define MAX6697_TEMP_INPUT     0
+#define MAX6697_TEMP_EXT       1
+#define MAX6697_TEMP_MAX       2
+#define MAX6697_TEMP_CRIT      3
+       u32 alarms;
+};
+
+/* Diode fault status bits on MAX6581 are right shifted by one bit */
+static const u8 max6581_alarm_map[] = {
+        0, 0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15,
+        16, 17, 18, 19, 20, 21, 22, 23 };
+
+static const struct max6697_chip_data max6697_chip_data[] = {
+       [max6581] = {
+               .channels = 8,
+               .have_crit = 0xff,
+               .have_ext = 0x7f,
+               .have_fault = 0xfe,
+               .valid_conf = MAX6581_CONF_EXTENDED | MAX6697_CONF_TIMEOUT,
+               .alarm_map = max6581_alarm_map,
+       },
+       [max6602] = {
+               .channels = 5,
+               .have_crit = 0x12,
+               .have_ext = 0x02,
+               .have_fault = 0x1e,
+               .valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
+       },
+       [max6622] = {
+               .channels = 5,
+               .have_crit = 0x12,
+               .have_ext = 0x02,
+               .have_fault = 0x1e,
+               .valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
+       },
+       [max6636] = {
+               .channels = 7,
+               .have_crit = 0x72,
+               .have_ext = 0x02,
+               .have_fault = 0x7e,
+               .valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
+       },
+       [max6689] = {
+               .channels = 7,
+               .have_crit = 0x72,
+               .have_ext = 0x02,
+               .have_fault = 0x7e,
+               .valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
+       },
+       [max6693] = {
+               .channels = 7,
+               .have_crit = 0x72,
+               .have_ext = 0x02,
+               .have_fault = 0x7e,
+               .valid_conf = MAX6697_CONF_RESISTANCE | MAX6693_CONF_BETA |
+                 MAX6697_CONF_TIMEOUT,
+       },
+       [max6694] = {
+               .channels = 5,
+               .have_crit = 0x12,
+               .have_ext = 0x02,
+               .have_fault = 0x1e,
+               .valid_conf = MAX6697_CONF_RESISTANCE | MAX6693_CONF_BETA |
+                 MAX6697_CONF_TIMEOUT,
+       },
+       [max6697] = {
+               .channels = 7,
+               .have_crit = 0x72,
+               .have_ext = 0x02,
+               .have_fault = 0x7e,
+               .valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
+       },
+       [max6698] = {
+               .channels = 7,
+               .have_crit = 0x72,
+               .have_ext = 0x02,
+               .have_fault = 0x0e,
+               .valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
+       },
+       [max6699] = {
+               .channels = 5,
+               .have_crit = 0x12,
+               .have_ext = 0x02,
+               .have_fault = 0x1e,
+               .valid_conf = MAX6697_CONF_RESISTANCE | MAX6697_CONF_TIMEOUT,
+       },
+};
+
+static struct max6697_data *max6697_update_device(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct max6697_data *data = i2c_get_clientdata(client);
+       struct max6697_data *ret = data;
+       int val;
+       int i;
+       u32 alarms;
+
+       mutex_lock(&data->update_lock);
+
+       if (data->valid &&
+           !time_after(jiffies, data->last_updated
+                       + msecs_to_jiffies(data->update_interval)))
+               goto abort;
+
+       for (i = 0; i < data->chip->channels; i++) {
+               if (data->chip->have_ext & (1 << i)) {
+                       val = i2c_smbus_read_byte_data(client,
+                                                      MAX6697_REG_TEMP_EXT[i]);
+                       if (unlikely(val < 0)) {
+                               ret = ERR_PTR(val);
+                               goto abort;
+                       }
+                       data->temp[i][MAX6697_TEMP_EXT] = val;
+               }
+
+               val = i2c_smbus_read_byte_data(client, MAX6697_REG_TEMP[i]);
+               if (unlikely(val < 0)) {
+                       ret = ERR_PTR(val);
+                       goto abort;
+               }
+               data->temp[i][MAX6697_TEMP_INPUT] = val;
+
+               val = i2c_smbus_read_byte_data(client, MAX6697_REG_MAX[i]);
+               if (unlikely(val < 0)) {
+                       ret = ERR_PTR(val);
+                       goto abort;
+               }
+               data->temp[i][MAX6697_TEMP_MAX] = val;
+
+               if (data->chip->have_crit & (1 << i)) {
+                       val = i2c_smbus_read_byte_data(client,
+                                                      MAX6697_REG_CRIT[i]);
+                       if (unlikely(val < 0)) {
+                               ret = ERR_PTR(val);
+                               goto abort;
+                       }
+                       data->temp[i][MAX6697_TEMP_CRIT] = val;
+               }
+       }
+
+       alarms = 0;
+       for (i = 0; i < 3; i++) {
+               val = i2c_smbus_read_byte_data(client, MAX6697_REG_STAT(i));
+               if (unlikely(val < 0)) {
+                       ret = ERR_PTR(val);
+                       goto abort;
+               }
+               alarms = (alarms << 8) | val;
+       }
+       data->alarms = alarms;
+       data->last_updated = jiffies;
+       data->valid = true;
+abort:
+       mutex_unlock(&data->update_lock);
+
+       return ret;
+}
+
+static ssize_t show_temp_input(struct device *dev,
+                              struct device_attribute *devattr, char *buf)
+{
+       int index = to_sensor_dev_attr(devattr)->index;
+       struct max6697_data *data = max6697_update_device(dev);
+       int temp;
+
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       temp = (data->temp[index][MAX6697_TEMP_INPUT] - data->temp_offset) << 3;
+       temp |= data->temp[index][MAX6697_TEMP_EXT] >> 5;
+
+       return sprintf(buf, "%d\n", temp * 125);
+}
+
+static ssize_t show_temp(struct device *dev,
+                        struct device_attribute *devattr, char *buf)
+{
+       int nr = to_sensor_dev_attr_2(devattr)->nr;
+       int index = to_sensor_dev_attr_2(devattr)->index;
+       struct max6697_data *data = max6697_update_device(dev);
+       int temp;
+
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       temp = data->temp[nr][index];
+       temp -= data->temp_offset;
+
+       return sprintf(buf, "%d\n", temp * 1000);
+}
+
+static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       int index = to_sensor_dev_attr(attr)->index;
+       struct max6697_data *data = max6697_update_device(dev);
+
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       if (data->chip->alarm_map)
+               index = data->chip->alarm_map[index];
+
+       return sprintf(buf, "%u\n", (data->alarms >> index) & 0x1);
+}
+
+static ssize_t set_temp(struct device *dev,
+                       struct device_attribute *devattr,
+                       const char *buf, size_t count)
+{
+       int nr = to_sensor_dev_attr_2(devattr)->nr;
+       int index = to_sensor_dev_attr_2(devattr)->index;
+       struct i2c_client *client = to_i2c_client(dev);
+       struct max6697_data *data = i2c_get_clientdata(client);
+       long temp;
+       int ret;
+
+       ret = kstrtol(buf, 10, &temp);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&data->update_lock);
+       temp = DIV_ROUND_CLOSEST(temp, 1000) + data->temp_offset;
+       temp = clamp_val(temp, 0, data->type == max6581 ? 255 : 127);
+       data->temp[nr][index] = temp;
+       ret = i2c_smbus_write_byte_data(client,
+                                       index == 2 ? MAX6697_REG_MAX[nr]
+                                                  : MAX6697_REG_CRIT[nr],
+                                       temp);
+       mutex_unlock(&data->update_lock);
+
+       return ret < 0 ? ret : count;
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+                           0, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp1_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+                           0, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_input, NULL, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+                           1, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp2_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+                           1, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp_input, NULL, 2);
+static SENSOR_DEVICE_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+                           2, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp3_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+                           2, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp_input, NULL, 3);
+static SENSOR_DEVICE_ATTR_2(temp4_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+                           3, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp4_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+                           3, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp5_input, S_IRUGO, show_temp_input, NULL, 4);
+static SENSOR_DEVICE_ATTR_2(temp5_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+                           4, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp5_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+                           4, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp6_input, S_IRUGO, show_temp_input, NULL, 5);
+static SENSOR_DEVICE_ATTR_2(temp6_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+                           5, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp6_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+                           5, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp7_input, S_IRUGO, show_temp_input, NULL, 6);
+static SENSOR_DEVICE_ATTR_2(temp7_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+                           6, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp7_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+                           6, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp8_input, S_IRUGO, show_temp_input, NULL, 7);
+static SENSOR_DEVICE_ATTR_2(temp8_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
+                           7, MAX6697_TEMP_MAX);
+static SENSOR_DEVICE_ATTR_2(temp8_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
+                           7, MAX6697_TEMP_CRIT);
+
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 22);
+static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 16);
+static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 17);
+static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_alarm, NULL, 18);
+static SENSOR_DEVICE_ATTR(temp5_max_alarm, S_IRUGO, show_alarm, NULL, 19);
+static SENSOR_DEVICE_ATTR(temp6_max_alarm, S_IRUGO, show_alarm, NULL, 20);
+static SENSOR_DEVICE_ATTR(temp7_max_alarm, S_IRUGO, show_alarm, NULL, 21);
+static SENSOR_DEVICE_ATTR(temp8_max_alarm, S_IRUGO, show_alarm, NULL, 23);
+
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
+static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 8);
+static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 9);
+static SENSOR_DEVICE_ATTR(temp4_crit_alarm, S_IRUGO, show_alarm, NULL, 10);
+static SENSOR_DEVICE_ATTR(temp5_crit_alarm, S_IRUGO, show_alarm, NULL, 11);
+static SENSOR_DEVICE_ATTR(temp6_crit_alarm, S_IRUGO, show_alarm, NULL, 12);
+static SENSOR_DEVICE_ATTR(temp7_crit_alarm, S_IRUGO, show_alarm, NULL, 13);
+static SENSOR_DEVICE_ATTR(temp8_crit_alarm, S_IRUGO, show_alarm, NULL, 15);
+
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_alarm, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_fault, S_IRUGO, show_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_fault, S_IRUGO, show_alarm, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp7_fault, S_IRUGO, show_alarm, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp8_fault, S_IRUGO, show_alarm, NULL, 7);
+
+static struct attribute *max6697_attributes[8][7] = {
+       {
+               &sensor_dev_attr_temp1_input.dev_attr.attr,
+               &sensor_dev_attr_temp1_max.dev_attr.attr,
+               &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+               &sensor_dev_attr_temp1_crit.dev_attr.attr,
+               &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+               NULL
+       }, {
+               &sensor_dev_attr_temp2_input.dev_attr.attr,
+               &sensor_dev_attr_temp2_max.dev_attr.attr,
+               &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+               &sensor_dev_attr_temp2_crit.dev_attr.attr,
+               &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
+               &sensor_dev_attr_temp2_fault.dev_attr.attr,
+               NULL
+       }, {
+               &sensor_dev_attr_temp3_input.dev_attr.attr,
+               &sensor_dev_attr_temp3_max.dev_attr.attr,
+               &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
+               &sensor_dev_attr_temp3_crit.dev_attr.attr,
+               &sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
+               &sensor_dev_attr_temp3_fault.dev_attr.attr,
+               NULL
+       }, {
+               &sensor_dev_attr_temp4_input.dev_attr.attr,
+               &sensor_dev_attr_temp4_max.dev_attr.attr,
+               &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
+               &sensor_dev_attr_temp4_crit.dev_attr.attr,
+               &sensor_dev_attr_temp4_crit_alarm.dev_attr.attr,
+               &sensor_dev_attr_temp4_fault.dev_attr.attr,
+               NULL
+       }, {
+               &sensor_dev_attr_temp5_input.dev_attr.attr,
+               &sensor_dev_attr_temp5_max.dev_attr.attr,
+               &sensor_dev_attr_temp5_max_alarm.dev_attr.attr,
+               &sensor_dev_attr_temp5_crit.dev_attr.attr,
+               &sensor_dev_attr_temp5_crit_alarm.dev_attr.attr,
+               &sensor_dev_attr_temp5_fault.dev_attr.attr,
+               NULL
+       }, {
+               &sensor_dev_attr_temp6_input.dev_attr.attr,
+               &sensor_dev_attr_temp6_max.dev_attr.attr,
+               &sensor_dev_attr_temp6_max_alarm.dev_attr.attr,
+               &sensor_dev_attr_temp6_crit.dev_attr.attr,
+               &sensor_dev_attr_temp6_crit_alarm.dev_attr.attr,
+               &sensor_dev_attr_temp6_fault.dev_attr.attr,
+               NULL
+       }, {
+               &sensor_dev_attr_temp7_input.dev_attr.attr,
+               &sensor_dev_attr_temp7_max.dev_attr.attr,
+               &sensor_dev_attr_temp7_max_alarm.dev_attr.attr,
+               &sensor_dev_attr_temp7_crit.dev_attr.attr,
+               &sensor_dev_attr_temp7_crit_alarm.dev_attr.attr,
+               &sensor_dev_attr_temp7_fault.dev_attr.attr,
+               NULL
+       }, {
+               &sensor_dev_attr_temp8_input.dev_attr.attr,
+               &sensor_dev_attr_temp8_max.dev_attr.attr,
+               &sensor_dev_attr_temp8_max_alarm.dev_attr.attr,
+               &sensor_dev_attr_temp8_crit.dev_attr.attr,
+               &sensor_dev_attr_temp8_crit_alarm.dev_attr.attr,
+               &sensor_dev_attr_temp8_fault.dev_attr.attr,
+               NULL
+       }
+};
+
+static const struct attribute_group max6697_group[8] = {
+       { .attrs = max6697_attributes[0] },
+       { .attrs = max6697_attributes[1] },
+       { .attrs = max6697_attributes[2] },
+       { .attrs = max6697_attributes[3] },
+       { .attrs = max6697_attributes[4] },
+       { .attrs = max6697_attributes[5] },
+       { .attrs = max6697_attributes[6] },
+       { .attrs = max6697_attributes[7] },
+};
+
+static void max6697_get_config_of(struct device_node *node,
+                                 struct max6697_platform_data *pdata)
+{
+       int len;
+       const __be32 *prop;
+
+       prop = of_get_property(node, "smbus-timeout-disable", &len);
+       if (prop)
+               pdata->smbus_timeout_disable = true;
+       prop = of_get_property(node, "extended-range-enable", &len);
+       if (prop)
+               pdata->extended_range_enable = true;
+       prop = of_get_property(node, "beta-compensation-enable", &len);
+       if (prop)
+               pdata->beta_compensation = true;
+       prop = of_get_property(node, "alert-mask", &len);
+       if (prop && len == sizeof(u32))
+               pdata->alert_mask = be32_to_cpu(prop[0]);
+       prop = of_get_property(node, "over-temperature-mask", &len);
+       if (prop && len == sizeof(u32))
+               pdata->over_temperature_mask = be32_to_cpu(prop[0]);
+       prop = of_get_property(node, "resistance-cancellation", &len);
+       if (prop) {
+               if (len == sizeof(u32))
+                       pdata->resistance_cancellation = be32_to_cpu(prop[0]);
+               else
+                       pdata->resistance_cancellation = 0xfe;
+       }
+       prop = of_get_property(node, "transistor-ideality", &len);
+       if (prop && len == 2 * sizeof(u32)) {
+                       pdata->ideality_mask = be32_to_cpu(prop[0]);
+                       pdata->ideality_value = be32_to_cpu(prop[1]);
+       }
+}
+
+static int max6697_init_chip(struct i2c_client *client)
+{
+       struct max6697_data *data = i2c_get_clientdata(client);
+       struct max6697_platform_data *pdata = dev_get_platdata(&client->dev);
+       struct max6697_platform_data p;
+       const struct max6697_chip_data *chip = data->chip;
+       int factor = chip->channels;
+       int ret, reg;
+
+       /*
+        * Don't touch configuration if neither platform data nor OF
+        * configuration was specified. If that is the case, use the
+        * current chip configuration.
+        */
+       if (!pdata && !client->dev.of_node) {
+               reg = i2c_smbus_read_byte_data(client, MAX6697_REG_CONFIG);
+               if (reg < 0)
+                       return reg;
+               if (data->type == max6581) {
+                       if (reg & MAX6581_CONF_EXTENDED)
+                               data->temp_offset = 64;
+                       reg = i2c_smbus_read_byte_data(client,
+                                                      MAX6581_REG_RESISTANCE);
+                       if (reg < 0)
+                               return reg;
+                       factor += hweight8(reg);
+               } else {
+                       if (reg & MAX6697_CONF_RESISTANCE)
+                               factor++;
+               }
+               goto done;
+       }
+
+       if (client->dev.of_node) {
+               memset(&p, 0, sizeof(p));
+               max6697_get_config_of(client->dev.of_node, &p);
+               pdata = &p;
+       }
+
+       reg = 0;
+       if (pdata->smbus_timeout_disable &&
+           (chip->valid_conf & MAX6697_CONF_TIMEOUT)) {
+               reg |= MAX6697_CONF_TIMEOUT;
+       }
+       if (pdata->extended_range_enable &&
+           (chip->valid_conf & MAX6581_CONF_EXTENDED)) {
+               reg |= MAX6581_CONF_EXTENDED;
+               data->temp_offset = 64;
+       }
+       if (pdata->resistance_cancellation &&
+           (chip->valid_conf & MAX6697_CONF_RESISTANCE)) {
+               reg |= MAX6697_CONF_RESISTANCE;
+               factor++;
+       }
+       if (pdata->beta_compensation &&
+           (chip->valid_conf & MAX6693_CONF_BETA)) {
+               reg |= MAX6693_CONF_BETA;
+       }
+
+       ret = i2c_smbus_write_byte_data(client, MAX6697_REG_CONFIG, reg);
+       if (ret < 0)
+               return ret;
+
+       ret = i2c_smbus_write_byte_data(client, MAX6697_REG_ALERT_MASK,
+                                       MAX6697_MAP_BITS(pdata->alert_mask));
+       if (ret < 0)
+               return ret;
+
+       ret = i2c_smbus_write_byte_data(client, MAX6697_REG_OVERT_MASK,
+                               MAX6697_MAP_BITS(pdata->over_temperature_mask));
+       if (ret < 0)
+               return ret;
+
+       if (data->type == max6581) {
+               factor += hweight8(pdata->resistance_cancellation >> 1);
+               ret = i2c_smbus_write_byte_data(client, MAX6581_REG_RESISTANCE,
+                                       pdata->resistance_cancellation >> 1);
+               if (ret < 0)
+                       return ret;
+               ret = i2c_smbus_write_byte_data(client, MAX6581_REG_IDEALITY,
+                                               pdata->ideality_mask >> 1);
+               if (ret < 0)
+                       return ret;
+               ret = i2c_smbus_write_byte_data(client,
+                                               MAX6581_REG_IDEALITY_SELECT,
+                                               pdata->ideality_value);
+               if (ret < 0)
+                       return ret;
+       }
+done:
+       data->update_interval = factor * MAX6697_CONV_TIME;
+       return 0;
+}
+
+static void max6697_remove_files(struct i2c_client *client)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(max6697_group); i++)
+               sysfs_remove_group(&client->dev.kobj, &max6697_group[i]);
+}
+
+static int max6697_probe(struct i2c_client *client,
+                        const struct i2c_device_id *id)
+{
+       struct i2c_adapter *adapter = client->adapter;
+       struct device *dev = &client->dev;
+       struct max6697_data *data;
+       int i, err;
+
+       if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+               return -ENODEV;
+
+       data = devm_kzalloc(dev, sizeof(struct max6697_data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->type = id->driver_data;
+       data->chip = &max6697_chip_data[data->type];
+
+       i2c_set_clientdata(client, data);
+       mutex_init(&data->update_lock);
+
+       err = max6697_init_chip(client);
+       if (err)
+               return err;
+
+       for (i = 0; i < data->chip->channels; i++) {
+               err = sysfs_create_file(&dev->kobj,
+                                       max6697_attributes[i][0]);
+               if (err)
+                       goto error;
+               err = sysfs_create_file(&dev->kobj,
+                                       max6697_attributes[i][1]);
+               if (err)
+                       goto error;
+               err = sysfs_create_file(&dev->kobj,
+                                       max6697_attributes[i][2]);
+               if (err)
+                       goto error;
+
+               if (data->chip->have_crit & (1 << i)) {
+                       err = sysfs_create_file(&dev->kobj,
+                                               max6697_attributes[i][3]);
+                       if (err)
+                               goto error;
+                       err = sysfs_create_file(&dev->kobj,
+                                               max6697_attributes[i][4]);
+                       if (err)
+                               goto error;
+               }
+               if (data->chip->have_fault & (1 << i)) {
+                       err = sysfs_create_file(&dev->kobj,
+                                               max6697_attributes[i][5]);
+                       if (err)
+                               goto error;
+               }
+       }
+
+       data->hwmon_dev = hwmon_device_register(dev);
+       if (IS_ERR(data->hwmon_dev)) {
+               err = PTR_ERR(data->hwmon_dev);
+               goto error;
+       }
+
+       return 0;
+
+error:
+       max6697_remove_files(client);
+       return err;
+}
+
+static int max6697_remove(struct i2c_client *client)
+{
+       struct max6697_data *data = i2c_get_clientdata(client);
+
+       hwmon_device_unregister(data->hwmon_dev);
+       max6697_remove_files(client);
+
+       return 0;
+}
+
+static const struct i2c_device_id max6697_id[] = {
+       { "max6581", max6581 },
+       { "max6602", max6602 },
+       { "max6622", max6622 },
+       { "max6636", max6636 },
+       { "max6689", max6689 },
+       { "max6693", max6693 },
+       { "max6694", max6694 },
+       { "max6697", max6697 },
+       { "max6698", max6698 },
+       { "max6699", max6699 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, max6697_id);
+
+static struct i2c_driver max6697_driver = {
+       .class = I2C_CLASS_HWMON,
+       .driver = {
+               .name   = "max6697",
+       },
+       .probe = max6697_probe,
+       .remove = max6697_remove,
+       .id_table = max6697_id,
+};
+
+module_i2c_driver(max6697_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("MAX6697 temperature sensor driver");
+MODULE_LICENSE("GPL");
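
Two details of the new driver are easier to see in isolation. MAX6697_MAP_BITS() rotates the low seven bits of the incoming DT/platform mask by one position: bit 0 moves to bit 6, bits 1..6 move down to bits 0..5, and bit 7 is left alone. The read path combines the 8-bit integer register with the top three bits of the extension register into an 11-bit value in 0.125 degC steps. A userspace sketch of both:

#include <stdio.h>

#define MAX6697_MAP_BITS(reg)	((((reg) & 0x7e) >> 1) | \
				 (((reg) & 0x01) << 6) | ((reg) & 0x80))

static int demo_temp_mdeg(unsigned char reg, unsigned char ext, int temp_offset)
{
        int temp = (reg - temp_offset) << 3;    /* integer part, 0.125 degC units */

        temp |= ext >> 5;                       /* three fractional bits */
        return temp * 125;                      /* millidegrees Celsius */
}

int main(void)
{
        printf("0x%02x\n", MAX6697_MAP_BITS(0x01));     /* bit 0 -> bit 6: 0x40 */
        printf("0x%02x\n", MAX6697_MAP_BITS(0x02));     /* bit 1 -> bit 0: 0x01 */
        printf("%d\n", demo_temp_mdeg(0x19, 0x80, 0));  /* 25500 = 25.5 degC */
        return 0;
}
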
index a87eb8986e360d66cebc31fca6e9ba90af32b538..b5f63f9c0ce1754f6c1b6a614e7c402f24382b81 100644 (file)
@@ -43,7 +43,7 @@ struct ntc_compensation {
  * The following compensation tables are from the specification of Murata NTC
  * Thermistors Datasheet
  */
-const struct ntc_compensation ncpXXwb473[] = {
+static const struct ntc_compensation ncpXXwb473[] = {
        { .temp_C       = -40, .ohm     = 1747920 },
        { .temp_C       = -35, .ohm     = 1245428 },
        { .temp_C       = -30, .ohm     = 898485 },
@@ -79,7 +79,7 @@ const struct ntc_compensation ncpXXwb473[] = {
        { .temp_C       = 120, .ohm     = 1615 },
        { .temp_C       = 125, .ohm     = 1406 },
 };
-const struct ntc_compensation ncpXXwl333[] = {
+static const struct ntc_compensation ncpXXwl333[] = {
        { .temp_C       = -40, .ohm     = 1610154 },
        { .temp_C       = -35, .ohm     = 1130850 },
        { .temp_C       = -30, .ohm     = 802609 },
index 60745a535821d3b787af31a00cfcecd41743c7ec..4f9eb0af5229b0e47dbbc7ad27d51d792de9bbb7 100644 (file)
@@ -72,7 +72,7 @@ config SENSORS_MAX34440
        default n
        help
          If you say yes here you get hardware monitoring support for Maxim
-         MAX34440, MAX34441, and MAX34446.
+         MAX34440, MAX34441, MAX34446, MAX34460, and MAX34461.
 
          This driver can also be built as a module. If so, the module will
          be called max34440.
index 2ada7b021fbe4d1d5570ee2f0eba9b055a4e223a..7e930c3ce1abf011621669e1f054fcdf991c8e13 100644 (file)
@@ -2,6 +2,7 @@
  * Hardware monitoring driver for Maxim MAX34440/MAX34441
  *
  * Copyright (c) 2011 Ericsson AB.
+ * Copyright (c) 2012 Guenter Roeck
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -25,7 +26,7 @@
 #include <linux/i2c.h>
 #include "pmbus.h"
 
-enum chips { max34440, max34441, max34446 };
+enum chips { max34440, max34441, max34446, max34460, max34461 };
 
 #define MAX34440_MFR_VOUT_PEAK         0xd4
 #define MAX34440_MFR_IOUT_PEAK         0xd5
@@ -87,7 +88,8 @@ static int max34440_read_word_data(struct i2c_client *client, int page, int reg)
                                           MAX34446_MFR_POUT_PEAK);
                break;
        case PMBUS_VIRT_READ_TEMP_AVG:
-               if (data->id != max34446)
+               if (data->id != max34446 && data->id != max34460 &&
+                   data->id != max34461)
                        return -ENXIO;
                ret = pmbus_read_word_data(client, page,
                                           MAX34446_MFR_TEMPERATURE_AVG);
@@ -322,6 +324,73 @@ static struct pmbus_driver_info max34440_info[] = {
                .read_word_data = max34440_read_word_data,
                .write_word_data = max34440_write_word_data,
        },
+       [max34460] = {
+               .pages = 18,
+               .format[PSC_VOLTAGE_OUT] = direct,
+               .format[PSC_TEMPERATURE] = direct,
+               .m[PSC_VOLTAGE_OUT] = 1,
+               .b[PSC_VOLTAGE_OUT] = 0,
+               .R[PSC_VOLTAGE_OUT] = 3,
+               .m[PSC_TEMPERATURE] = 1,
+               .b[PSC_TEMPERATURE] = 0,
+               .R[PSC_TEMPERATURE] = 2,
+               .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[4] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[5] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[6] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[7] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[8] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[9] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[10] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[11] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[13] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+               .func[14] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+               .func[15] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+               .func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+               .func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+               .read_byte_data = max34440_read_byte_data,
+               .read_word_data = max34440_read_word_data,
+               .write_word_data = max34440_write_word_data,
+       },
+       [max34461] = {
+               .pages = 23,
+               .format[PSC_VOLTAGE_OUT] = direct,
+               .format[PSC_TEMPERATURE] = direct,
+               .m[PSC_VOLTAGE_OUT] = 1,
+               .b[PSC_VOLTAGE_OUT] = 0,
+               .R[PSC_VOLTAGE_OUT] = 3,
+               .m[PSC_TEMPERATURE] = 1,
+               .b[PSC_TEMPERATURE] = 0,
+               .R[PSC_TEMPERATURE] = 2,
+               .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[4] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[5] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[6] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[7] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[8] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[9] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[10] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[11] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[12] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[13] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[14] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               .func[15] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+               /* page 16 is reserved */
+               .func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+               .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+               .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+               .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+               .func[21] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+               .read_byte_data = max34440_read_byte_data,
+               .read_word_data = max34440_read_word_data,
+               .write_word_data = max34440_write_word_data,
+       },
 };
 
 static int max34440_probe(struct i2c_client *client,
@@ -343,6 +412,8 @@ static const struct i2c_device_id max34440_id[] = {
        {"max34440", max34440},
        {"max34441", max34441},
        {"max34446", max34446},
+       {"max34460", max34460},
+       {"max34461", max34461},
        {}
 };
 MODULE_DEVICE_TABLE(i2c, max34440_id);
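
The m/b/R coefficient triples above describe the PMBus "direct" data format, Y = (m * X + b) * 10^R, where Y is the register value and X the real-world quantity. With m = 1, b = 0 and R = 3 the voltage registers simply hold millivolts, and R = 2 puts the temperature registers in hundredths of a degree. A userspace sketch of the decode direction; the pmbus core performs the equivalent conversion internally:

#include <stdio.h>

static double demo_direct_reg_to_value(int Y, int m, int b, int R)
{
        double y = Y;

        while (R > 0) { y /= 10; R--; }
        while (R < 0) { y *= 10; R++; }
        return (y - b) / m;
}

int main(void)
{
        /* VOUT: m=1, b=0, R=3 -> register holds millivolts */
        printf("%.3f V\n", demo_direct_reg_to_value(1205, 1, 0, 3));    /* 1.205 */
        /* TEMP: m=1, b=0, R=2 -> hundredths of a degree Celsius */
        printf("%.2f C\n", demo_direct_reg_to_value(2550, 1, 0, 2));    /* 25.50 */
        return 0;
}
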
index 3fe03dc47eb7f3753052dac8eed9d56456d4f529..fa9beb3eb60ca2a33cc6150542dfcbbc90b84219 100644 (file)
@@ -2,6 +2,7 @@
  * pmbus.h - Common defines and structures for PMBus devices
  *
  * Copyright (c) 2010, 2011 Ericsson AB.
+ * Copyright (c) 2012 Guenter Roeck
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #define PMBUS_VIRT_READ_TEMP2_MAX      (PMBUS_VIRT_BASE + 28)
 #define PMBUS_VIRT_RESET_TEMP2_HISTORY (PMBUS_VIRT_BASE + 29)
 
+#define PMBUS_VIRT_READ_VMON           (PMBUS_VIRT_BASE + 30)
+#define PMBUS_VIRT_VMON_UV_WARN_LIMIT  (PMBUS_VIRT_BASE + 31)
+#define PMBUS_VIRT_VMON_OV_WARN_LIMIT  (PMBUS_VIRT_BASE + 32)
+#define PMBUS_VIRT_VMON_UV_FAULT_LIMIT (PMBUS_VIRT_BASE + 33)
+#define PMBUS_VIRT_VMON_OV_FAULT_LIMIT (PMBUS_VIRT_BASE + 34)
+#define PMBUS_VIRT_STATUS_VMON         (PMBUS_VIRT_BASE + 35)
+
 /*
  * CAPABILITY
  */
@@ -317,6 +325,8 @@ enum pmbus_sensor_classes {
 #define PMBUS_HAVE_STATUS_TEMP (1 << 15)
 #define PMBUS_HAVE_STATUS_FAN12        (1 << 16)
 #define PMBUS_HAVE_STATUS_FAN34        (1 << 17)
+#define PMBUS_HAVE_VMON                (1 << 18)
+#define PMBUS_HAVE_STATUS_VMON (1 << 19)
 
 enum pmbus_data_format { linear = 0, direct, vid };
 
@@ -359,6 +369,7 @@ struct pmbus_driver_info {
 
 /* Function declarations */
 
+void pmbus_clear_cache(struct i2c_client *client);
 int pmbus_set_page(struct i2c_client *client, u8 page);
 int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg);
 int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word);
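
pmbus_clear_cache(), declared above and implemented in pmbus_core.c below, only marks the cached sensor readings invalid so the next sysfs access re-reads the chip. A hedged usage sketch from a hypothetical front-end driver: after writing a manufacturer-specific register that changes how the chip samples, drop the cache so stale values are not reported (the attribute and the register value 0xd4 are invented for illustration):

	/* Hypothetical sysfs store handler in a PMBus front-end driver. */
	static ssize_t samples_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		struct i2c_client *client = to_i2c_client(dev);
		unsigned long val;
		int ret;

		ret = kstrtoul(buf, 10, &val);
		if (ret)
			return ret;

		/* 0xd4 stands in for a made-up MFR_SAMPLE_CONFIG register. */
		ret = i2c_smbus_write_word_data(client, 0xd4, val);
		if (ret < 0)
			return ret;

		pmbus_clear_cache(client);	/* cached readings are now stale */
		return count;
	}
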
index 7d19b1bb9ce6acf7de556c93fefb685501a195e1..80eef50c50fd27f9022f3f3176ff6434ad945867 100644 (file)
@@ -2,6 +2,7 @@
  * Hardware monitoring driver for PMBus devices
  *
  * Copyright (c) 2010, 2011 Ericsson AB.
+ * Copyright (c) 2012 Guenter Roeck
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #include "pmbus.h"
 
 /*
- * Constants needed to determine number of sensors, booleans, and labels.
+ * Number of additional attribute pointers to allocate
+ * with each call to krealloc
  */
-#define PMBUS_MAX_INPUT_SENSORS                22      /* 10*volt, 7*curr, 5*power */
-#define PMBUS_VOUT_SENSORS_PER_PAGE    9       /* input, min, max, lcrit,
-                                                  crit, lowest, highest, avg,
-                                                  reset */
-#define PMBUS_IOUT_SENSORS_PER_PAGE    8       /* input, min, max, crit,
-                                                  lowest, highest, avg,
-                                                  reset */
-#define PMBUS_POUT_SENSORS_PER_PAGE    7       /* input, cap, max, crit,
-                                                * highest, avg, reset
-                                                */
-#define PMBUS_MAX_SENSORS_PER_FAN      1       /* input */
-#define PMBUS_MAX_SENSORS_PER_TEMP     9       /* input, min, max, lcrit,
-                                                * crit, lowest, highest, avg,
-                                                * reset
-                                                */
-
-#define PMBUS_MAX_INPUT_BOOLEANS       7       /* v: min_alarm, max_alarm,
-                                                  lcrit_alarm, crit_alarm;
-                                                  c: alarm, crit_alarm;
-                                                  p: crit_alarm */
-#define PMBUS_VOUT_BOOLEANS_PER_PAGE   4       /* min_alarm, max_alarm,
-                                                  lcrit_alarm, crit_alarm */
-#define PMBUS_IOUT_BOOLEANS_PER_PAGE   3       /* alarm, lcrit_alarm,
-                                                  crit_alarm */
-#define PMBUS_POUT_BOOLEANS_PER_PAGE   3       /* cap_alarm, alarm, crit_alarm
-                                                */
-#define PMBUS_MAX_BOOLEANS_PER_FAN     2       /* alarm, fault */
-#define PMBUS_MAX_BOOLEANS_PER_TEMP    4       /* min_alarm, max_alarm,
-                                                  lcrit_alarm, crit_alarm */
-
-#define PMBUS_MAX_INPUT_LABELS         4       /* vin, vcap, iin, pin */
-
-/*
- * status, status_vout, status_iout, status_fans, status_fan34, and status_temp
- * are paged. status_input is unpaged.
- */
-#define PB_NUM_STATUS_REG      (PMBUS_PAGES * 6 + 1)
+#define PMBUS_ATTR_ALLOC_SIZE  32
 
 /*
  * Index into status register array, per status register group
 #define PB_STATUS_IOUT_BASE    (PB_STATUS_VOUT_BASE + PMBUS_PAGES)
 #define PB_STATUS_FAN_BASE     (PB_STATUS_IOUT_BASE + PMBUS_PAGES)
 #define PB_STATUS_FAN34_BASE   (PB_STATUS_FAN_BASE + PMBUS_PAGES)
-#define PB_STATUS_INPUT_BASE   (PB_STATUS_FAN34_BASE + PMBUS_PAGES)
-#define PB_STATUS_TEMP_BASE    (PB_STATUS_INPUT_BASE + 1)
+#define PB_STATUS_TEMP_BASE    (PB_STATUS_FAN34_BASE + PMBUS_PAGES)
+#define PB_STATUS_INPUT_BASE   (PB_STATUS_TEMP_BASE + PMBUS_PAGES)
+#define PB_STATUS_VMON_BASE    (PB_STATUS_INPUT_BASE + 1)
+
+#define PB_NUM_STATUS_REG      (PB_STATUS_VMON_BASE + 1)
 
 #define PMBUS_NAME_SIZE                24
 
 struct pmbus_sensor {
+       struct pmbus_sensor *next;
        char name[PMBUS_NAME_SIZE];     /* sysfs sensor name */
-       struct sensor_device_attribute attribute;
+       struct device_attribute attribute;
        u8 page;                /* page number */
        u16 reg;                /* register */
        enum pmbus_sensor_classes class;        /* sensor class */
@@ -94,19 +64,28 @@ struct pmbus_sensor {
        int data;               /* Sensor data.
                                   Negative if there was a read error */
 };
+#define to_pmbus_sensor(_attr) \
+       container_of(_attr, struct pmbus_sensor, attribute)
 
 struct pmbus_boolean {
        char name[PMBUS_NAME_SIZE];     /* sysfs boolean name */
        struct sensor_device_attribute attribute;
+       struct pmbus_sensor *s1;
+       struct pmbus_sensor *s2;
 };
+#define to_pmbus_boolean(_attr) \
+       container_of(_attr, struct pmbus_boolean, attribute)
 
 struct pmbus_label {
        char name[PMBUS_NAME_SIZE];     /* sysfs label name */
-       struct sensor_device_attribute attribute;
+       struct device_attribute attribute;
        char label[PMBUS_NAME_SIZE];    /* label */
 };
+#define to_pmbus_label(_attr) \
+       container_of(_attr, struct pmbus_label, attribute)
 
 struct pmbus_data {
+       struct device *dev;
        struct device *hwmon_dev;
 
        u32 flags;              /* from platform data */
@@ -117,29 +96,9 @@ struct pmbus_data {
 
        int max_attributes;
        int num_attributes;
-       struct attribute **attributes;
        struct attribute_group group;
 
-       /*
-        * Sensors cover both sensor and limit registers.
-        */
-       int max_sensors;
-       int num_sensors;
        struct pmbus_sensor *sensors;
-       /*
-        * Booleans are used for alarms.
-        * Values are determined from status registers.
-        */
-       int max_booleans;
-       int num_booleans;
-       struct pmbus_boolean *booleans;
-       /*
-        * Labels are used to map generic names (e.g., "in1")
-        * to PMBus specific names (e.g., "vin" or "vout1").
-        */
-       int max_labels;
-       int num_labels;
-       struct pmbus_label *labels;
 
        struct mutex update_lock;
        bool valid;
@@ -150,10 +109,19 @@ struct pmbus_data {
         * so we keep them all together.
         */
        u8 status[PB_NUM_STATUS_REG];
+       u8 status_register;
 
        u8 currpage;
 };
 
+void pmbus_clear_cache(struct i2c_client *client)
+{
+       struct pmbus_data *data = i2c_get_clientdata(client);
+
+       data->valid = false;
+}
+EXPORT_SYMBOL_GPL(pmbus_clear_cache);
+
 int pmbus_set_page(struct i2c_client *client, u8 page)
 {
        struct pmbus_data *data = i2c_get_clientdata(client);
@@ -318,9 +286,10 @@ EXPORT_SYMBOL_GPL(pmbus_clear_faults);
 
 static int pmbus_check_status_cml(struct i2c_client *client)
 {
+       struct pmbus_data *data = i2c_get_clientdata(client);
        int status, status2;
 
-       status = _pmbus_read_byte_data(client, -1, PMBUS_STATUS_BYTE);
+       status = _pmbus_read_byte_data(client, -1, data->status_register);
        if (status < 0 || (status & PB_STATUS_CML)) {
                status2 = _pmbus_read_byte_data(client, -1, PMBUS_STATUS_CML);
                if (status2 < 0 || (status2 & PB_CML_FAULT_INVALID_COMMAND))
@@ -329,29 +298,30 @@ static int pmbus_check_status_cml(struct i2c_client *client)
        return 0;
 }
 
-bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg)
+static bool pmbus_check_register(struct i2c_client *client,
+                                int (*func)(struct i2c_client *client,
+                                            int page, int reg),
+                                int page, int reg)
 {
        int rv;
        struct pmbus_data *data = i2c_get_clientdata(client);
 
-       rv = _pmbus_read_byte_data(client, page, reg);
+       rv = func(client, page, reg);
        if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
                rv = pmbus_check_status_cml(client);
        pmbus_clear_fault_page(client, -1);
        return rv >= 0;
 }
+
+bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg)
+{
+       return pmbus_check_register(client, _pmbus_read_byte_data, page, reg);
+}
 EXPORT_SYMBOL_GPL(pmbus_check_byte_register);
 
 bool pmbus_check_word_register(struct i2c_client *client, int page, int reg)
 {
-       int rv;
-       struct pmbus_data *data = i2c_get_clientdata(client);
-
-       rv = _pmbus_read_word_data(client, page, reg);
-       if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
-               rv = pmbus_check_status_cml(client);
-       pmbus_clear_fault_page(client, -1);
-       return rv >= 0;
+       return pmbus_check_register(client, _pmbus_read_word_data, page, reg);
 }
 EXPORT_SYMBOL_GPL(pmbus_check_word_register);
 
@@ -363,53 +333,43 @@ const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client *client)
 }
 EXPORT_SYMBOL_GPL(pmbus_get_driver_info);
 
+static struct _pmbus_status {
+       u32 func;
+       u16 base;
+       u16 reg;
+} pmbus_status[] = {
+       { PMBUS_HAVE_STATUS_VOUT, PB_STATUS_VOUT_BASE, PMBUS_STATUS_VOUT },
+       { PMBUS_HAVE_STATUS_IOUT, PB_STATUS_IOUT_BASE, PMBUS_STATUS_IOUT },
+       { PMBUS_HAVE_STATUS_TEMP, PB_STATUS_TEMP_BASE,
+         PMBUS_STATUS_TEMPERATURE },
+       { PMBUS_HAVE_STATUS_FAN12, PB_STATUS_FAN_BASE, PMBUS_STATUS_FAN_12 },
+       { PMBUS_HAVE_STATUS_FAN34, PB_STATUS_FAN34_BASE, PMBUS_STATUS_FAN_34 },
+};
+
 static struct pmbus_data *pmbus_update_device(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct pmbus_data *data = i2c_get_clientdata(client);
        const struct pmbus_driver_info *info = data->info;
+       struct pmbus_sensor *sensor;
 
        mutex_lock(&data->update_lock);
        if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
-               int i;
+               int i, j;
 
-               for (i = 0; i < info->pages; i++)
+               for (i = 0; i < info->pages; i++) {
                        data->status[PB_STATUS_BASE + i]
                            = _pmbus_read_byte_data(client, i,
-                                                   PMBUS_STATUS_BYTE);
-               for (i = 0; i < info->pages; i++) {
-                       if (!(info->func[i] & PMBUS_HAVE_STATUS_VOUT))
-                               continue;
-                       data->status[PB_STATUS_VOUT_BASE + i]
-                         = _pmbus_read_byte_data(client, i, PMBUS_STATUS_VOUT);
-               }
-               for (i = 0; i < info->pages; i++) {
-                       if (!(info->func[i] & PMBUS_HAVE_STATUS_IOUT))
-                               continue;
-                       data->status[PB_STATUS_IOUT_BASE + i]
-                         = _pmbus_read_byte_data(client, i, PMBUS_STATUS_IOUT);
-               }
-               for (i = 0; i < info->pages; i++) {
-                       if (!(info->func[i] & PMBUS_HAVE_STATUS_TEMP))
-                               continue;
-                       data->status[PB_STATUS_TEMP_BASE + i]
-                         = _pmbus_read_byte_data(client, i,
-                                                 PMBUS_STATUS_TEMPERATURE);
-               }
-               for (i = 0; i < info->pages; i++) {
-                       if (!(info->func[i] & PMBUS_HAVE_STATUS_FAN12))
-                               continue;
-                       data->status[PB_STATUS_FAN_BASE + i]
-                         = _pmbus_read_byte_data(client, i,
-                                                 PMBUS_STATUS_FAN_12);
-               }
-
-               for (i = 0; i < info->pages; i++) {
-                       if (!(info->func[i] & PMBUS_HAVE_STATUS_FAN34))
-                               continue;
-                       data->status[PB_STATUS_FAN34_BASE + i]
-                         = _pmbus_read_byte_data(client, i,
-                                                 PMBUS_STATUS_FAN_34);
+                                                   data->status_register);
+                       for (j = 0; j < ARRAY_SIZE(pmbus_status); j++) {
+                               struct _pmbus_status *s = &pmbus_status[j];
+
+                               if (!(info->func[i] & s->func))
+                                       continue;
+                               data->status[s->base + i]
+                                       = _pmbus_read_byte_data(client, i,
+                                                               s->reg);
+                       }
                }
 
                if (info->func[0] & PMBUS_HAVE_STATUS_INPUT)
@@ -417,9 +377,12 @@ static struct pmbus_data *pmbus_update_device(struct device *dev)
                          = _pmbus_read_byte_data(client, 0,
                                                  PMBUS_STATUS_INPUT);
 
-               for (i = 0; i < data->num_sensors; i++) {
-                       struct pmbus_sensor *sensor = &data->sensors[i];
+               if (info->func[0] & PMBUS_HAVE_STATUS_VMON)
+                       data->status[PB_STATUS_VMON_BASE]
+                         = _pmbus_read_byte_data(client, 0,
+                                                 PMBUS_VIRT_STATUS_VMON);
 
+               for (sensor = data->sensors; sensor; sensor = sensor->next) {
                        if (!data->valid || sensor->update)
                                sensor->data
                                    = _pmbus_read_word_data(client,
@@ -657,7 +620,7 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
 static u16 pmbus_data2reg_vid(struct pmbus_data *data,
                              enum pmbus_sensor_classes class, long val)
 {
-       val = SENSORS_LIMIT(val, 500, 1600);
+       val = clamp_val(val, 500, 1600);
 
        return 2 + DIV_ROUND_CLOSEST((1600 - val) * 100, 625);
 }
@@ -684,25 +647,20 @@ static u16 pmbus_data2reg(struct pmbus_data *data,
 
 /*
  * Return boolean calculated from converted data.
- * <index> defines a status register index and mask, and optionally
- * two sensor indexes.
- * The upper half-word references the two sensors,
- * the lower half word references status register and mask.
- * The function returns true if (status[reg] & mask) is true and,
- * if specified, if v1 >= v2.
- * To determine if an object exceeds upper limits, specify <v, limit>.
- * To determine if an object exceeds lower limits, specify <limit, v>.
+ * <index> defines a status register index and mask.
+ * The mask is in the lower 8 bits, the register index is in bits 8..23.
  *
- * For booleans created with pmbus_add_boolean_reg(), only the lower 16 bits of
- * index are set. s1 and s2 (the sensor index values) are zero in this case.
- * The function returns true if (status[reg] & mask) is true.
+ * The associated pmbus_boolean structure contains optional pointers to two
+ * sensor attributes. If specified, those attributes are compared against each
+ * other to determine if a limit has been exceeded.
  *
- * If the boolean was created with pmbus_add_boolean_cmp(), a comparison against
- * a specified limit has to be performed to determine the boolean result.
+ * If the sensor attribute pointers are NULL, the function returns true if
+ * (status[reg] & mask) is true.
+ *
+ * If sensor attribute pointers are provided, a comparison against a specified
+ * limit has to be performed to determine the boolean result.
  * In this case, the function returns true if v1 >= v2 (where v1 and v2 are
- * sensor values referenced by sensor indices s1 and s2).
+ * sensor values referenced by sensor attribute pointers s1 and s2).
  *
  * To determine if an object exceeds upper limits, specify <s1,s2> = <v,limit>.
  * To determine if an object exceeds lower limits, specify <s1,s2> = <limit,v>.
@@ -710,11 +668,12 @@ static u16 pmbus_data2reg(struct pmbus_data *data,
  * If a negative value is stored in any of the referenced registers, this value
  * reflects an error code which will be returned.
  */
-static int pmbus_get_boolean(struct pmbus_data *data, int index)
+static int pmbus_get_boolean(struct pmbus_data *data, struct pmbus_boolean *b,
+                            int index)
 {
-       u8 s1 = (index >> 24) & 0xff;
-       u8 s2 = (index >> 16) & 0xff;
-       u8 reg = (index >> 8) & 0xff;
+       struct pmbus_sensor *s1 = b->s1;
+       struct pmbus_sensor *s2 = b->s2;
+       u16 reg = (index >> 8) & 0xffff;
        u8 mask = index & 0xff;
        int ret, status;
        u8 regval;
@@ -724,21 +683,21 @@ static int pmbus_get_boolean(struct pmbus_data *data, int index)
                return status;
 
        regval = status & mask;
-       if (!s1 && !s2)
+       if (!s1 && !s2) {
                ret = !!regval;
-       else {
+       } else if (!s1 || !s2) {
+               BUG();
+               return 0;
+       } else {
                long v1, v2;
-               struct pmbus_sensor *sensor1, *sensor2;
 
-               sensor1 = &data->sensors[s1];
-               if (sensor1->data < 0)
-                       return sensor1->data;
-               sensor2 = &data->sensors[s2];
-               if (sensor2->data < 0)
-                       return sensor2->data;
+               if (s1->data < 0)
+                       return s1->data;
+               if (s2->data < 0)
+                       return s2->data;
 
-               v1 = pmbus_reg2data(data, sensor1);
-               v2 = pmbus_reg2data(data, sensor2);
+               v1 = pmbus_reg2data(data, s1);
+               v2 = pmbus_reg2data(data, s2);
                ret = !!(regval && v1 >= v2);
        }
        return ret;
@@ -748,23 +707,22 @@ static ssize_t pmbus_show_boolean(struct device *dev,
                                  struct device_attribute *da, char *buf)
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+       struct pmbus_boolean *boolean = to_pmbus_boolean(attr);
        struct pmbus_data *data = pmbus_update_device(dev);
        int val;
 
-       val = pmbus_get_boolean(data, attr->index);
+       val = pmbus_get_boolean(data, boolean, attr->index);
        if (val < 0)
                return val;
        return snprintf(buf, PAGE_SIZE, "%d\n", val);
 }
 
 static ssize_t pmbus_show_sensor(struct device *dev,
-                                struct device_attribute *da, char *buf)
+                                struct device_attribute *devattr, char *buf)
 {
-       struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
        struct pmbus_data *data = pmbus_update_device(dev);
-       struct pmbus_sensor *sensor;
+       struct pmbus_sensor *sensor = to_pmbus_sensor(devattr);
 
-       sensor = &data->sensors[attr->index];
        if (sensor->data < 0)
                return sensor->data;
 
@@ -775,10 +733,9 @@ static ssize_t pmbus_set_sensor(struct device *dev,
                                struct device_attribute *devattr,
                                const char *buf, size_t count)
 {
-       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct i2c_client *client = to_i2c_client(dev);
        struct pmbus_data *data = i2c_get_clientdata(client);
-       struct pmbus_sensor *sensor = &data->sensors[attr->index];
+       struct pmbus_sensor *sensor = to_pmbus_sensor(devattr);
        ssize_t rv = count;
        long val = 0;
        int ret;
@@ -793,7 +750,7 @@ static ssize_t pmbus_set_sensor(struct device *dev,
        if (ret < 0)
                rv = ret;
        else
-               data->sensors[attr->index].data = regval;
+               sensor->data = regval;
        mutex_unlock(&data->update_lock);
        return rv;
 }
@@ -801,102 +758,130 @@ static ssize_t pmbus_set_sensor(struct device *dev,
 static ssize_t pmbus_show_label(struct device *dev,
                                struct device_attribute *da, char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct pmbus_data *data = i2c_get_clientdata(client);
-       struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+       struct pmbus_label *label = to_pmbus_label(da);
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",
-                       data->labels[attr->index].label);
+       return snprintf(buf, PAGE_SIZE, "%s\n", label->label);
 }
 
-#define PMBUS_ADD_ATTR(data, _name, _idx, _mode, _type, _show, _set)   \
-do {                                                                   \
-       struct sensor_device_attribute *a                               \
-           = &data->_type##s[data->num_##_type##s].attribute;          \
-       BUG_ON(data->num_attributes >= data->max_attributes);           \
-       sysfs_attr_init(&a->dev_attr.attr);                             \
-       a->dev_attr.attr.name = _name;                                  \
-       a->dev_attr.attr.mode = _mode;                                  \
-       a->dev_attr.show = _show;                                       \
-       a->dev_attr.store = _set;                                       \
-       a->index = _idx;                                                \
-       data->attributes[data->num_attributes] = &a->dev_attr.attr;     \
-       data->num_attributes++;                                         \
-} while (0)
-
-#define PMBUS_ADD_GET_ATTR(data, _name, _type, _idx)                   \
-       PMBUS_ADD_ATTR(data, _name, _idx, S_IRUGO, _type,               \
-                      pmbus_show_##_type,  NULL)
-
-#define PMBUS_ADD_SET_ATTR(data, _name, _type, _idx)                   \
-       PMBUS_ADD_ATTR(data, _name, _idx, S_IWUSR | S_IRUGO, _type,     \
-                      pmbus_show_##_type, pmbus_set_##_type)
-
-static void pmbus_add_boolean(struct pmbus_data *data,
-                             const char *name, const char *type, int seq,
-                             int idx)
+static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
 {
-       struct pmbus_boolean *boolean;
-
-       BUG_ON(data->num_booleans >= data->max_booleans);
-
-       boolean = &data->booleans[data->num_booleans];
+       if (data->num_attributes >= data->max_attributes - 1) {
+               data->max_attributes += PMBUS_ATTR_ALLOC_SIZE;
+               data->group.attrs = krealloc(data->group.attrs,
+                                            sizeof(struct attribute *) *
+                                            data->max_attributes, GFP_KERNEL);
+               if (data->group.attrs == NULL)
+                       return -ENOMEM;
+       }
 
-       snprintf(boolean->name, sizeof(boolean->name), "%s%d_%s",
-                name, seq, type);
-       PMBUS_ADD_GET_ATTR(data, boolean->name, boolean, idx);
-       data->num_booleans++;
+       data->group.attrs[data->num_attributes++] = attr;
+       data->group.attrs[data->num_attributes] = NULL;
+       return 0;
 }
 
-static void pmbus_add_boolean_reg(struct pmbus_data *data,
-                                 const char *name, const char *type,
-                                 int seq, int reg, int bit)
+static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
+                               const char *name,
+                               umode_t mode,
+                               ssize_t (*show)(struct device *dev,
+                                               struct device_attribute *attr,
+                                               char *buf),
+                               ssize_t (*store)(struct device *dev,
+                                                struct device_attribute *attr,
+                                                const char *buf, size_t count))
 {
-       pmbus_add_boolean(data, name, type, seq, (reg << 8) | bit);
+       sysfs_attr_init(&dev_attr->attr);
+       dev_attr->attr.name = name;
+       dev_attr->attr.mode = mode;
+       dev_attr->show = show;
+       dev_attr->store = store;
 }
 
-static void pmbus_add_boolean_cmp(struct pmbus_data *data,
-                                 const char *name, const char *type,
-                                 int seq, int i1, int i2, int reg, int mask)
+static void pmbus_attr_init(struct sensor_device_attribute *a,
+                           const char *name,
+                           umode_t mode,
+                           ssize_t (*show)(struct device *dev,
+                                           struct device_attribute *attr,
+                                           char *buf),
+                           ssize_t (*store)(struct device *dev,
+                                            struct device_attribute *attr,
+                                            const char *buf, size_t count),
+                           int idx)
 {
-       pmbus_add_boolean(data, name, type, seq,
-                         (i1 << 24) | (i2 << 16) | (reg << 8) | mask);
+       pmbus_dev_attr_init(&a->dev_attr, name, mode, show, store);
+       a->index = idx;
 }
 
-static void pmbus_add_sensor(struct pmbus_data *data,
+static int pmbus_add_boolean(struct pmbus_data *data,
                             const char *name, const char *type, int seq,
-                            int page, int reg, enum pmbus_sensor_classes class,
-                            bool update, bool readonly)
+                            struct pmbus_sensor *s1,
+                            struct pmbus_sensor *s2,
+                            u16 reg, u8 mask)
+{
+       struct pmbus_boolean *boolean;
+       struct sensor_device_attribute *a;
+
+       boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
+       if (!boolean)
+               return -ENOMEM;
+
+       a = &boolean->attribute;
+
+       snprintf(boolean->name, sizeof(boolean->name), "%s%d_%s",
+                name, seq, type);
+       boolean->s1 = s1;
+       boolean->s2 = s2;
+       pmbus_attr_init(a, boolean->name, S_IRUGO, pmbus_show_boolean, NULL,
+                       (reg << 8) | mask);
+
+       return pmbus_add_attribute(data, &a->dev_attr.attr);
+}
+
+static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
+                                            const char *name, const char *type,
+                                            int seq, int page, int reg,
+                                            enum pmbus_sensor_classes class,
+                                            bool update, bool readonly)
 {
        struct pmbus_sensor *sensor;
+       struct device_attribute *a;
 
-       BUG_ON(data->num_sensors >= data->max_sensors);
+       sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
+       if (!sensor)
+               return NULL;
+       a = &sensor->attribute;
 
-       sensor = &data->sensors[data->num_sensors];
        snprintf(sensor->name, sizeof(sensor->name), "%s%d_%s",
                 name, seq, type);
        sensor->page = page;
        sensor->reg = reg;
        sensor->class = class;
        sensor->update = update;
-       if (readonly)
-               PMBUS_ADD_GET_ATTR(data, sensor->name, sensor,
-                                  data->num_sensors);
-       else
-               PMBUS_ADD_SET_ATTR(data, sensor->name, sensor,
-                                  data->num_sensors);
-       data->num_sensors++;
+       pmbus_dev_attr_init(a, sensor->name,
+                           readonly ? S_IRUGO : S_IRUGO | S_IWUSR,
+                           pmbus_show_sensor, pmbus_set_sensor);
+
+       if (pmbus_add_attribute(data, &a->attr))
+               return NULL;
+
+       sensor->next = data->sensors;
+       data->sensors = sensor;
+
+       return sensor;
 }
 
-static void pmbus_add_label(struct pmbus_data *data,
-                           const char *name, int seq,
-                           const char *lstring, int index)
+static int pmbus_add_label(struct pmbus_data *data,
+                          const char *name, int seq,
+                          const char *lstring, int index)
 {
        struct pmbus_label *label;
+       struct device_attribute *a;
 
-       BUG_ON(data->num_labels >= data->max_labels);
+       label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
+       if (!label)
+               return -ENOMEM;
+
+       a = &label->attribute;
 
-       label = &data->labels[data->num_labels];
        snprintf(label->name, sizeof(label->name), "%s%d_label", name, seq);
        if (!index)
                strncpy(label->label, lstring, sizeof(label->label) - 1);
@@ -904,65 +889,8 @@ static void pmbus_add_label(struct pmbus_data *data,
                snprintf(label->label, sizeof(label->label), "%s%d", lstring,
                         index);
 
-       PMBUS_ADD_GET_ATTR(data, label->name, label, data->num_labels);
-       data->num_labels++;
-}
-
-/*
- * Determine maximum number of sensors, booleans, and labels.
- * To keep things simple, only make a rough high estimate.
- */
-static void pmbus_find_max_attr(struct i2c_client *client,
-                               struct pmbus_data *data)
-{
-       const struct pmbus_driver_info *info = data->info;
-       int page, max_sensors, max_booleans, max_labels;
-
-       max_sensors = PMBUS_MAX_INPUT_SENSORS;
-       max_booleans = PMBUS_MAX_INPUT_BOOLEANS;
-       max_labels = PMBUS_MAX_INPUT_LABELS;
-
-       for (page = 0; page < info->pages; page++) {
-               if (info->func[page] & PMBUS_HAVE_VOUT) {
-                       max_sensors += PMBUS_VOUT_SENSORS_PER_PAGE;
-                       max_booleans += PMBUS_VOUT_BOOLEANS_PER_PAGE;
-                       max_labels++;
-               }
-               if (info->func[page] & PMBUS_HAVE_IOUT) {
-                       max_sensors += PMBUS_IOUT_SENSORS_PER_PAGE;
-                       max_booleans += PMBUS_IOUT_BOOLEANS_PER_PAGE;
-                       max_labels++;
-               }
-               if (info->func[page] & PMBUS_HAVE_POUT) {
-                       max_sensors += PMBUS_POUT_SENSORS_PER_PAGE;
-                       max_booleans += PMBUS_POUT_BOOLEANS_PER_PAGE;
-                       max_labels++;
-               }
-               if (info->func[page] & PMBUS_HAVE_FAN12) {
-                       max_sensors += 2 * PMBUS_MAX_SENSORS_PER_FAN;
-                       max_booleans += 2 * PMBUS_MAX_BOOLEANS_PER_FAN;
-               }
-               if (info->func[page] & PMBUS_HAVE_FAN34) {
-                       max_sensors += 2 * PMBUS_MAX_SENSORS_PER_FAN;
-                       max_booleans += 2 * PMBUS_MAX_BOOLEANS_PER_FAN;
-               }
-               if (info->func[page] & PMBUS_HAVE_TEMP) {
-                       max_sensors += PMBUS_MAX_SENSORS_PER_TEMP;
-                       max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP;
-               }
-               if (info->func[page] & PMBUS_HAVE_TEMP2) {
-                       max_sensors += PMBUS_MAX_SENSORS_PER_TEMP;
-                       max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP;
-               }
-               if (info->func[page] & PMBUS_HAVE_TEMP3) {
-                       max_sensors += PMBUS_MAX_SENSORS_PER_TEMP;
-                       max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP;
-               }
-       }
-       data->max_sensors = max_sensors;
-       data->max_booleans = max_booleans;
-       data->max_labels = max_labels;
-       data->max_attributes = max_sensors + max_booleans + max_labels;
+       pmbus_dev_attr_init(a, label->name, S_IRUGO, pmbus_show_label, NULL);
+       return pmbus_add_attribute(data, &a->attr);
 }
 
 /*
@@ -975,12 +903,12 @@ static void pmbus_find_max_attr(struct i2c_client *client,
  */
 struct pmbus_limit_attr {
        u16 reg;                /* Limit register */
+       u16 sbit;               /* Alarm attribute status bit */
        bool update;            /* True if register needs updates */
        bool low;               /* True if low limit; for limits with compare
                                   functions only */
        const char *attr;       /* Attribute name */
        const char *alarm;      /* Alarm attribute name */
-       u32 sbit;               /* Alarm attribute status bit */
 };
 
 /*
@@ -988,7 +916,9 @@ struct pmbus_limit_attr {
  * description includes a reference to the associated limit attributes.
  */
 struct pmbus_sensor_attr {
-       u8 reg;                         /* sensor register */
+       u16 reg;                        /* sensor register */
+       u8 gbit;                        /* generic status bit */
+       u8 nlimit;                      /* # of limit registers */
        enum pmbus_sensor_classes class;/* sensor class */
        const char *label;              /* sensor label */
        bool paged;                     /* true if paged sensor */
@@ -997,47 +927,47 @@ struct pmbus_sensor_attr {
        u32 func;                       /* sensor mask */
        u32 sfunc;                      /* sensor status mask */
        int sbase;                      /* status base register */
-       u32 gbit;                       /* generic status bit */
        const struct pmbus_limit_attr *limit;/* limit registers */
-       int nlimit;                     /* # of limit registers */
 };
 
 /*
  * Add a set of limit attributes and, if supported, the associated
  * alarm attributes.
+ * returns 0 if no alarm register found, 1 if an alarm register was found,
+ * < 0 on errors.
  */
-static bool pmbus_add_limit_attrs(struct i2c_client *client,
-                                 struct pmbus_data *data,
-                                 const struct pmbus_driver_info *info,
-                                 const char *name, int index, int page,
-                                 int cbase,
-                                 const struct pmbus_sensor_attr *attr)
+static int pmbus_add_limit_attrs(struct i2c_client *client,
+                                struct pmbus_data *data,
+                                const struct pmbus_driver_info *info,
+                                const char *name, int index, int page,
+                                struct pmbus_sensor *base,
+                                const struct pmbus_sensor_attr *attr)
 {
        const struct pmbus_limit_attr *l = attr->limit;
        int nlimit = attr->nlimit;
-       bool have_alarm = false;
-       int i, cindex;
+       int have_alarm = 0;
+       int i, ret;
+       struct pmbus_sensor *curr;
 
        for (i = 0; i < nlimit; i++) {
                if (pmbus_check_word_register(client, page, l->reg)) {
-                       cindex = data->num_sensors;
-                       pmbus_add_sensor(data, name, l->attr, index, page,
-                                        l->reg, attr->class,
-                                        attr->update || l->update,
-                                        false);
+                       curr = pmbus_add_sensor(data, name, l->attr, index,
+                                               page, l->reg, attr->class,
+                                               attr->update || l->update,
+                                               false);
+                       if (!curr)
+                               return -ENOMEM;
                        if (l->sbit && (info->func[page] & attr->sfunc)) {
-                               if (attr->compare) {
-                                       pmbus_add_boolean_cmp(data, name,
-                                               l->alarm, index,
-                                               l->low ? cindex : cbase,
-                                               l->low ? cbase : cindex,
-                                               attr->sbase + page, l->sbit);
-                               } else {
-                                       pmbus_add_boolean_reg(data, name,
-                                               l->alarm, index,
-                                               attr->sbase + page, l->sbit);
-                               }
-                               have_alarm = true;
+                               ret = pmbus_add_boolean(data, name,
+                                       l->alarm, index,
+                                       attr->compare ?  l->low ? curr : base
+                                                     : NULL,
+                                       attr->compare ? l->low ? base : curr
+                                                     : NULL,
+                                       attr->sbase + page, l->sbit);
+                               if (ret)
+                                       return ret;
+                               have_alarm = 1;
                        }
                }
                l++;
@@ -1045,45 +975,59 @@ static bool pmbus_add_limit_attrs(struct i2c_client *client,
        return have_alarm;
 }
 
-static void pmbus_add_sensor_attrs_one(struct i2c_client *client,
-                                      struct pmbus_data *data,
-                                      const struct pmbus_driver_info *info,
-                                      const char *name,
-                                      int index, int page,
-                                      const struct pmbus_sensor_attr *attr)
+static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
+                                     struct pmbus_data *data,
+                                     const struct pmbus_driver_info *info,
+                                     const char *name,
+                                     int index, int page,
+                                     const struct pmbus_sensor_attr *attr)
 {
-       bool have_alarm;
-       int cbase = data->num_sensors;
-
-       if (attr->label)
-               pmbus_add_label(data, name, index, attr->label,
-                               attr->paged ? page + 1 : 0);
-       pmbus_add_sensor(data, name, "input", index, page, attr->reg,
-                        attr->class, true, true);
+       struct pmbus_sensor *base;
+       int ret;
+
+       if (attr->label) {
+               ret = pmbus_add_label(data, name, index, attr->label,
+                                     attr->paged ? page + 1 : 0);
+               if (ret)
+                       return ret;
+       }
+       base = pmbus_add_sensor(data, name, "input", index, page, attr->reg,
+                               attr->class, true, true);
+       if (!base)
+               return -ENOMEM;
        if (attr->sfunc) {
-               have_alarm = pmbus_add_limit_attrs(client, data, info, name,
-                                                  index, page, cbase, attr);
+               ret = pmbus_add_limit_attrs(client, data, info, name,
+                                           index, page, base, attr);
+               if (ret < 0)
+                       return ret;
                /*
                 * Add generic alarm attribute only if there are no individual
                 * alarm attributes, if there is a global alarm bit, and if
                 * the generic status register for this page is accessible.
                 */
-               if (!have_alarm && attr->gbit &&
-                   pmbus_check_byte_register(client, page, PMBUS_STATUS_BYTE))
-                       pmbus_add_boolean_reg(data, name, "alarm", index,
-                                             PB_STATUS_BASE + page,
-                                             attr->gbit);
+               if (!ret && attr->gbit &&
+                   pmbus_check_byte_register(client, page,
+                                             data->status_register)) {
+                       ret = pmbus_add_boolean(data, name, "alarm", index,
+                                               NULL, NULL,
+                                               PB_STATUS_BASE + page,
+                                               attr->gbit);
+                       if (ret)
+                               return ret;
+               }
        }
+       return 0;
 }
 
-static void pmbus_add_sensor_attrs(struct i2c_client *client,
-                                  struct pmbus_data *data,
-                                  const char *name,
-                                  const struct pmbus_sensor_attr *attrs,
-                                  int nattrs)
+static int pmbus_add_sensor_attrs(struct i2c_client *client,
+                                 struct pmbus_data *data,
+                                 const char *name,
+                                 const struct pmbus_sensor_attr *attrs,
+                                 int nattrs)
 {
        const struct pmbus_driver_info *info = data->info;
        int index, i;
+       int ret;
 
        index = 1;
        for (i = 0; i < nattrs; i++) {
@@ -1093,12 +1037,16 @@ static void pmbus_add_sensor_attrs(struct i2c_client *client,
                for (page = 0; page < pages; page++) {
                        if (!(info->func[page] & attrs->func))
                                continue;
-                       pmbus_add_sensor_attrs_one(client, data, info, name,
-                                                  index, page, attrs);
+                       ret = pmbus_add_sensor_attrs_one(client, data, info,
+                                                        name, index, page,
+                                                        attrs);
+                       if (ret)
+                               return ret;
                        index++;
                }
                attrs++;
        }
+       return 0;
 }
 
 static const struct pmbus_limit_attr vin_limit_attrs[] = {
@@ -1140,6 +1088,30 @@ static const struct pmbus_limit_attr vin_limit_attrs[] = {
        },
 };
 
+static const struct pmbus_limit_attr vmon_limit_attrs[] = {
+       {
+               .reg = PMBUS_VIRT_VMON_UV_WARN_LIMIT,
+               .attr = "min",
+               .alarm = "min_alarm",
+               .sbit = PB_VOLTAGE_UV_WARNING,
+       }, {
+               .reg = PMBUS_VIRT_VMON_UV_FAULT_LIMIT,
+               .attr = "lcrit",
+               .alarm = "lcrit_alarm",
+               .sbit = PB_VOLTAGE_UV_FAULT,
+       }, {
+               .reg = PMBUS_VIRT_VMON_OV_WARN_LIMIT,
+               .attr = "max",
+               .alarm = "max_alarm",
+               .sbit = PB_VOLTAGE_OV_WARNING,
+       }, {
+               .reg = PMBUS_VIRT_VMON_OV_FAULT_LIMIT,
+               .attr = "crit",
+               .alarm = "crit_alarm",
+               .sbit = PB_VOLTAGE_OV_FAULT,
+       }
+};
+
 static const struct pmbus_limit_attr vout_limit_attrs[] = {
        {
                .reg = PMBUS_VOUT_UV_WARN_LIMIT,
@@ -1190,6 +1162,15 @@ static const struct pmbus_sensor_attr voltage_attributes[] = {
                .gbit = PB_STATUS_VIN_UV,
                .limit = vin_limit_attrs,
                .nlimit = ARRAY_SIZE(vin_limit_attrs),
+       }, {
+               .reg = PMBUS_VIRT_READ_VMON,
+               .class = PSC_VOLTAGE_IN,
+               .label = "vmon",
+               .func = PMBUS_HAVE_VMON,
+               .sfunc = PMBUS_HAVE_STATUS_VMON,
+               .sbase = PB_STATUS_VMON_BASE,
+               .limit = vmon_limit_attrs,
+               .nlimit = ARRAY_SIZE(vmon_limit_attrs),
        }, {
                .reg = PMBUS_READ_VCAP,
                .class = PSC_VOLTAGE_IN,
@@ -1553,12 +1534,13 @@ static const u32 pmbus_fan_status_flags[] = {
 };
 
 /* Fans */
-static void pmbus_add_fan_attributes(struct i2c_client *client,
-                                    struct pmbus_data *data)
+static int pmbus_add_fan_attributes(struct i2c_client *client,
+                                   struct pmbus_data *data)
 {
        const struct pmbus_driver_info *info = data->info;
        int index = 1;
        int page;
+       int ret;
 
        for (page = 0; page < info->pages; page++) {
                int f;
@@ -1584,9 +1566,10 @@ static void pmbus_add_fan_attributes(struct i2c_client *client,
                            (!(regval & (PB_FAN_1_INSTALLED >> ((f & 1) * 4)))))
                                continue;
 
-                       pmbus_add_sensor(data, "fan", "input", index, page,
-                                        pmbus_fan_registers[f], PSC_FAN, true,
-                                        true);
+                       if (pmbus_add_sensor(data, "fan", "input", index,
+                                            page, pmbus_fan_registers[f],
+                                            PSC_FAN, true, true) == NULL)
+                               return -ENOMEM;
 
                        /*
                         * Each fan status register covers multiple fans,
@@ -1601,39 +1584,55 @@ static void pmbus_add_fan_attributes(struct i2c_client *client,
                                        base = PB_STATUS_FAN34_BASE + page;
                                else
                                        base = PB_STATUS_FAN_BASE + page;
-                               pmbus_add_boolean_reg(data, "fan", "alarm",
-                                       index, base,
+                               ret = pmbus_add_boolean(data, "fan",
+                                       "alarm", index, NULL, NULL, base,
                                        PB_FAN_FAN1_WARNING >> (f & 1));
-                               pmbus_add_boolean_reg(data, "fan", "fault",
-                                       index, base,
+                               if (ret)
+                                       return ret;
+                               ret = pmbus_add_boolean(data, "fan",
+                                       "fault", index, NULL, NULL, base,
                                        PB_FAN_FAN1_FAULT >> (f & 1));
+                               if (ret)
+                                       return ret;
                        }
                        index++;
                }
        }
+       return 0;
 }
 
-static void pmbus_find_attributes(struct i2c_client *client,
-                                 struct pmbus_data *data)
+static int pmbus_find_attributes(struct i2c_client *client,
+                                struct pmbus_data *data)
 {
+       int ret;
+
        /* Voltage sensors */
-       pmbus_add_sensor_attrs(client, data, "in", voltage_attributes,
-                              ARRAY_SIZE(voltage_attributes));
+       ret = pmbus_add_sensor_attrs(client, data, "in", voltage_attributes,
+                                    ARRAY_SIZE(voltage_attributes));
+       if (ret)
+               return ret;
 
        /* Current sensors */
-       pmbus_add_sensor_attrs(client, data, "curr", current_attributes,
-                              ARRAY_SIZE(current_attributes));
+       ret = pmbus_add_sensor_attrs(client, data, "curr", current_attributes,
+                                    ARRAY_SIZE(current_attributes));
+       if (ret)
+               return ret;
 
        /* Power sensors */
-       pmbus_add_sensor_attrs(client, data, "power", power_attributes,
-                              ARRAY_SIZE(power_attributes));
+       ret = pmbus_add_sensor_attrs(client, data, "power", power_attributes,
+                                    ARRAY_SIZE(power_attributes));
+       if (ret)
+               return ret;
 
        /* Temperature sensors */
-       pmbus_add_sensor_attrs(client, data, "temp", temp_attributes,
-                              ARRAY_SIZE(temp_attributes));
+       ret = pmbus_add_sensor_attrs(client, data, "temp", temp_attributes,
+                                    ARRAY_SIZE(temp_attributes));
+       if (ret)
+               return ret;
 
        /* Fans */
-       pmbus_add_fan_attributes(client, data);
+       ret = pmbus_add_fan_attributes(client, data);
+       return ret;
 }
 
 /*
@@ -1672,127 +1671,119 @@ static int pmbus_identify_common(struct i2c_client *client,
                }
        }
 
-       /* Determine maximum number of sensors, booleans, and labels */
-       pmbus_find_max_attr(client, data);
        pmbus_clear_fault_page(client, 0);
        return 0;
 }
 
-int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
-                  struct pmbus_driver_info *info)
+static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
+                            struct pmbus_driver_info *info)
 {
-       const struct pmbus_platform_data *pdata = client->dev.platform_data;
-       struct pmbus_data *data;
+       struct device *dev = &client->dev;
        int ret;
 
-       if (!info) {
-               dev_err(&client->dev, "Missing chip information");
-               return -ENODEV;
-       }
-
-       if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE
-                                    | I2C_FUNC_SMBUS_BYTE_DATA
-                                    | I2C_FUNC_SMBUS_WORD_DATA))
-               return -ENODEV;
-
-       data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
-       if (!data) {
-               dev_err(&client->dev, "No memory to allocate driver data\n");
-               return -ENOMEM;
-       }
-
-       i2c_set_clientdata(client, data);
-       mutex_init(&data->update_lock);
-
-       /* Bail out if PMBus status register does not exist. */
-       if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0) {
-               dev_err(&client->dev, "PMBus status register not found\n");
-               return -ENODEV;
+       /*
+        * Some PMBus chips don't support PMBUS_STATUS_BYTE, so try
+        * to use PMBUS_STATUS_WORD instead if that is the case.
+        * Bail out if both registers are not supported.
+        */
+       data->status_register = PMBUS_STATUS_BYTE;
+       ret = i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE);
+       if (ret < 0 || ret == 0xff) {
+               data->status_register = PMBUS_STATUS_WORD;
+               ret = i2c_smbus_read_word_data(client, PMBUS_STATUS_WORD);
+               if (ret < 0 || ret == 0xffff) {
+                       dev_err(dev, "PMBus status register not found\n");
+                       return -ENODEV;
+               }
        }
 
-       if (pdata)
-               data->flags = pdata->flags;
-       data->info = info;
-
        pmbus_clear_faults(client);
 
        if (info->identify) {
                ret = (*info->identify)(client, info);
                if (ret < 0) {
-                       dev_err(&client->dev, "Chip identification failed\n");
+                       dev_err(dev, "Chip identification failed\n");
                        return ret;
                }
        }
 
        if (info->pages <= 0 || info->pages > PMBUS_PAGES) {
-               dev_err(&client->dev, "Bad number of PMBus pages: %d\n",
-                       info->pages);
+               dev_err(dev, "Bad number of PMBus pages: %d\n", info->pages);
                return -ENODEV;
        }
 
        ret = pmbus_identify_common(client, data);
        if (ret < 0) {
-               dev_err(&client->dev, "Failed to identify chip capabilities\n");
+               dev_err(dev, "Failed to identify chip capabilities\n");
                return ret;
        }
+       return 0;
+}
 
-       ret = -ENOMEM;
-       data->sensors = devm_kzalloc(&client->dev, sizeof(struct pmbus_sensor)
-                                    * data->max_sensors, GFP_KERNEL);
-       if (!data->sensors) {
-               dev_err(&client->dev, "No memory to allocate sensor data\n");
-               return -ENOMEM;
-       }
+int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
+                  struct pmbus_driver_info *info)
+{
+       struct device *dev = &client->dev;
+       const struct pmbus_platform_data *pdata = dev->platform_data;
+       struct pmbus_data *data;
+       int ret;
 
-       data->booleans = devm_kzalloc(&client->dev, sizeof(struct pmbus_boolean)
-                                * data->max_booleans, GFP_KERNEL);
-       if (!data->booleans) {
-               dev_err(&client->dev, "No memory to allocate boolean data\n");
-               return -ENOMEM;
-       }
+       if (!info)
+               return -ENODEV;
 
-       data->labels = devm_kzalloc(&client->dev, sizeof(struct pmbus_label)
-                                   * data->max_labels, GFP_KERNEL);
-       if (!data->labels) {
-               dev_err(&client->dev, "No memory to allocate label data\n");
-               return -ENOMEM;
-       }
+       if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE
+                                    | I2C_FUNC_SMBUS_BYTE_DATA
+                                    | I2C_FUNC_SMBUS_WORD_DATA))
+               return -ENODEV;
 
-       data->attributes = devm_kzalloc(&client->dev, sizeof(struct attribute *)
-                                       * data->max_attributes, GFP_KERNEL);
-       if (!data->attributes) {
-               dev_err(&client->dev, "No memory to allocate attribute data\n");
+       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+       if (!data)
                return -ENOMEM;
-       }
 
-       pmbus_find_attributes(client, data);
+       i2c_set_clientdata(client, data);
+       mutex_init(&data->update_lock);
+       data->dev = dev;
+
+       if (pdata)
+               data->flags = pdata->flags;
+       data->info = info;
+
+       ret = pmbus_init_common(client, data, info);
+       if (ret < 0)
+               return ret;
+
+       ret = pmbus_find_attributes(client, data);
+       if (ret)
+               goto out_kfree;
 
        /*
         * If there are no attributes, something is wrong.
         * Bail out instead of trying to register nothing.
         */
        if (!data->num_attributes) {
-               dev_err(&client->dev, "No attributes found\n");
-               return -ENODEV;
+               dev_err(dev, "No attributes found\n");
+               ret = -ENODEV;
+               goto out_kfree;
        }
 
        /* Register sysfs hooks */
-       data->group.attrs = data->attributes;
-       ret = sysfs_create_group(&client->dev.kobj, &data->group);
+       ret = sysfs_create_group(&dev->kobj, &data->group);
        if (ret) {
-               dev_err(&client->dev, "Failed to create sysfs entries\n");
-               return ret;
+               dev_err(dev, "Failed to create sysfs entries\n");
+               goto out_kfree;
        }
-       data->hwmon_dev = hwmon_device_register(&client->dev);
+       data->hwmon_dev = hwmon_device_register(dev);
        if (IS_ERR(data->hwmon_dev)) {
                ret = PTR_ERR(data->hwmon_dev);
-               dev_err(&client->dev, "Failed to register hwmon device\n");
+               dev_err(dev, "Failed to register hwmon device\n");
                goto out_hwmon_device_register;
        }
        return 0;
 
 out_hwmon_device_register:
-       sysfs_remove_group(&client->dev.kobj, &data->group);
+       sysfs_remove_group(&dev->kobj, &data->group);
+out_kfree:
+       kfree(data->group.attrs);
        return ret;
 }
 EXPORT_SYMBOL_GPL(pmbus_do_probe);
@@ -1802,6 +1793,7 @@ int pmbus_do_remove(struct i2c_client *client)
        struct pmbus_data *data = i2c_get_clientdata(client);
        hwmon_device_unregister(data->hwmon_dev);
        sysfs_remove_group(&client->dev.kobj, &data->group);
+       kfree(data->group.attrs);
        return 0;
 }
 EXPORT_SYMBOL_GPL(pmbus_do_remove);
index fc5eed8e85bb94f6e703a50dfe6bcf814353a801..8196441212592d14a6cd49cacc041443918321c9 100644 (file)
@@ -2,6 +2,7 @@
  * Hardware monitoring driver for ZL6100 and compatibles
  *
  * Copyright (c) 2011 Ericsson AB.
+ * Copyright (c) 2012 Guenter Roeck
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -45,12 +46,87 @@ struct zl6100_data {
 
 #define ZL6100_MFR_XTEMP_ENABLE                (1 << 7)
 
+#define MFR_VMON_OV_FAULT_LIMIT                0xf5
+#define MFR_VMON_UV_FAULT_LIMIT                0xf6
+#define MFR_READ_VMON                  0xf7
+
+#define VMON_UV_WARNING                        (1 << 5)
+#define VMON_OV_WARNING                        (1 << 4)
+#define VMON_UV_FAULT                  (1 << 1)
+#define VMON_OV_FAULT                  (1 << 0)
+
 #define ZL6100_WAIT_TIME               1000    /* uS   */
 
 static ushort delay = ZL6100_WAIT_TIME;
 module_param(delay, ushort, 0644);
 MODULE_PARM_DESC(delay, "Delay between chip accesses in uS");
 
+/* Convert linear sensor value to milli-units */
+static long zl6100_l2d(s16 l)
+{
+       s16 exponent;
+       s32 mantissa;
+       long val;
+
+       exponent = l >> 11;
+       mantissa = ((s16)((l & 0x7ff) << 5)) >> 5;
+
+       val = mantissa;
+
+       /* scale result to milli-units */
+       val = val * 1000L;
+
+       if (exponent >= 0)
+               val <<= exponent;
+       else
+               val >>= -exponent;
+
+       return val;
+}
+
+#define MAX_MANTISSA   (1023 * 1000)
+#define MIN_MANTISSA   (511 * 1000)
+
+static u16 zl6100_d2l(long val)
+{
+       s16 exponent = 0, mantissa;
+       bool negative = false;
+
+       /* simple case */
+       if (val == 0)
+               return 0;
+
+       if (val < 0) {
+               negative = true;
+               val = -val;
+       }
+
+       /* Reduce large mantissa until it fits into 10 bit */
+       while (val >= MAX_MANTISSA && exponent < 15) {
+               exponent++;
+               val >>= 1;
+       }
+       /* Increase small mantissa to improve precision */
+       while (val < MIN_MANTISSA && exponent > -15) {
+               exponent--;
+               val <<= 1;
+       }
+
+       /* Convert mantissa from milli-units to units */
+       mantissa = DIV_ROUND_CLOSEST(val, 1000);
+
+       /* Ensure that resulting number is within range */
+       if (mantissa > 0x3ff)
+               mantissa = 0x3ff;
+
+       /* restore sign */
+       if (negative)
+               mantissa = -mantissa;
+
+       /* Convert to 5 bit exponent, 11 bit mantissa */
+       return (mantissa & 0x7ff) | ((exponent << 11) & 0xf800);
+}
+
 /* Some chips need a delay between accesses */
 static inline void zl6100_wait(const struct zl6100_data *data)
 {
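
The two helpers just added, zl6100_l2d() and zl6100_d2l(), convert between milli-units and the PMBus LINEAR11 encoding used by the manufacturer-specific VMON limit registers: an 11-bit two's-complement mantissa in the low bits, a 5-bit two's-complement exponent in the high bits, value = mantissa * 2^exponent. A quick round trip through them, as an editor's illustration rather than part of the patch:

	u16 raw = zl6100_d2l(12000);	/* 12000 milli-units is below MIN_MANTISSA,
					 * so it is doubled six times to 768000;
					 * mantissa 768, exponent -6, raw = 0xd300 */
	long mu = zl6100_l2d(raw);	/* 768 * 1000 >> 6 = 12000 milli-units again */
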
@@ -65,9 +141,9 @@ static int zl6100_read_word_data(struct i2c_client *client, int page, int reg)
 {
        const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
        struct zl6100_data *data = to_zl6100_data(info);
-       int ret;
+       int ret, vreg;
 
-       if (page || reg >= PMBUS_VIRT_BASE)
+       if (page > 0)
                return -ENXIO;
 
        if (data->id == zl2005) {
@@ -83,9 +159,39 @@ static int zl6100_read_word_data(struct i2c_client *client, int page, int reg)
                }
        }
 
+       switch (reg) {
+       case PMBUS_VIRT_READ_VMON:
+               vreg = MFR_READ_VMON;
+               break;
+       case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
+       case PMBUS_VIRT_VMON_OV_FAULT_LIMIT:
+               vreg = MFR_VMON_OV_FAULT_LIMIT;
+               break;
+       case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
+       case PMBUS_VIRT_VMON_UV_FAULT_LIMIT:
+               vreg = MFR_VMON_UV_FAULT_LIMIT;
+               break;
+       default:
+               if (reg >= PMBUS_VIRT_BASE)
+                       return -ENXIO;
+               vreg = reg;
+               break;
+       }
+
        zl6100_wait(data);
-       ret = pmbus_read_word_data(client, page, reg);
+       ret = pmbus_read_word_data(client, page, vreg);
        data->access = ktime_get();
+       if (ret < 0)
+               return ret;
+
+       switch (reg) {
+       case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
+               ret = zl6100_d2l(DIV_ROUND_CLOSEST(zl6100_l2d(ret) * 9, 10));
+               break;
+       case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
+               ret = zl6100_d2l(DIV_ROUND_CLOSEST(zl6100_l2d(ret) * 11, 10));
+               break;
+       }
 
        return ret;
 }
@@ -94,13 +200,35 @@ static int zl6100_read_byte_data(struct i2c_client *client, int page, int reg)
 {
        const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
        struct zl6100_data *data = to_zl6100_data(info);
-       int ret;
+       int ret, status;
 
        if (page > 0)
                return -ENXIO;
 
        zl6100_wait(data);
-       ret = pmbus_read_byte_data(client, page, reg);
+
+       switch (reg) {
+       case PMBUS_VIRT_STATUS_VMON:
+               ret = pmbus_read_byte_data(client, 0,
+                                          PMBUS_STATUS_MFR_SPECIFIC);
+               if (ret < 0)
+                       break;
+
+               status = 0;
+               if (ret & VMON_UV_WARNING)
+                       status |= PB_VOLTAGE_UV_WARNING;
+               if (ret & VMON_OV_WARNING)
+                       status |= PB_VOLTAGE_OV_WARNING;
+               if (ret & VMON_UV_FAULT)
+                       status |= PB_VOLTAGE_UV_FAULT;
+               if (ret & VMON_OV_FAULT)
+                       status |= PB_VOLTAGE_OV_FAULT;
+               ret = status;
+               break;
+       default:
+               ret = pmbus_read_byte_data(client, page, reg);
+               break;
+       }
        data->access = ktime_get();
 
        return ret;
@@ -111,13 +239,38 @@ static int zl6100_write_word_data(struct i2c_client *client, int page, int reg,
 {
        const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
        struct zl6100_data *data = to_zl6100_data(info);
-       int ret;
+       int ret, vreg;
 
-       if (page || reg >= PMBUS_VIRT_BASE)
+       if (page > 0)
                return -ENXIO;
 
+       switch (reg) {
+       case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
+               word = zl6100_d2l(DIV_ROUND_CLOSEST(zl6100_l2d(word) * 10, 9));
+               vreg = MFR_VMON_OV_FAULT_LIMIT;
+               pmbus_clear_cache(client);
+               break;
+       case PMBUS_VIRT_VMON_OV_FAULT_LIMIT:
+               vreg = MFR_VMON_OV_FAULT_LIMIT;
+               pmbus_clear_cache(client);
+               break;
+       case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
+               word = zl6100_d2l(DIV_ROUND_CLOSEST(zl6100_l2d(word) * 10, 11));
+               vreg = MFR_VMON_UV_FAULT_LIMIT;
+               pmbus_clear_cache(client);
+               break;
+       case PMBUS_VIRT_VMON_UV_FAULT_LIMIT:
+               vreg = MFR_VMON_UV_FAULT_LIMIT;
+               pmbus_clear_cache(client);
+               break;
+       default:
+               if (reg >= PMBUS_VIRT_BASE)
+                       return -ENXIO;
+               vreg = reg;
+       }
+
        zl6100_wait(data);
-       ret = pmbus_write_word_data(client, page, reg, word);
+       ret = pmbus_write_word_data(client, page, vreg, word);
        data->access = ktime_get();
 
        return ret;
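
The VMON limit handling in the read and write paths above is symmetric. The chip only exposes MFR_VMON_OV/UV_FAULT_LIMIT, so the warning limits are synthesized from them: on read the OV warning is reported as 90% of the OV fault limit and the UV warning as 110% of the UV fault limit, and writes to the warning attributes apply the inverse factor before the value lands in the fault register (pmbus_clear_cache() is presumably there because the fault and warning attributes share one hardware register, so cached values must be re-read). Numerically, ignoring LINEAR11 quantization:

	/* OV fault limit = 12.000 V:
	 *   read  vmon OV warn:          12000 *  9 / 10 = 10800 (10.800 V shown)
	 *   write vmon OV warn = 10800:  10800 * 10 /  9 = 12000 stored as the fault limit
	 * UV fault limit = 10.000 V:
	 *   read  vmon UV warn:          10000 * 11 / 10 = 11000 (11.000 V shown)
	 *   write vmon UV warn = 11000:  11000 * 10 / 11 = 10000 stored as the fault limit
	 */
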
@@ -225,6 +378,13 @@ static int zl6100_probe(struct i2c_client *client,
          | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
          | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
 
+       /*
+        * ZL2004, ZL9101M, and ZL9117M support monitoring an extra voltage
+        * (VMON for ZL2004, VDRV for ZL9101M and ZL9117M). Report it as vmon.
+        */
+       if (data->id == zl2004 || data->id == zl9101 || data->id == zl9117)
+               info->func[0] |= PMBUS_HAVE_VMON | PMBUS_HAVE_STATUS_VMON;
+
        ret = i2c_smbus_read_word_data(client, ZL6100_MFR_CONFIG);
        if (ret < 0)
                return ret;
index 1c85d39df171670a4727cb95484d3cd5acbf5721..bfe326e896dfd6d277ed22aec18ff9431ae8b8c1 100644 (file)
@@ -139,12 +139,12 @@ static const u8 sht15_crc8_table[] = {
  * @reg:               associated regulator (if specified).
  * @nb:                        notifier block to handle notifications of voltage
  *                      changes.
- * @supply_uV:         local copy of supply voltage used to allow use of
+ * @supply_uv:         local copy of supply voltage used to allow use of
  *                      regulator consumer if available.
- * @supply_uV_valid:   indicates that an updated value has not yet been
+ * @supply_uv_valid:   indicates that an updated value has not yet been
  *                     obtained from the regulator and so any calculations
  *                     based upon it will be invalid.
- * @update_supply_work:        work struct that is used to update the supply_uV.
+ * @update_supply_work:        work struct that is used to update the supply_uv.
  * @interrupt_handled: flag used to indicate a handler has been scheduled.
  */
 struct sht15_data {
@@ -166,8 +166,8 @@ struct sht15_data {
        struct device                   *hwmon_dev;
        struct regulator                *reg;
        struct notifier_block           nb;
-       int                             supply_uV;
-       bool                            supply_uV_valid;
+       int                             supply_uv;
+       bool                            supply_uv_valid;
        struct work_struct              update_supply_work;
        atomic_t                        interrupt_handled;
 };
@@ -212,11 +212,13 @@ static u8 sht15_crc8(struct sht15_data *data,
  *
  * This implements section 3.4 of the data sheet
  */
-static void sht15_connection_reset(struct sht15_data *data)
+static int sht15_connection_reset(struct sht15_data *data)
 {
-       int i;
+       int i, err;
 
-       gpio_direction_output(data->pdata->gpio_data, 1);
+       err = gpio_direction_output(data->pdata->gpio_data, 1);
+       if (err)
+               return err;
        ndelay(SHT15_TSCKL);
        gpio_set_value(data->pdata->gpio_sck, 0);
        ndelay(SHT15_TSCKL);
@@ -226,6 +228,7 @@ static void sht15_connection_reset(struct sht15_data *data)
                gpio_set_value(data->pdata->gpio_sck, 0);
                ndelay(SHT15_TSCKL);
        }
+       return 0;
 }
 
 /**
@@ -251,10 +254,14 @@ static inline void sht15_send_bit(struct sht15_data *data, int val)
  * conservative ones used in implementation. This implements
  * figure 12 on the data sheet.
  */
-static void sht15_transmission_start(struct sht15_data *data)
+static int sht15_transmission_start(struct sht15_data *data)
 {
+       int err;
+
        /* ensure data is high and output */
-       gpio_direction_output(data->pdata->gpio_data, 1);
+       err = gpio_direction_output(data->pdata->gpio_data, 1);
+       if (err)
+               return err;
        ndelay(SHT15_TSU);
        gpio_set_value(data->pdata->gpio_sck, 0);
        ndelay(SHT15_TSCKL);
@@ -270,6 +277,7 @@ static void sht15_transmission_start(struct sht15_data *data)
        ndelay(SHT15_TSU);
        gpio_set_value(data->pdata->gpio_sck, 0);
        ndelay(SHT15_TSCKL);
+       return 0;
 }
 
 /**
@@ -293,13 +301,19 @@ static void sht15_send_byte(struct sht15_data *data, u8 byte)
  */
 static int sht15_wait_for_response(struct sht15_data *data)
 {
-       gpio_direction_input(data->pdata->gpio_data);
+       int err;
+
+       err = gpio_direction_input(data->pdata->gpio_data);
+       if (err)
+               return err;
        gpio_set_value(data->pdata->gpio_sck, 1);
        ndelay(SHT15_TSCKH);
        if (gpio_get_value(data->pdata->gpio_data)) {
                gpio_set_value(data->pdata->gpio_sck, 0);
                dev_err(data->dev, "Command not acknowledged\n");
-               sht15_connection_reset(data);
+               err = sht15_connection_reset(data);
+               if (err)
+                       return err;
                return -EIO;
        }
        gpio_set_value(data->pdata->gpio_sck, 0);
@@ -317,12 +331,13 @@ static int sht15_wait_for_response(struct sht15_data *data)
  */
 static int sht15_send_cmd(struct sht15_data *data, u8 cmd)
 {
-       int ret = 0;
+       int err;
 
-       sht15_transmission_start(data);
+       err = sht15_transmission_start(data);
+       if (err)
+               return err;
        sht15_send_byte(data, cmd);
-       ret = sht15_wait_for_response(data);
-       return ret;
+       return sht15_wait_for_response(data);
 }
 
 /**
@@ -352,9 +367,13 @@ static int sht15_soft_reset(struct sht15_data *data)
  * Each byte of data is acknowledged by pulling the data line
  * low for one clock pulse.
  */
-static void sht15_ack(struct sht15_data *data)
+static int sht15_ack(struct sht15_data *data)
 {
-       gpio_direction_output(data->pdata->gpio_data, 0);
+       int err;
+
+       err = gpio_direction_output(data->pdata->gpio_data, 0);
+       if (err)
+               return err;
        ndelay(SHT15_TSU);
        gpio_set_value(data->pdata->gpio_sck, 1);
        ndelay(SHT15_TSU);
@@ -362,7 +381,7 @@ static void sht15_ack(struct sht15_data *data)
        ndelay(SHT15_TSU);
        gpio_set_value(data->pdata->gpio_data, 1);
 
-       gpio_direction_input(data->pdata->gpio_data);
+       return gpio_direction_input(data->pdata->gpio_data);
 }
 
 /**
@@ -371,14 +390,19 @@ static void sht15_ack(struct sht15_data *data)
  *
  * This is basically a NAK (single clock pulse, data high).
  */
-static void sht15_end_transmission(struct sht15_data *data)
+static int sht15_end_transmission(struct sht15_data *data)
 {
-       gpio_direction_output(data->pdata->gpio_data, 1);
+       int err;
+
+       err = gpio_direction_output(data->pdata->gpio_data, 1);
+       if (err)
+               return err;
        ndelay(SHT15_TSU);
        gpio_set_value(data->pdata->gpio_sck, 1);
        ndelay(SHT15_TSCKH);
        gpio_set_value(data->pdata->gpio_sck, 0);
        ndelay(SHT15_TSCKL);
+       return 0;
 }
 
 /**
@@ -410,17 +434,19 @@ static u8 sht15_read_byte(struct sht15_data *data)
  */
 static int sht15_send_status(struct sht15_data *data, u8 status)
 {
-       int ret;
-
-       ret = sht15_send_cmd(data, SHT15_WRITE_STATUS);
-       if (ret)
-               return ret;
-       gpio_direction_output(data->pdata->gpio_data, 1);
+       int err;
+
+       err = sht15_send_cmd(data, SHT15_WRITE_STATUS);
+       if (err)
+               return err;
+       err = gpio_direction_output(data->pdata->gpio_data, 1);
+       if (err)
+               return err;
        ndelay(SHT15_TSU);
        sht15_send_byte(data, status);
-       ret = sht15_wait_for_response(data);
-       if (ret)
-               return ret;
+       err = sht15_wait_for_response(data);
+       if (err)
+               return err;
 
        data->val_status = status;
        return 0;
@@ -446,7 +472,7 @@ static int sht15_update_status(struct sht15_data *data)
                        || !data->status_valid) {
                ret = sht15_send_cmd(data, SHT15_READ_STATUS);
                if (ret)
-                       goto error_ret;
+                       goto unlock;
                status = sht15_read_byte(data);
 
                if (data->checksumming) {
@@ -458,7 +484,9 @@ static int sht15_update_status(struct sht15_data *data)
                                        == dev_checksum);
                }
 
-               sht15_end_transmission(data);
+               ret = sht15_end_transmission(data);
+               if (ret)
+                       goto unlock;
 
                /*
                 * Perform checksum validation on the received data.
@@ -469,27 +497,27 @@ static int sht15_update_status(struct sht15_data *data)
                        previous_config = data->val_status & 0x07;
                        ret = sht15_soft_reset(data);
                        if (ret)
-                               goto error_ret;
+                               goto unlock;
                        if (previous_config) {
                                ret = sht15_send_status(data, previous_config);
                                if (ret) {
                                        dev_err(data->dev,
                                                "CRC validation failed, unable "
                                                "to restore device settings\n");
-                                       goto error_ret;
+                                       goto unlock;
                                }
                        }
                        ret = -EAGAIN;
-                       goto error_ret;
+                       goto unlock;
                }
 
                data->val_status = status;
                data->status_valid = true;
                data->last_status = jiffies;
        }
-error_ret:
-       mutex_unlock(&data->read_lock);
 
+unlock:
+       mutex_unlock(&data->read_lock);
        return ret;
 }
 
@@ -511,7 +539,9 @@ static int sht15_measurement(struct sht15_data *data,
        if (ret)
                return ret;
 
-       gpio_direction_input(data->pdata->gpio_data);
+       ret = gpio_direction_input(data->pdata->gpio_data);
+       if (ret)
+               return ret;
        atomic_set(&data->interrupt_handled, 0);
 
        enable_irq(gpio_to_irq(data->pdata->gpio_data));
@@ -524,9 +554,14 @@ static int sht15_measurement(struct sht15_data *data,
        ret = wait_event_timeout(data->wait_queue,
                                 (data->state == SHT15_READING_NOTHING),
                                 msecs_to_jiffies(timeout_msecs));
-       if (ret == 0) {/* timeout occurred */
+       if (data->state != SHT15_READING_NOTHING) { /* I/O error occurred */
+               data->state = SHT15_READING_NOTHING;
+               return -EIO;
+       } else if (ret == 0) { /* timeout occurred */
                disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
-               sht15_connection_reset(data);
+               ret = sht15_connection_reset(data);
+               if (ret)
+                       return ret;
                return -ETIME;
        }
 
@@ -570,17 +605,17 @@ static int sht15_update_measurements(struct sht15_data *data)
                data->state = SHT15_READING_HUMID;
                ret = sht15_measurement(data, SHT15_MEASURE_RH, 160);
                if (ret)
-                       goto error_ret;
+                       goto unlock;
                data->state = SHT15_READING_TEMP;
                ret = sht15_measurement(data, SHT15_MEASURE_TEMP, 400);
                if (ret)
-                       goto error_ret;
+                       goto unlock;
                data->measurements_valid = true;
                data->last_measurement = jiffies;
        }
-error_ret:
-       mutex_unlock(&data->read_lock);
 
+unlock:
+       mutex_unlock(&data->read_lock);
        return ret;
 }
 
@@ -598,8 +633,8 @@ static inline int sht15_calc_temp(struct sht15_data *data)
 
        for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--)
                /* Find pointer to interpolate */
-               if (data->supply_uV > temppoints[i - 1].vdd) {
-                       d1 = (data->supply_uV - temppoints[i - 1].vdd)
+               if (data->supply_uv > temppoints[i - 1].vdd) {
+                       d1 = (data->supply_uv - temppoints[i - 1].vdd)
                                * (temppoints[i].d1 - temppoints[i - 1].d1)
                                / (temppoints[i].vdd - temppoints[i - 1].vdd)
                                + temppoints[i - 1].d1;
@@ -818,7 +853,8 @@ static void sht15_bh_read_data(struct work_struct *work_s)
        /* Read the data back from the device */
        val = sht15_read_byte(data);
        val <<= 8;
-       sht15_ack(data);
+       if (sht15_ack(data))
+               goto wakeup;
        val |= sht15_read_byte(data);
 
        if (data->checksumming) {
@@ -826,7 +862,8 @@ static void sht15_bh_read_data(struct work_struct *work_s)
                 * Ask the device for a checksum and read it back.
                 * Note: the device sends the checksum byte reversed.
                 */
-               sht15_ack(data);
+               if (sht15_ack(data))
+                       goto wakeup;
                dev_checksum = sht15_reverse(sht15_read_byte(data));
                checksum_vals[0] = (data->state == SHT15_READING_TEMP) ?
                        SHT15_MEASURE_TEMP : SHT15_MEASURE_RH;
@@ -837,7 +874,8 @@ static void sht15_bh_read_data(struct work_struct *work_s)
        }
 
        /* Tell the device we are done */
-       sht15_end_transmission(data);
+       if (sht15_end_transmission(data))
+               goto wakeup;
 
        switch (data->state) {
        case SHT15_READING_TEMP:
@@ -851,6 +889,7 @@ static void sht15_bh_read_data(struct work_struct *work_s)
        }
 
        data->state = SHT15_READING_NOTHING;
+wakeup:
        wake_up(&data->wait_queue);
 }
 
@@ -859,7 +898,7 @@ static void sht15_update_voltage(struct work_struct *work_s)
        struct sht15_data *data
                = container_of(work_s, struct sht15_data,
                               update_supply_work);
-       data->supply_uV = regulator_get_voltage(data->reg);
+       data->supply_uv = regulator_get_voltage(data->reg);
 }
 
 /**
@@ -878,7 +917,7 @@ static int sht15_invalidate_voltage(struct notifier_block *nb,
        struct sht15_data *data = container_of(nb, struct sht15_data, nb);
 
        if (event == REGULATOR_EVENT_VOLTAGE_CHANGE)
-               data->supply_uV_valid = false;
+               data->supply_uv_valid = false;
        schedule_work(&data->update_supply_work);
 
        return NOTIFY_OK;
@@ -906,7 +945,7 @@ static int sht15_probe(struct platform_device *pdev)
                return -EINVAL;
        }
        data->pdata = pdev->dev.platform_data;
-       data->supply_uV = data->pdata->supply_mv * 1000;
+       data->supply_uv = data->pdata->supply_mv * 1000;
        if (data->pdata->checksum)
                data->checksumming = true;
        if (data->pdata->no_otp_reload)
@@ -924,7 +963,7 @@ static int sht15_probe(struct platform_device *pdev)
 
                voltage = regulator_get_voltage(data->reg);
                if (voltage)
-                       data->supply_uV = voltage;
+                       data->supply_uv = voltage;
 
                regulator_enable(data->reg);
                /*
@@ -942,17 +981,17 @@ static int sht15_probe(struct platform_device *pdev)
        }
 
        /* Try requesting the GPIOs */
-       ret = devm_gpio_request(&pdev->dev, data->pdata->gpio_sck, "SHT15 sck");
+       ret = devm_gpio_request_one(&pdev->dev, data->pdata->gpio_sck,
+                       GPIOF_OUT_INIT_LOW, "SHT15 sck");
        if (ret) {
-               dev_err(&pdev->dev, "gpio request failed\n");
+               dev_err(&pdev->dev, "clock line GPIO request failed\n");
                goto err_release_reg;
        }
-       gpio_direction_output(data->pdata->gpio_sck, 0);
 
        ret = devm_gpio_request(&pdev->dev, data->pdata->gpio_data,
                                "SHT15 data");
        if (ret) {
-               dev_err(&pdev->dev, "gpio request failed\n");
+               dev_err(&pdev->dev, "data line GPIO request failed\n");
                goto err_release_reg;
        }
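
devm_gpio_request_one() folds the request and the initial line configuration into one call, which is why the separate gpio_direction_output(gpio_sck, 0) disappears above: GPIOF_OUT_INIT_LOW sets the direction and drives the clock line low in one step, and a failure there now takes the same error path as the request itself. Roughly, with gpio_sck standing in for data->pdata->gpio_sck (editor's sketch, not part of the patch):

	/* before: request, then set the direction separately (unchecked) */
	ret = devm_gpio_request(&pdev->dev, gpio_sck, "SHT15 sck");
	gpio_direction_output(gpio_sck, 0);

	/* after: one call, output low from the start, error checked */
	ret = devm_gpio_request_one(&pdev->dev, gpio_sck,
				    GPIOF_OUT_INIT_LOW, "SHT15 sck");
	if (ret)
		goto err_release_reg;
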
 
@@ -966,7 +1005,9 @@ static int sht15_probe(struct platform_device *pdev)
                goto err_release_reg;
        }
        disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
-       sht15_connection_reset(data);
+       ret = sht15_connection_reset(data);
+       if (ret)
+               goto err_release_reg;
        ret = sht15_soft_reset(data);
        if (ret)
                goto err_release_reg;
index 06ce3c911db9bbc1bdc1169ec420a79b3b818f55..c35847a1a0a32c27c8d2aa1d87c6c179ed13f91e 100644 (file)
@@ -132,7 +132,7 @@ static struct platform_device *pdev;
  */
 static inline u8 IN_TO_REG(unsigned long val)
 {
-       unsigned long nval = SENSORS_LIMIT(val, 0, 4080);
+       unsigned long nval = clamp_val(val, 0, 4080);
        return (nval + 8) / 16;
 }
 #define IN_FROM_REG(val) ((val) *  16)
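
From here on the series is largely mechanical: the hwmon-private SENSORS_LIMIT() macro is replaced by clamp_val() from linux/kernel.h. Both confine a value to a [low, high] range; clamp_val() casts the bounds to the type of the value and, unlike the old ternary macro, evaluates its arguments only once, so the substitution should be behaviour-preserving. A tiny illustration, as an editor's note:

	unsigned long val = 5000;
	val = clamp_val(val, 0, 4080);	/* 4080, exactly what the old
					 * SENSORS_LIMIT(val, 0, 4080) returned */
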
@@ -141,7 +141,7 @@ static inline u8 FAN_TO_REG(long rpm, int div)
 {
        if (rpm <= 0)
                return 255;
-       return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+       return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
 }
 
 static inline int FAN_FROM_REG(u8 val, int div)
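
The 1350000 constant that keeps appearing in these fan conversions is the conventional hwmon tachometer base (commonly attributed to a 22.5 kHz sampling clock, 22500 * 60 = 1350000): register and RPM are related by reg = 1350000 / (rpm * div), and the extra rpm * div / 2 term rounds to the nearest count instead of truncating. A worked example, as an editor's note:

	/* 3000 RPM, divisor 2:
	 *   FAN_TO_REG(3000, 2) = (1350000 + 3000) / 6000 = 225
	 *   and the inverse conversion gives 1350000 / (225 * 2) = 3000 RPM back
	 */
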
@@ -159,7 +159,7 @@ static inline int TEMP_FROM_REG(s8 val)
 }
 static inline s8 TEMP_TO_REG(int val)
 {
-       int nval = SENSORS_LIMIT(val, -54120, 157530) ;
+       int nval = clamp_val(val, -54120, 157530) ;
        return nval < 0 ? (nval - 5212 - 415) / 830 : (nval - 5212 + 415) / 830;
 }
 
index dba0c567e7a1a730f60f2ffaff6012a12d7df3dd..6d8255ccf07afb1a9d31955f82e91392a824a4ba 100644 (file)
@@ -326,7 +326,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute
        /* Preserve fan min */
        tmp = 192 - (old_div * (192 - data->fan_preload[nr])
                     + new_div / 2) / new_div;
-       data->fan_preload[nr] = SENSORS_LIMIT(tmp, 0, 191);
+       data->fan_preload[nr] = clamp_val(tmp, 0, 191);
        smsc47m1_write_value(data, SMSC47M1_REG_FAN_PRELOAD[nr],
                             data->fan_preload[nr]);
        mutex_unlock(&data->update_lock);
index 36a3478d0799eba500ec7242c5533314022aebaf..efee4c59239fcff8aa7b675c01cb5b9ac6bab5bd 100644 (file)
@@ -77,7 +77,7 @@ static inline unsigned int IN_FROM_REG(u8 reg, int n)
 
 static inline u8 IN_TO_REG(unsigned long val, int n)
 {
-       return SENSORS_LIMIT(SCALE(val, 192, nom_mv[n]), 0, 255);
+       return clamp_val(SCALE(val, 192, nom_mv[n]), 0, 255);
 }
 
 /*
@@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n)
  */
 static inline s8 TEMP_TO_REG(int val)
 {
-       return SENSORS_LIMIT(SCALE(val, 1, 1000), -128000, 127000);
+       return clamp_val(SCALE(val, 1, 1000), -128000, 127000);
 }
 
 static inline int TEMP_FROM_REG(s8 val)
index 3c2c48d904e658736605ee95e5b21f08cd1ccf98..4b59eb53b18ac38ccaeb949f9feab2ba3fdc097e 100644 (file)
@@ -134,7 +134,7 @@ static ssize_t set_analog_out(struct device *dev,
                return err;
 
        mutex_lock(&data->update_lock);
-       data->analog_out = SENSORS_LIMIT(tmp, 0, 255);
+       data->analog_out = clamp_val(tmp, 0, 255);
        i2c_smbus_write_byte_data(client, THMC50_REG_ANALOG_OUT,
                                  data->analog_out);
 
@@ -187,7 +187,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
                return err;
 
        mutex_lock(&data->update_lock);
-       data->temp_min[nr] = SENSORS_LIMIT(val / 1000, -128, 127);
+       data->temp_min[nr] = clamp_val(val / 1000, -128, 127);
        i2c_smbus_write_byte_data(client, THMC50_REG_TEMP_MIN[nr],
                                  data->temp_min[nr]);
        mutex_unlock(&data->update_lock);
@@ -216,7 +216,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
                return err;
 
        mutex_lock(&data->update_lock);
-       data->temp_max[nr] = SENSORS_LIMIT(val / 1000, -128, 127);
+       data->temp_max[nr] = clamp_val(val / 1000, -128, 127);
        i2c_smbus_write_byte_data(client, THMC50_REG_TEMP_MAX[nr],
                                  data->temp_max[nr]);
        mutex_unlock(&data->update_lock);
index b10c3d36ccbc3b8b77410701b763b2890ed5467d..523dd89ba498c3dff27cbe87677fb8d2095c39c0 100644 (file)
@@ -115,7 +115,7 @@ static ssize_t tmp102_set_temp(struct device *dev,
 
        if (kstrtol(buf, 10, &val) < 0)
                return -EINVAL;
-       val = SENSORS_LIMIT(val, -256000, 255000);
+       val = clamp_val(val, -256000, 255000);
 
        mutex_lock(&tmp102->lock);
        tmp102->temp[sda->index] = val;
index e6205487516480ddfbd56acd3adf2828b45f9c74..c85f6967ccc34534da241368af7a440d267bf6bf 100644 (file)
@@ -142,10 +142,10 @@ static int tmp401_register_to_temp(u16 reg, u8 config)
 static u16 tmp401_temp_to_register(long temp, u8 config)
 {
        if (config & TMP401_CONFIG_RANGE) {
-               temp = SENSORS_LIMIT(temp, -64000, 191000);
+               temp = clamp_val(temp, -64000, 191000);
                temp += 64000;
        } else
-               temp = SENSORS_LIMIT(temp, 0, 127000);
+               temp = clamp_val(temp, 0, 127000);
 
        return (temp * 160 + 312) / 625;
 }
@@ -163,10 +163,10 @@ static int tmp401_crit_register_to_temp(u8 reg, u8 config)
 static u8 tmp401_crit_temp_to_register(long temp, u8 config)
 {
        if (config & TMP401_CONFIG_RANGE) {
-               temp = SENSORS_LIMIT(temp, -64000, 191000);
+               temp = clamp_val(temp, -64000, 191000);
                temp += 64000;
        } else
-               temp = SENSORS_LIMIT(temp, 0, 127000);
+               temp = clamp_val(temp, 0, 127000);
 
        return (temp + 500) / 1000;
 }
@@ -417,14 +417,14 @@ static ssize_t store_temp_crit_hyst(struct device *dev, struct device_attribute
                return -EINVAL;
 
        if (data->config & TMP401_CONFIG_RANGE)
-               val = SENSORS_LIMIT(val, -64000, 191000);
+               val = clamp_val(val, -64000, 191000);
        else
-               val = SENSORS_LIMIT(val, 0, 127000);
+               val = clamp_val(val, 0, 127000);
 
        mutex_lock(&data->update_lock);
        temp = tmp401_crit_register_to_temp(data->temp_crit[index],
                                                data->config);
-       val = SENSORS_LIMIT(val, temp - 255000, temp);
+       val = clamp_val(val, temp - 255000, temp);
        reg = ((temp - val) + 500) / 1000;
 
        i2c_smbus_write_byte_data(to_i2c_client(dev),
index 86d7f6d858b1bbe1b61720539995c6cdd1747d1f..d867e6bb2be1f7e1b1ebb41788c27f568bd7c579 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/vexpress.h>
index e0e14a9f1658b4d01bb6871aa4b29ae9d8cb627a..3123b30208c580064596bbc41f3e1e9e168957b9 100644 (file)
@@ -135,17 +135,14 @@ static inline u8 IN_TO_REG(long val, int inNum)
         * for the constants.
         */
        if (inNum <= 1)
-               return (u8)
-                   SENSORS_LIMIT((val * 21024 - 1205000) / 250000, 0, 255);
+               return (u8) clamp_val((val * 21024 - 1205000) / 250000, 0, 255);
        else if (inNum == 2)
-               return (u8)
-                   SENSORS_LIMIT((val * 15737 - 1205000) / 250000, 0, 255);
+               return (u8) clamp_val((val * 15737 - 1205000) / 250000, 0, 255);
        else if (inNum == 3)
-               return (u8)
-                   SENSORS_LIMIT((val * 10108 - 1205000) / 250000, 0, 255);
+               return (u8) clamp_val((val * 10108 - 1205000) / 250000, 0, 255);
        else
-               return (u8)
-                   SENSORS_LIMIT((val * 41714 - 12050000) / 2500000, 0, 255);
+               return (u8) clamp_val((val * 41714 - 12050000) / 2500000, 0,
+                                     255);
 }
 
 static inline long IN_FROM_REG(u8 val, int inNum)
@@ -175,8 +172,8 @@ static inline u8 FAN_TO_REG(long rpm, int div)
 {
        if (rpm == 0)
                return 0;
-       rpm = SENSORS_LIMIT(rpm, 1, 1000000);
-       return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 255);
+       rpm = clamp_val(rpm, 1, 1000000);
+       return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 255);
 }
 
 #define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (val) == 255 ? 0 : 1350000 / \
index 751703059faeff1910b96d06985eb194af75ebf8..dcc62f80f67bf967c8e38d9f85b3be31fe716979 100644 (file)
@@ -158,7 +158,7 @@ struct vt1211_data {
 #define IN_FROM_REG(ix, reg)   ((reg) < 3 ? 0 : (ix) == 5 ? \
                                 (((reg) - 3) * 15882 + 479) / 958 : \
                                 (((reg) - 3) * 10000 + 479) / 958)
-#define IN_TO_REG(ix, val)     (SENSORS_LIMIT((ix) == 5 ? \
+#define IN_TO_REG(ix, val)     (clamp_val((ix) == 5 ? \
                                 ((val) * 958 + 7941) / 15882 + 3 : \
                                 ((val) * 958 + 5000) / 10000 + 3, 0, 255))
 
@@ -173,7 +173,7 @@ struct vt1211_data {
                                 (ix) == 1 ? (reg) < 51 ? 0 : \
                                 ((reg) - 51) * 1000 : \
                                 ((253 - (reg)) * 2200 + 105) / 210)
-#define TEMP_TO_REG(ix, val)   SENSORS_LIMIT( \
+#define TEMP_TO_REG(ix, val)   clamp_val( \
                                 ((ix) == 0 ? ((val) + 500) / 1000 : \
                                  (ix) == 1 ? ((val) + 500) / 1000 + 51 : \
                                  253 - ((val) * 210 + 1100) / 2200), 0, 255)
@@ -183,7 +183,7 @@ struct vt1211_data {
 #define RPM_FROM_REG(reg, div) (((reg) == 0) || ((reg) == 255) ? 0 : \
                                 1310720 / (reg) / DIV_FROM_REG(div))
 #define RPM_TO_REG(val, div)   ((val) == 0 ? 255 : \
-                                SENSORS_LIMIT((1310720 / (val) / \
+                                clamp_val((1310720 / (val) / \
                                 DIV_FROM_REG(div)), 1, 254))
 
 /* ---------------------------------------------------------------------
@@ -687,7 +687,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
                                data->fan_ctl));
                break;
        case SHOW_SET_PWM_FREQ:
-               val = 135000 / SENSORS_LIMIT(val, 135000 >> 7, 135000);
+               val = 135000 / clamp_val(val, 135000 >> 7, 135000);
                /* calculate tmp = log2(val) */
                tmp = 0;
                for (val >>= 1; val > 0; val >>= 1)
@@ -845,7 +845,7 @@ static ssize_t set_pwm_auto_point_pwm(struct device *dev,
                return err;
 
        mutex_lock(&data->update_lock);
-       data->pwm_auto_pwm[ix][ap] = SENSORS_LIMIT(val, 0, 255);
+       data->pwm_auto_pwm[ix][ap] = clamp_val(val, 0, 255);
        vt1211_write8(data, VT1211_REG_PWM_AUTO_PWM(ix, ap),
                      data->pwm_auto_pwm[ix][ap]);
        mutex_unlock(&data->update_lock);
index a56355cef184863fc1a9aadeffb83908ba7bfead..988a2a79676496e3acd7ec5e1e8e4c1399c9bf39 100644 (file)
@@ -147,7 +147,7 @@ static inline u8 FAN_TO_REG(long rpm, int div)
 {
        if (rpm == 0)
                return 0;
-       return SENSORS_LIMIT(1310720 / (rpm * div), 1, 255);
+       return clamp_val(1310720 / (rpm * div), 1, 255);
 }
 
 #define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : 1310720 / ((val) * (div)))
@@ -236,7 +236,7 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
                return err;
 
        mutex_lock(&data->update_lock);
-       data->in_min[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255);
+       data->in_min[nr] = clamp_val(((val * 958) / 10000) + 3, 0, 255);
        vt8231_write_value(data, regvoltmin[nr], data->in_min[nr]);
        mutex_unlock(&data->update_lock);
        return count;
@@ -256,7 +256,7 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
                return err;
 
        mutex_lock(&data->update_lock);
-       data->in_max[nr] = SENSORS_LIMIT(((val * 958) / 10000) + 3, 0, 255);
+       data->in_max[nr] = clamp_val(((val * 958) / 10000) + 3, 0, 255);
        vt8231_write_value(data, regvoltmax[nr], data->in_max[nr]);
        mutex_unlock(&data->update_lock);
        return count;
@@ -302,8 +302,8 @@ static ssize_t set_in5_min(struct device *dev, struct device_attribute *attr,
                return err;
 
        mutex_lock(&data->update_lock);
-       data->in_min[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3,
-                                       0, 255);
+       data->in_min[5] = clamp_val(((val * 958 * 34) / (10000 * 54)) + 3,
+                                   0, 255);
        vt8231_write_value(data, regvoltmin[5], data->in_min[5]);
        mutex_unlock(&data->update_lock);
        return count;
@@ -321,8 +321,8 @@ static ssize_t set_in5_max(struct device *dev, struct device_attribute *attr,
                return err;
 
        mutex_lock(&data->update_lock);
-       data->in_max[5] = SENSORS_LIMIT(((val * 958 * 34) / (10000 * 54)) + 3,
-                                       0, 255);
+       data->in_max[5] = clamp_val(((val * 958 * 34) / (10000 * 54)) + 3,
+                                   0, 255);
        vt8231_write_value(data, regvoltmax[5], data->in_max[5]);
        mutex_unlock(&data->update_lock);
        return count;
@@ -380,7 +380,7 @@ static ssize_t set_temp0_max(struct device *dev, struct device_attribute *attr,
                return err;
 
        mutex_lock(&data->update_lock);
-       data->temp_max[0] = SENSORS_LIMIT((val + 500) / 1000, 0, 255);
+       data->temp_max[0] = clamp_val((val + 500) / 1000, 0, 255);
        vt8231_write_value(data, regtempmax[0], data->temp_max[0]);
        mutex_unlock(&data->update_lock);
        return count;
@@ -397,7 +397,7 @@ static ssize_t set_temp0_min(struct device *dev, struct device_attribute *attr,
                return err;
 
        mutex_lock(&data->update_lock);
-       data->temp_min[0] = SENSORS_LIMIT((val + 500) / 1000, 0, 255);
+       data->temp_min[0] = clamp_val((val + 500) / 1000, 0, 255);
        vt8231_write_value(data, regtempmin[0], data->temp_min[0]);
        mutex_unlock(&data->update_lock);
        return count;
@@ -444,7 +444,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
                return err;
 
        mutex_lock(&data->update_lock);
-       data->temp_max[nr] = SENSORS_LIMIT(TEMP_MAXMIN_TO_REG(val), 0, 255);
+       data->temp_max[nr] = clamp_val(TEMP_MAXMIN_TO_REG(val), 0, 255);
        vt8231_write_value(data, regtempmax[nr], data->temp_max[nr]);
        mutex_unlock(&data->update_lock);
        return count;
@@ -463,7 +463,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
                return err;
 
        mutex_lock(&data->update_lock);
-       data->temp_min[nr] = SENSORS_LIMIT(TEMP_MAXMIN_TO_REG(val), 0, 255);
+       data->temp_min[nr] = clamp_val(TEMP_MAXMIN_TO_REG(val), 0, 255);
        vt8231_write_value(data, regtempmin[nr], data->temp_min[nr]);
        mutex_unlock(&data->update_lock);
        return count;
index 0e8ffd6059a034614e281a67aa4e4f026f1c5f02..0a89211c25f6df2ad94efdb472a6cf547f502422 100644 (file)
@@ -354,8 +354,8 @@ static inline unsigned int step_time_from_reg(u8 reg, u8 mode)
 
 static inline u8 step_time_to_reg(unsigned int msec, u8 mode)
 {
-       return SENSORS_LIMIT((mode ? (msec + 50) / 100 :
-                                               (msec + 200) / 400), 1, 255);
+       return clamp_val((mode ? (msec + 50) / 100 : (msec + 200) / 400),
+                        1, 255);
 }
 
 static unsigned int fan_from_reg8(u16 reg, unsigned int divreg)
@@ -414,8 +414,7 @@ static inline long in_from_reg(u8 reg, u8 nr, const u16 *scale_in)
 
 static inline u8 in_to_reg(u32 val, u8 nr, const u16 *scale_in)
 {
-       return SENSORS_LIMIT(DIV_ROUND_CLOSEST(val * 100, scale_in[nr]), 0,
-                            255);
+       return clamp_val(DIV_ROUND_CLOSEST(val * 100, scale_in[nr]), 0, 255);
 }
 
 /*
@@ -1267,7 +1266,7 @@ store_temp_offset(struct device *dev, struct device_attribute *attr,
        if (err < 0)
                return err;
 
-       val = SENSORS_LIMIT(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+       val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
 
        mutex_lock(&data->update_lock);
        data->temp_offset[nr] = val;
@@ -1435,7 +1434,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
        if (err < 0)
                return err;
 
-       val = SENSORS_LIMIT(val, 0, 255);
+       val = clamp_val(val, 0, 255);
 
        mutex_lock(&data->update_lock);
        data->pwm[nr] = val;
@@ -1514,7 +1513,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
        if (err < 0)
                return err;
 
-       val = SENSORS_LIMIT(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
+       val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
 
        mutex_lock(&data->update_lock);
        data->target_temp[nr] = val;
@@ -1540,7 +1539,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
                return err;
 
        /* Limit the temp to 0C - 15C */
-       val = SENSORS_LIMIT(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
+       val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
 
        mutex_lock(&data->update_lock);
        if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
@@ -1639,7 +1638,7 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
        err = kstrtoul(buf, 10, &val); \
        if (err < 0) \
                return err; \
-       val = SENSORS_LIMIT(val, 1, 255); \
+       val = clamp_val(val, 1, 255); \
        mutex_lock(&data->update_lock); \
        data->reg[nr] = val; \
        w83627ehf_write_value(data, data->REG_##REG[nr], val); \
index 81f486520cea604e928535c04e84ec5d99c6831b..3b9ef2d23452801166e2f278b1014ace47b11639 100644 (file)
@@ -254,16 +254,15 @@ static const u8 BIT_SCFG2[] = { 0x10, 0x20, 0x40 };
  * these macros are called: arguments may be evaluated more than once.
  * Fixing this is just not worth it.
  */
-#define IN_TO_REG(val)  (SENSORS_LIMIT((((val) + 8)/16),0,255))
+#define IN_TO_REG(val)  (clamp_val((((val) + 8) / 16), 0, 255))
 #define IN_FROM_REG(val) ((val) * 16)
 
 static inline u8 FAN_TO_REG(long rpm, int div)
 {
        if (rpm == 0)
                return 255;
-       rpm = SENSORS_LIMIT(rpm, 1, 1000000);
-       return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1,
-                            254);
+       rpm = clamp_val(rpm, 1, 1000000);
+       return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
 }
 
 #define TEMP_MIN (-128000)
@@ -275,9 +274,9 @@ static inline u8 FAN_TO_REG(long rpm, int div)
  */
 static u8 TEMP_TO_REG(long temp)
 {
-        int ntemp = SENSORS_LIMIT(temp, TEMP_MIN, TEMP_MAX);
-        ntemp += (ntemp<0 ? -500 : 500);
-        return (u8)(ntemp / 1000);
+       int ntemp = clamp_val(temp, TEMP_MIN, TEMP_MAX);
+       ntemp += (ntemp < 0 ? -500 : 500);
+       return (u8)(ntemp / 1000);
 }
 
 static int TEMP_FROM_REG(u8 reg)
@@ -287,7 +286,7 @@ static int TEMP_FROM_REG(u8 reg)
 
 #define FAN_FROM_REG(val,div) ((val)==0?-1:(val)==255?0:1350000/((val)*(div)))
 
-#define PWM_TO_REG(val) (SENSORS_LIMIT((val),0,255))
+#define PWM_TO_REG(val) (clamp_val((val), 0, 255))
 
 static inline unsigned long pwm_freq_from_reg_627hf(u8 reg)
 {
@@ -342,7 +341,7 @@ static inline u8 pwm_freq_to_reg(unsigned long val)
 static inline u8 DIV_TO_REG(long val)
 {
        int i;
-       val = SENSORS_LIMIT(val, 1, 128) >> 1;
+       val = clamp_val(val, 1, 128) >> 1;
        for (i = 0; i < 7; i++) {
                if (val == 0)
                        break;
@@ -614,8 +613,7 @@ static ssize_t store_regs_in_min0(struct device *dev, struct device_attribute *a
 
                /* use VRM9 calculation */
                data->in_min[0] =
-                       SENSORS_LIMIT(((val * 100) - 70000 + 244) / 488, 0,
-                                       255);
+                       clamp_val(((val * 100) - 70000 + 244) / 488, 0, 255);
        else
                /* use VRM8 (standard) calculation */
                data->in_min[0] = IN_TO_REG(val);
@@ -644,8 +642,7 @@ static ssize_t store_regs_in_max0(struct device *dev, struct device_attribute *a
                
                /* use VRM9 calculation */
                data->in_max[0] =
-                       SENSORS_LIMIT(((val * 100) - 70000 + 244) / 488, 0,
-                                       255);
+                       clamp_val(((val * 100) - 70000 + 244) / 488, 0, 255);
        else
                /* use VRM8 (standard) calculation */
                data->in_max[0] = IN_TO_REG(val);
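
In the two in0 limit hunks above, the VRM 9.x branch is just a different scale for the same 8-bit register: solving reg = (val * 100 - 70000 + 244) / 488 for val gives roughly val = reg * 4.88 + 700, i.e. a 4.88 mV step with a 700 mV offset, the + 244 providing round-to-nearest (the non-VRM9 branch keeps the usual 16 mV step of IN_TO_REG()). For example, as an editor's note:

	/* val = 1300 mV with VRM 9.x scaling:
	 *   reg = (1300 * 100 - 70000 + 244) / 488 = 60244 / 488 = 123
	 *   which reads back as 123 * 4.88 mV + 700 mV = 1300.24 mV
	 */
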
index 93bd286395955ccc90cb79bd8c8f445489ed6362..aeec5b1d81c93d10c81c7d79d1ef65f3996123f2 100644 (file)
@@ -159,7 +159,7 @@ static const u8 BIT_SCFG2[] = { 0x10, 0x20, 0x40 };
 #define W83781D_DEFAULT_BETA           3435
 
 /* Conversions */
-#define IN_TO_REG(val)                 SENSORS_LIMIT(((val) + 8) / 16, 0, 255)
+#define IN_TO_REG(val)                 clamp_val(((val) + 8) / 16, 0, 255)
 #define IN_FROM_REG(val)               ((val) * 16)
 
 static inline u8
@@ -167,8 +167,8 @@ FAN_TO_REG(long rpm, int div)
 {
        if (rpm == 0)
                return 255;
-       rpm = SENSORS_LIMIT(rpm, 1, 1000000);
-       return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+       rpm = clamp_val(rpm, 1, 1000000);
+       return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
 }
 
 static inline long
@@ -181,7 +181,7 @@ FAN_FROM_REG(u8 val, int div)
        return 1350000 / (val * div);
 }
 
-#define TEMP_TO_REG(val)               SENSORS_LIMIT((val) / 1000, -127, 128)
+#define TEMP_TO_REG(val)               clamp_val((val) / 1000, -127, 128)
 #define TEMP_FROM_REG(val)             ((val) * 1000)
 
 #define BEEP_MASK_FROM_REG(val, type)  ((type) == as99127f ? \
@@ -195,9 +195,8 @@ static inline u8
 DIV_TO_REG(long val, enum chips type)
 {
        int i;
-       val = SENSORS_LIMIT(val, 1,
-                           ((type == w83781d
-                             || type == as99127f) ? 8 : 128)) >> 1;
+       val = clamp_val(val, 1,
+                       ((type == w83781d || type == as99127f) ? 8 : 128)) >> 1;
        for (i = 0; i < 7; i++) {
                if (val == 0)
                        break;
@@ -443,7 +442,7 @@ store_vrm_reg(struct device *dev, struct device_attribute *attr,
        err = kstrtoul(buf, 10, &val);
        if (err)
                return err;
-       data->vrm = SENSORS_LIMIT(val, 0, 255);
+       data->vrm = clamp_val(val, 0, 255);
 
        return count;
 }
@@ -730,7 +729,7 @@ store_pwm(struct device *dev, struct device_attribute *da, const char *buf,
                return err;
 
        mutex_lock(&data->update_lock);
-       data->pwm[nr] = SENSORS_LIMIT(val, 0, 255);
+       data->pwm[nr] = clamp_val(val, 0, 255);
        w83781d_write_value(data, W83781D_REG_PWM[nr], data->pwm[nr]);
        mutex_unlock(&data->update_lock);
        return count;
index ed397c6451983473b9ca7d1dbac399194a746fb3..38dddddf887560f53add38dd9477acb6afef986f 100644 (file)
@@ -220,15 +220,15 @@ static inline int w83791d_write(struct i2c_client *client, u8 reg, u8 value)
  * in mV as would be measured on the chip input pin, need to just
  * multiply/divide by 16 to translate from/to register values.
  */
-#define IN_TO_REG(val)         (SENSORS_LIMIT((((val) + 8) / 16), 0, 255))
+#define IN_TO_REG(val)         (clamp_val((((val) + 8) / 16), 0, 255))
 #define IN_FROM_REG(val)       ((val) * 16)
 
 static u8 fan_to_reg(long rpm, int div)
 {
        if (rpm == 0)
                return 255;
-       rpm = SENSORS_LIMIT(rpm, 1, 1000000);
-       return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+       rpm = clamp_val(rpm, 1, 1000000);
+       return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
 }
 
 #define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \
@@ -273,7 +273,7 @@ static u8 div_to_reg(int nr, long val)
        int i;
 
        /* fan divisors max out at 128 */
-       val = SENSORS_LIMIT(val, 1, 128) >> 1;
+       val = clamp_val(val, 1, 128) >> 1;
        for (i = 0; i < 7; i++) {
                if (val == 0)
                        break;
@@ -747,7 +747,7 @@ static ssize_t store_pwm(struct device *dev, struct device_attribute *attr,
                return -EINVAL;
 
        mutex_lock(&data->update_lock);
-       data->pwm[nr] = SENSORS_LIMIT(val, 0, 255);
+       data->pwm[nr] = clamp_val(val, 0, 255);
        w83791d_write(client, W83791D_REG_PWM[nr], data->pwm[nr]);
        mutex_unlock(&data->update_lock);
        return count;
index 301942d084534ef748f1917b1fa3dccde1b8beed..5cb83ddf2cc6a48e1c7214a6e3a8b5706f11af1f 100644 (file)
@@ -235,8 +235,8 @@ FAN_TO_REG(long rpm, int div)
 {
        if (rpm == 0)
                return 255;
-       rpm = SENSORS_LIMIT(rpm, 1, 1000000);
-       return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+       rpm = clamp_val(rpm, 1, 1000000);
+       return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
 }
 
 #define FAN_FROM_REG(val, div) ((val) == 0   ? -1 : \
@@ -244,16 +244,15 @@ FAN_TO_REG(long rpm, int div)
                                                1350000 / ((val) * (div))))
 
 /* for temp1 */
-#define TEMP1_TO_REG(val)      (SENSORS_LIMIT(((val) < 0 ? (val)+0x100*1000 \
-                                       : (val)) / 1000, 0, 0xff))
+#define TEMP1_TO_REG(val)      (clamp_val(((val) < 0 ? (val) + 0x100 * 1000 \
+                                                     : (val)) / 1000, 0, 0xff))
 #define TEMP1_FROM_REG(val)    (((val) & 0x80 ? (val)-0x100 : (val)) * 1000)
 /* for temp2 and temp3, because they need additional resolution */
 #define TEMP_ADD_FROM_REG(val1, val2) \
        ((((val1) & 0x80 ? (val1)-0x100 \
                : (val1)) * 1000) + ((val2 & 0x80) ? 500 : 0))
 #define TEMP_ADD_TO_REG_HIGH(val) \
-       (SENSORS_LIMIT(((val) < 0 ? (val)+0x100*1000 \
-                       : (val)) / 1000, 0, 0xff))
+       (clamp_val(((val) < 0 ? (val) + 0x100 * 1000 : (val)) / 1000, 0, 0xff))
 #define TEMP_ADD_TO_REG_LOW(val)       ((val%1000) ? 0x80 : 0x00)
 
 #define DIV_FROM_REG(val)              (1 << (val))
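
TEMP1_TO_REG() and TEMP1_FROM_REG() above implement an 8-bit two's-complement encoding of millidegrees Celsius: negative inputs are biased by 0x100 * 1000 before the divide, and register values with bit 7 set are rebased by -0x100 on the way back. For instance, as an editor's illustration:

	u8 reg = TEMP1_TO_REG(-5000);	/* (-5000 + 256000) / 1000 = 251 = 0xfb */
	long mc = TEMP1_FROM_REG(reg);	/* (0xfb - 0x100) * 1000 = -5000 millidegrees C */
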
@@ -262,7 +261,7 @@ static inline u8
 DIV_TO_REG(long val)
 {
        int i;
-       val = SENSORS_LIMIT(val, 1, 128) >> 1;
+       val = clamp_val(val, 1, 128) >> 1;
        for (i = 0; i < 7; i++) {
                if (val == 0)
                        break;
@@ -397,7 +396,7 @@ static ssize_t store_in_##reg(struct device *dev, \
        if (err) \
                return err; \
        mutex_lock(&data->update_lock); \
-       data->in_##reg[nr] = SENSORS_LIMIT(IN_TO_REG(nr, val) / 4, 0, 255); \
+       data->in_##reg[nr] = clamp_val(IN_TO_REG(nr, val) / 4, 0, 255); \
        w83792d_write_value(client, W83792D_REG_IN_##REG[nr], \
                            data->in_##reg[nr]); \
        mutex_unlock(&data->update_lock); \
@@ -645,7 +644,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
        err = kstrtoul(buf, 10, &val);
        if (err)
                return err;
-       val = SENSORS_LIMIT(val, 0, 255) >> 4;
+       val = clamp_val(val, 0, 255) >> 4;
 
        mutex_lock(&data->update_lock);
        val |= w83792d_read_value(client, W83792D_REG_PWM[nr]) & 0xf0;
@@ -799,7 +798,7 @@ store_thermal_cruise(struct device *dev, struct device_attribute *attr,
        mutex_lock(&data->update_lock);
        target_mask = w83792d_read_value(client,
                                         W83792D_REG_THERMAL[nr]) & 0x80;
-       data->thermal_cruise[nr] = SENSORS_LIMIT(target_tmp, 0, 255);
+       data->thermal_cruise[nr] = clamp_val(target_tmp, 0, 255);
        w83792d_write_value(client, W83792D_REG_THERMAL[nr],
                (data->thermal_cruise[nr]) | target_mask);
        mutex_unlock(&data->update_lock);
@@ -837,7 +836,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
        mutex_lock(&data->update_lock);
        tol_mask = w83792d_read_value(client,
                W83792D_REG_TOLERANCE[nr]) & ((nr == 1) ? 0x0f : 0xf0);
-       tol_tmp = SENSORS_LIMIT(val, 0, 15);
+       tol_tmp = clamp_val(val, 0, 15);
        tol_tmp &= 0x0f;
        data->tolerance[nr] = tol_tmp;
        if (nr == 1)
@@ -881,7 +880,7 @@ store_sf2_point(struct device *dev, struct device_attribute *attr,
                return err;
 
        mutex_lock(&data->update_lock);
-       data->sf2_points[index][nr] = SENSORS_LIMIT(val, 0, 127);
+       data->sf2_points[index][nr] = clamp_val(val, 0, 127);
        mask_tmp = w83792d_read_value(client,
                                        W83792D_REG_POINTS[index][nr]) & 0x80;
        w83792d_write_value(client, W83792D_REG_POINTS[index][nr],
@@ -923,7 +922,7 @@ store_sf2_level(struct device *dev, struct device_attribute *attr,
                return err;
 
        mutex_lock(&data->update_lock);
-       data->sf2_levels[index][nr] = SENSORS_LIMIT((val * 15) / 100, 0, 15);
+       data->sf2_levels[index][nr] = clamp_val((val * 15) / 100, 0, 15);
        mask_tmp = w83792d_read_value(client, W83792D_REG_LEVELS[index][nr])
                & ((nr == 3) ? 0xf0 : 0x0f);
        if (nr == 3)
index 99799fd1d917ce5a451f6a14cbf2c6da72f6a2e6..660427520c539a6993207862bdae6d2247fb2924 100644 (file)
@@ -191,7 +191,7 @@ static inline u16 FAN_TO_REG(long rpm)
 {
        if (rpm <= 0)
                return 0x0fff;
-       return SENSORS_LIMIT((1350000 + (rpm >> 1)) / rpm, 1, 0xffe);
+       return clamp_val((1350000 + (rpm >> 1)) / rpm, 1, 0xffe);
 }
 
 static inline unsigned long TIME_FROM_REG(u8 reg)
@@ -201,7 +201,7 @@ static inline unsigned long TIME_FROM_REG(u8 reg)
 
 static inline u8 TIME_TO_REG(unsigned long val)
 {
-       return SENSORS_LIMIT((val + 50) / 100, 0, 0xff);
+       return clamp_val((val + 50) / 100, 0, 0xff);
 }
 
 static inline long TEMP_FROM_REG(s8 reg)
@@ -211,7 +211,7 @@ static inline long TEMP_FROM_REG(s8 reg)
 
 static inline s8 TEMP_TO_REG(long val, s8 min, s8 max)
 {
-       return SENSORS_LIMIT((val + (val < 0 ? -500 : 500)) / 1000, min, max);
+       return clamp_val((val + (val < 0 ? -500 : 500)) / 1000, min, max);
 }
 
 struct w83793_data {
@@ -558,7 +558,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
                w83793_write_value(client, W83793_REG_PWM_STOP_TIME(index),
                                   val);
        } else {
-               val = SENSORS_LIMIT(val, 0, 0xff) >> 2;
+               val = clamp_val(val, 0, 0xff) >> 2;
                data->pwm[index][nr] =
                    w83793_read_value(client, W83793_REG_PWM(index, nr)) & 0xc0;
                data->pwm[index][nr] |= val;
@@ -739,7 +739,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
        if (nr == SETUP_PWM_DEFAULT) {
                data->pwm_default =
                    w83793_read_value(client, W83793_REG_PWM_DEFAULT) & 0xc0;
-               data->pwm_default |= SENSORS_LIMIT(val, 0, 0xff) >> 2;
+               data->pwm_default |= clamp_val(val, 0, 0xff) >> 2;
                w83793_write_value(client, W83793_REG_PWM_DEFAULT,
                                                        data->pwm_default);
        } else if (nr == SETUP_PWM_UPTIME) {
@@ -838,7 +838,7 @@ store_sf_ctrl(struct device *dev, struct device_attribute *attr,
 
        mutex_lock(&data->update_lock);
        if (nr == TEMP_FAN_MAP) {
-               val = SENSORS_LIMIT(val, 0, 255);
+               val = clamp_val(val, 0, 255);
                w83793_write_value(client, W83793_REG_TEMP_FAN_MAP(index), val);
                data->temp_fan_map[index] = val;
        } else if (nr == TEMP_PWM_ENABLE) {
@@ -907,7 +907,7 @@ store_sf2_pwm(struct device *dev, struct device_attribute *attr,
        err = kstrtoul(buf, 10, &val);
        if (err)
                return err;
-       val = SENSORS_LIMIT(val, 0, 0xff) >> 2;
+       val = clamp_val(val, 0, 0xff) >> 2;
 
        mutex_lock(&data->update_lock);
        data->sf2_pwm[index][nr] =
@@ -1003,9 +1003,9 @@ store_in(struct device *dev, struct device_attribute *attr,
                /* fix the limit values of 5VDD and 5VSB to ALARM mechanism */
                if (nr == 1 || nr == 2)
                        val -= scale_in_add[index] / scale_in[index];
-               val = SENSORS_LIMIT(val, 0, 255);
+               val = clamp_val(val, 0, 255);
        } else {
-               val = SENSORS_LIMIT(val, 0, 0x3FF);
+               val = clamp_val(val, 0, 0x3FF);
                data->in_low_bits[nr] =
                    w83793_read_value(client, W83793_REG_IN_LOW_BITS[nr]);
                data->in_low_bits[nr] &= ~(0x03 << (2 * index));
index 55a4f4894531fa8a284116b080bea427a2e8e8ea..e226096148eb5eefd7db06d302f204341e4ba0a5 100644 (file)
@@ -262,7 +262,7 @@ static inline u16 fan_to_reg(long rpm)
 {
        if (rpm <= 0)
                return 0x0fff;
-       return SENSORS_LIMIT((1350000 + (rpm >> 1)) / rpm, 1, 0xffe);
+       return clamp_val((1350000 + (rpm >> 1)) / rpm, 1, 0xffe);
 }
 
 static inline unsigned long time_from_reg(u8 reg)
@@ -272,7 +272,7 @@ static inline unsigned long time_from_reg(u8 reg)
 
 static inline u8 time_to_reg(unsigned long val)
 {
-       return SENSORS_LIMIT((val + 50) / 100, 0, 0xff);
+       return clamp_val((val + 50) / 100, 0, 0xff);
 }
 
 static inline long temp_from_reg(s8 reg)
@@ -282,7 +282,7 @@ static inline long temp_from_reg(s8 reg)
 
 static inline s8 temp_to_reg(long val, s8 min, s8 max)
 {
-       return SENSORS_LIMIT(val / 1000, min, max);
+       return clamp_val(val / 1000, min, max);
 }
 
 static const u16 pwm_freq_cksel0[16] = {
@@ -319,7 +319,7 @@ static u8 pwm_freq_to_reg(unsigned long val, u16 clkin)
 
        /* Best fit for cksel = 1 */
        base_clock = clkin * 1000 / ((clkin == 48000) ? 384 : 256);
-       reg1 = SENSORS_LIMIT(DIV_ROUND_CLOSEST(base_clock, val), 1, 128);
+       reg1 = clamp_val(DIV_ROUND_CLOSEST(base_clock, val), 1, 128);
        best1 = base_clock / reg1;
        reg1 = 0x80 | (reg1 - 1);
 
@@ -889,7 +889,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
                val = pwm_freq_to_reg(val, data->clkin);
                break;
        default:
-               val = SENSORS_LIMIT(val, 0, 0xff);
+               val = clamp_val(val, 0, 0xff);
                break;
        }
        w83795_write(client, W83795_REG_PWM(index, nr), val);
@@ -1126,7 +1126,7 @@ store_temp_pwm_enable(struct device *dev, struct device_attribute *attr,
                break;
        case TEMP_PWM_FAN_MAP:
                mutex_lock(&data->update_lock);
-               tmp = SENSORS_LIMIT(tmp, 0, 0xff);
+               tmp = clamp_val(tmp, 0, 0xff);
                w83795_write(client, W83795_REG_TFMR(index), tmp);
                data->pwm_tfmr[index] = tmp;
                mutex_unlock(&data->update_lock);
@@ -1177,13 +1177,13 @@ store_fanin(struct device *dev, struct device_attribute *attr,
        mutex_lock(&data->update_lock);
        switch (nr) {
        case FANIN_TARGET:
-               val = fan_to_reg(SENSORS_LIMIT(val, 0, 0xfff));
+               val = fan_to_reg(clamp_val(val, 0, 0xfff));
                w83795_write(client, W83795_REG_FTSH(index), val >> 4);
                w83795_write(client, W83795_REG_FTSL(index), (val << 4) & 0xf0);
                data->target_speed[index] = val;
                break;
        case FANIN_TOL:
-               val = SENSORS_LIMIT(val, 0, 0x3f);
+               val = clamp_val(val, 0, 0x3f);
                w83795_write(client, W83795_REG_TFTS, val);
                data->tol_speed = val;
                break;
@@ -1227,22 +1227,22 @@ store_temp_pwm(struct device *dev, struct device_attribute *attr,
        mutex_lock(&data->update_lock);
        switch (nr) {
        case TEMP_PWM_TTTI:
-               val = SENSORS_LIMIT(val, 0, 0x7f);
+               val = clamp_val(val, 0, 0x7f);
                w83795_write(client, W83795_REG_TTTI(index), val);
                break;
        case TEMP_PWM_CTFS:
-               val = SENSORS_LIMIT(val, 0, 0x7f);
+               val = clamp_val(val, 0, 0x7f);
                w83795_write(client, W83795_REG_CTFS(index), val);
                break;
        case TEMP_PWM_HCT:
-               val = SENSORS_LIMIT(val, 0, 0x0f);
+               val = clamp_val(val, 0, 0x0f);
                tmp = w83795_read(client, W83795_REG_HT(index));
                tmp &= 0x0f;
                tmp |= (val << 4) & 0xf0;
                w83795_write(client, W83795_REG_HT(index), tmp);
                break;
        case TEMP_PWM_HOT:
-               val = SENSORS_LIMIT(val, 0, 0x0f);
+               val = clamp_val(val, 0, 0x0f);
                tmp = w83795_read(client, W83795_REG_HT(index));
                tmp &= 0xf0;
                tmp |= val & 0x0f;
@@ -1541,7 +1541,7 @@ store_in(struct device *dev, struct device_attribute *attr,
        if ((index >= 17) &&
            !((data->has_gain >> (index - 17)) & 1))
                val /= 8;
-       val = SENSORS_LIMIT(val, 0, 0x3FF);
+       val = clamp_val(val, 0, 0x3FF);
        mutex_lock(&data->update_lock);
 
        lsb_idx = IN_LSB_SHIFT_IDX[index][IN_LSB_IDX];
@@ -1596,7 +1596,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
 
        switch (nr) {
        case SETUP_PWM_DEFAULT:
-               val = SENSORS_LIMIT(val, 0, 0xff);
+               val = clamp_val(val, 0, 0xff);
                break;
        case SETUP_PWM_UPTIME:
        case SETUP_PWM_DOWNTIME:
index 79710bcac2f724233746b9c416fd53384dd52e52..edb06cda5a689a4be87046b138051f5b4b42e3ca 100644 (file)
@@ -86,8 +86,8 @@ FAN_TO_REG(long rpm, int div)
 {
        if (rpm == 0)
                return 255;
-       rpm = SENSORS_LIMIT(rpm, 1, 1000000);
-       return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
+       rpm = clamp_val(rpm, 1, 1000000);
+       return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
 }
 
 #define FAN_FROM_REG(val, div) ((val) == 0   ? -1 : \
@@ -95,9 +95,8 @@ FAN_TO_REG(long rpm, int div)
                                1350000 / ((val) * (div))))
 
 /* for temp */
-#define TEMP_TO_REG(val)       (SENSORS_LIMIT(((val) < 0 ? \
-                                               (val) + 0x100 * 1000 \
-                                               : (val)) / 1000, 0, 0xff))
+#define TEMP_TO_REG(val)       (clamp_val(((val) < 0 ? (val) + 0x100 * 1000 \
+                                                     : (val)) / 1000, 0, 0xff))
 #define TEMP_FROM_REG(val)     (((val) & 0x80 ? \
                                  (val) - 0x100 : (val)) * 1000)
 
@@ -106,7 +105,7 @@ FAN_TO_REG(long rpm, int div)
  * in mV as would be measured on the chip input pin, need to just
  * multiply/divide by 8 to translate from/to register values.
  */
-#define IN_TO_REG(val)         (SENSORS_LIMIT((((val) + 4) / 8), 0, 255))
+#define IN_TO_REG(val)         (clamp_val((((val) + 4) / 8), 0, 255))
 #define IN_FROM_REG(val)       ((val) * 8)
 
 #define DIV_FROM_REG(val)      (1 << (val))
@@ -115,7 +114,7 @@ static inline u8
 DIV_TO_REG(long val)
 {
        int i;
-       val = SENSORS_LIMIT(val, 1, 128) >> 1;
+       val = clamp_val(val, 1, 128) >> 1;
        for (i = 0; i < 7; i++) {
                if (val == 0)
                        break;
@@ -481,7 +480,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
        err = kstrtoul(buf, 10, &val);
        if (err)
                return err;
-       val = SENSORS_LIMIT(val, 0, 255);
+       val = clamp_val(val, 0, 255);
 
        mutex_lock(&data->update_lock);
        data->pwm[nr] = val;
@@ -564,7 +563,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
        mutex_lock(&data->update_lock);
        tol_mask = w83l786ng_read_value(client,
            W83L786NG_REG_TOLERANCE) & ((nr == 1) ? 0x0f : 0xf0);
-       tol_tmp = SENSORS_LIMIT(val, 0, 15);
+       tol_tmp = clamp_val(val, 0, 15);
        tol_tmp &= 0x0f;
        data->tolerance[nr] = tol_tmp;
        if (nr == 1)
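
The long run of mechanical conversions above swaps the old hwmon-local SENSORS_LIMIT() macro for the generic clamp_val() helper from <linux/kernel.h>, which clamps a value into [lo, hi] using the value's own type. A minimal userspace sketch of the equivalent behaviour; the clamp_val() defined here is an illustrative stand-in, not the kernel macro:

    #include <stdio.h>

    /* illustrative stand-in for the kernel's clamp_val() */
    #define clamp_val(val, lo, hi) \
            ((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

    int main(void)
    {
            long rpm = 2000000;                     /* out-of-range input  */
            long reg = clamp_val(rpm, 1, 1000000);  /* clamped to the max  */

            printf("reg = %ld\n", reg);             /* prints 1000000      */
            return 0;
    }
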
index cbba7db9ad594df3cd10a37e6fa8b16fc456f817..f5258c205de555ec569f5a8c967531577336344c 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
 #include <linux/delay.h>
+#include <linux/module.h>
 #include "i2c-designware-core.h"
 
 /*
@@ -725,3 +726,6 @@ u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev)
        return dw_readl(dev, DW_IC_COMP_PARAM_1);
 }
 EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param);
+
+MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core");
+MODULE_LICENSE("GPL");
index 1b1a936eccc9ef11ba1e17cb61d9b7a54b226704..d6abaf2cf2e3535b4c7c4ad58dc9e5e772b56fee 100644 (file)
@@ -127,7 +127,7 @@ struct mxs_i2c_dev {
        struct device *dev;
        void __iomem *regs;
        struct completion cmd_complete;
-       u32 cmd_err;
+       int cmd_err;
        struct i2c_adapter adapter;
        const struct mxs_i2c_speed_config *speed;
 
@@ -316,7 +316,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
        if (msg->len == 0)
                return -EINVAL;
 
-       init_completion(&i2c->cmd_complete);
+       INIT_COMPLETION(i2c->cmd_complete);
        i2c->cmd_err = 0;
 
        ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
@@ -473,6 +473,8 @@ static int mxs_i2c_probe(struct platform_device *pdev)
        i2c->dev = dev;
        i2c->speed = &mxs_i2c_95kHz_config;
 
+       init_completion(&i2c->cmd_complete);
+
        if (dev->of_node) {
                err = mxs_i2c_get_ofdata(i2c);
                if (err)
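
For context on the mxs-i2c change just above: init_completion() initialises the whole completion (counter plus wait queue) and belongs in probe, while INIT_COMPLETION() only resets the done counter, which is what a per-transfer reuse of the same completion needs. A rough userspace analogue of that split, with a trivial struct standing in for struct completion (illustrative only):

    #include <stdbool.h>
    #include <stdio.h>

    /* crude stand-in for struct completion */
    struct completion {
            bool done;
            int  waiters;         /* stands in for the wait queue head */
    };

    static void init_completion(struct completion *c)    /* once, in probe */
    {
            c->done = false;
            c->waiters = 0;       /* full initialisation */
    }

    static void reinit_completion(struct completion *c)  /* per transfer */
    {
            c->done = false;      /* reset only the counter */
    }

    int main(void)
    {
            struct completion cmd_complete;

            init_completion(&cmd_complete);
            cmd_complete.done = true;               /* a transfer finished */
            reinit_completion(&cmd_complete);       /* ready for the next  */
            printf("done = %d\n", cmd_complete.done);
            return 0;
    }
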
index 20d41bfa7c1989ccc6001730ce11d1117aa3fa13..4cc2f0528c8869af7d96a99e913b84209530d7c4 100644 (file)
@@ -803,7 +803,7 @@ static int errata_omap3_i462(struct omap_i2c_dev *dev)
                        if (stat & OMAP_I2C_STAT_AL) {
                                dev_err(dev->dev, "Arbitration lost\n");
                                dev->cmd_err |= OMAP_I2C_STAT_AL;
-                               omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
+                               omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
                        }
 
                        return -EIO;
@@ -963,7 +963,7 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
                                i2c_omap_errata_i207(dev, stat);
 
                        omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
-                       break;
+                       continue;
                }
 
                if (stat & OMAP_I2C_STAT_RRDY) {
@@ -989,7 +989,7 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
                                break;
 
                        omap_i2c_ack_stat(dev, OMAP_I2C_STAT_XDR);
-                       break;
+                       continue;
                }
 
                if (stat & OMAP_I2C_STAT_XRDY) {
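
In the omap-i2c hunks above, the threaded handler walks the latched status bits in a loop; replacing break with continue means that after acknowledging RDR/XDR the loop presumably goes back to examine the remaining bits instead of abandoning the rest of the interrupt. A tiny illustration of the difference in a flag-processing loop (generic code, not the driver's):

    #include <stdio.h>

    #define FLAG_A (1 << 0)
    #define FLAG_B (1 << 1)

    int main(void)
    {
            unsigned int stat = FLAG_A | FLAG_B;

            while (stat) {
                    if (stat & FLAG_A) {
                            printf("handled A\n");
                            stat &= ~FLAG_A;
                            continue;   /* break here would leave B unhandled */
                    }
                    if (stat & FLAG_B) {
                            printf("handled B\n");
                            stat &= ~FLAG_B;
                            continue;
                    }
                    break;              /* nothing left that we recognise */
            }
            return 0;
    }
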
index 3f1818b87974f14b77e1bce19a5116b1e9f7359e..e03381aee34f526cc8c8c66c67293fffdde97689 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/slab.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
+#include <linux/of_i2c.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/io.h>
@@ -328,6 +329,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
        adap->algo = &i2c_sirfsoc_algo;
        adap->algo_data = siic;
 
+       adap->dev.of_node = pdev->dev.of_node;
        adap->dev.parent = &pdev->dev;
        adap->nr = pdev->id;
 
@@ -371,6 +373,8 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
 
        clk_disable(clk);
 
+       of_i2c_register_devices(adap);
+
        dev_info(&pdev->dev, " I2C adapter ready to operate\n");
 
        return 0;
index 1e44d04d1b22357294d0279b23d1c2e501697556..a43c0ce5e3d8728918afa810842292d00efdac8c 100644 (file)
@@ -167,7 +167,7 @@ static int i2c_mux_pinctrl_probe(struct platform_device *pdev)
        }
 
        mux->busses = devm_kzalloc(&pdev->dev,
-                                  sizeof(mux->busses) * mux->pdata->bus_count,
+                                  sizeof(*mux->busses) * mux->pdata->bus_count,
                                   GFP_KERNEL);
        if (!mux->busses) {
                dev_err(&pdev->dev, "Cannot allocate busses\n");
index 4ba384f1ab544976621e6dc48c46dd0453687fef..2df9414a72f7169fe1fccaec285c633ca45f75d1 100644 (file)
@@ -448,8 +448,6 @@ static int intel_idle_probe(void)
        else
                on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
 
-       register_cpu_notifier(&cpu_hotplug_notifier);
-
        pr_debug(PREFIX "v" INTEL_IDLE_VERSION
                " model 0x%X\n", boot_cpu_data.x86_model);
 
@@ -612,6 +610,7 @@ static int __init intel_idle_init(void)
                        return retval;
                }
        }
+       register_cpu_notifier(&cpu_hotplug_notifier);
 
        return 0;
 }
index fe4bcd7c5b12ea9ef5e4f23b56c25169038ad570..05e996fafc9d42a8dd76031dedbfd6b1d8520d0a 100644 (file)
@@ -8,6 +8,7 @@ config HID_SENSOR_ACCEL_3D
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        select HID_SENSOR_IIO_COMMON
+       select HID_SENSOR_IIO_TRIGGER
        tristate "HID Accelerometers 3D"
        help
          Say yes here to build support for the HID SENSOR
index 4a5f639bc68492e6d912db45b5460d050b60cad3..bbad9b94cd75770b136499728497ff73ce4067d9 100644 (file)
@@ -411,7 +411,11 @@ static int ad7266_probe(struct spi_device *spi)
                if (ret)
                        goto error_put_reg;
 
-               st->vref_uv = regulator_get_voltage(st->reg);
+               ret = regulator_get_voltage(st->reg);
+               if (ret < 0)
+                       goto error_disable_reg;
+
+               st->vref_uv = ret;
        } else {
                /* Use internal reference */
                st->vref_uv = 2500000;
index 04b013561f0fc874fe7a2df1b0409b2aa3c9d4df..a526c0e3aaa84be092d25789d3ccaffd0d056204 100644 (file)
@@ -80,7 +80,7 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
                *timestamp = pf->timestamp;
        }
 
-       iio_push_to_buffers(indio_dev, (u8 *)st->buffer);
+       iio_push_to_buffers(idev, (u8 *)st->buffer);
 
        iio_trigger_notify_done(idev->trig);
 
index b5669be6f396f53122b8f704c63a8e7e32e962d8..03b25b3dc71eb95703d2131230e2f11cb2dae03c 100644 (file)
@@ -1605,19 +1605,20 @@ static int max1363_probe(struct i2c_client *client,
 
        return 0;
 error_free_irq:
-       free_irq(st->client->irq, indio_dev);
+       if (client->irq)
+               free_irq(st->client->irq, indio_dev);
 error_uninit_buffer:
        iio_buffer_unregister(indio_dev);
 error_cleanup_buffer:
        max1363_buffer_cleanup(indio_dev);
 error_free_available_scan_masks:
        kfree(indio_dev->available_scan_masks);
-error_unregister_map:
-       iio_map_array_unregister(indio_dev, client->dev.platform_data);
 error_disable_reg:
        regulator_disable(st->reg);
 error_put_reg:
        regulator_put(st->reg);
+error_unregister_map:
+       iio_map_array_unregister(indio_dev, client->dev.platform_data);
 error_free_device:
        iio_device_free(indio_dev);
 error_out:
@@ -1635,10 +1636,8 @@ static int max1363_remove(struct i2c_client *client)
        iio_buffer_unregister(indio_dev);
        max1363_buffer_cleanup(indio_dev);
        kfree(indio_dev->available_scan_masks);
-       if (!IS_ERR(st->reg)) {
-               regulator_disable(st->reg);
-               regulator_put(st->reg);
-       }
+       regulator_disable(st->reg);
+       regulator_put(st->reg);
        iio_map_array_unregister(indio_dev, client->dev.platform_data);
        iio_device_free(indio_dev);
 
index ae10778da7aa1e7b1c11b558a562e998e78cfdcb..1178121b55b032989257e3167c6724815458ec26 100644 (file)
@@ -6,7 +6,7 @@ menu "Hid Sensor IIO Common"
 config HID_SENSOR_IIO_COMMON
        tristate "Common modules for all HID Sensor IIO drivers"
        depends on HID_SENSOR_HUB
-       select IIO_TRIGGER if IIO_BUFFER
+       select HID_SENSOR_IIO_TRIGGER if IIO_BUFFER
        help
          Say yes here to build support for HID sensor to use
          HID sensor common processing for attributes and IIO triggers.
@@ -14,6 +14,17 @@ config HID_SENSOR_IIO_COMMON
          HID sensor drivers, this module contains processing for those
          attributes.
 
+config HID_SENSOR_IIO_TRIGGER
+       tristate "Common module (trigger) for all HID Sensor IIO drivers"
+       depends on HID_SENSOR_HUB && HID_SENSOR_IIO_COMMON
+       select IIO_TRIGGER
+       help
+         Say yes here to build trigger support for HID sensors.
+         Triggers will be sent when all requested attributes have been read.

+
+         If this driver is compiled as a module, it will be named
+         hid-sensor-trigger.
+
 config HID_SENSOR_ENUM_BASE_QUIRKS
        bool "ENUM base quirks for HID Sensor IIO drivers"
        depends on HID_SENSOR_IIO_COMMON
index 1f463e00c2426b706223827afc7363614d80691f..22e7c5a82325e13c3cfbded83dc41f0de45385ae 100644 (file)
@@ -3,4 +3,5 @@
 #
 
 obj-$(CONFIG_HID_SENSOR_IIO_COMMON) += hid-sensor-iio-common.o
-hid-sensor-iio-common-y := hid-sensor-attributes.o hid-sensor-trigger.o
+obj-$(CONFIG_HID_SENSOR_IIO_TRIGGER) += hid-sensor-trigger.o
+hid-sensor-iio-common-y := hid-sensor-attributes.o
index 6c7898c765d9599cb5a531a709c45d9a6a4944b0..483fc379a2da9ca64a29fb13313b16f3d76d8b43 100644 (file)
@@ -406,7 +406,11 @@ static int ad5380_probe(struct device *dev, struct regmap *regmap,
                        goto error_free_reg;
                }
 
-               st->vref = regulator_get_voltage(st->vref_reg);
+               ret = regulator_get_voltage(st->vref_reg);
+               if (ret < 0)
+                       goto error_disable_reg;
+
+               st->vref = ret;
        } else {
                st->vref = st->chip_info->int_vref;
                ctrl |= AD5380_CTRL_INT_VREF_EN;
index 29f653dab2f7bf7270f086737222cb48d5ed76e4..f5583aedfb597cf927e3e39969b80e5e0184085e 100644 (file)
@@ -226,7 +226,11 @@ static int ad5446_probe(struct device *dev, const char *name,
                if (ret)
                        goto error_put_reg;
 
-               voltage_uv = regulator_get_voltage(reg);
+               ret = regulator_get_voltage(reg);
+               if (ret < 0)
+                       goto error_disable_reg;
+
+               voltage_uv = ret;
        }
 
        indio_dev = iio_device_alloc(sizeof(*st));
index b2a31a0468eddc1d689852126877ce84842bd980..0661829f27737ed4f70e1a29137c6c8a18169457 100644 (file)
@@ -296,7 +296,11 @@ static int ad5504_probe(struct spi_device *spi)
                if (ret)
                        goto error_put_reg;
 
-               voltage_uv = regulator_get_voltage(reg);
+               ret = regulator_get_voltage(reg);
+               if (ret < 0)
+                       goto error_disable_reg;
+
+               voltage_uv = ret;
        }
 
        spi_set_drvdata(spi, indio_dev);
index e9947969f9fefce62047448e09e8ae4203bb5818..f6e116627b714e044569c1ef5e85fff59dfc5c20 100644 (file)
@@ -238,7 +238,11 @@ static int ad5624r_probe(struct spi_device *spi)
                if (ret)
                        goto error_put_reg;
 
-               voltage_uv = regulator_get_voltage(st->reg);
+               ret = regulator_get_voltage(st->reg);
+               if (ret < 0)
+                       goto error_disable_reg;
+
+               voltage_uv = ret;
        }
 
        spi_set_drvdata(spi, indio_dev);
index 36e51382ae528116201f67cb2cbd1d7b1372261a..ca9609d7a15c256a78b058a26c9191542e06a8f3 100644 (file)
@@ -332,7 +332,11 @@ static int ad5686_probe(struct spi_device *spi)
                if (ret)
                        goto error_put_reg;
 
-               voltage_uv = regulator_get_voltage(st->reg);
+               ret = regulator_get_voltage(st->reg);
+               if (ret < 0)
+                       goto error_disable_reg;
+
+               voltage_uv = ret;
        }
 
        st->chip_info =
index c84180f23139fd25191539ebf266fb122ca85950..6407b5407dddec57fe53ca20f9cba3e5bf90358e 100644 (file)
@@ -365,7 +365,11 @@ static int ad5791_probe(struct spi_device *spi)
                if (ret)
                        goto error_put_reg_pos;
 
-               pos_voltage_uv = regulator_get_voltage(st->reg_vdd);
+               ret = regulator_get_voltage(st->reg_vdd);
+               if (ret < 0)
+                       goto error_disable_reg_pos;
+
+               pos_voltage_uv = ret;
        }
 
        st->reg_vss = regulator_get(&spi->dev, "vss");
@@ -374,7 +378,11 @@ static int ad5791_probe(struct spi_device *spi)
                if (ret)
                        goto error_put_reg_neg;
 
-               neg_voltage_uv = regulator_get_voltage(st->reg_vss);
+               ret = regulator_get_voltage(st->reg_vss);
+               if (ret < 0)
+                       goto error_disable_reg_neg;
+
+               neg_voltage_uv = ret;
        }
 
        st->pwr_down = true;
@@ -428,6 +436,7 @@ error_put_reg_neg:
        if (!IS_ERR(st->reg_vss))
                regulator_put(st->reg_vss);
 
+error_disable_reg_pos:
        if (!IS_ERR(st->reg_vdd))
                regulator_disable(st->reg_vdd);
 error_put_reg_pos:
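
The added error_disable_reg_pos label in ad5791 keeps the error path unwinding strictly in reverse order of acquisition: a failure while setting up the second (vss) regulator first disables the already-enabled vdd regulator and then falls through to put it. A compact sketch of that goto-unwind idiom with stand-in steps (not the driver's calls):

    #include <stdio.h>

    static int acquire(const char *what, int fail)
    {
            printf("acquire %s\n", what);
            return fail ? -1 : 0;
    }

    static void release(const char *what)
    {
            printf("release %s\n", what);
    }

    static int probe(void)
    {
            if (acquire("vdd", 0))
                    return -1;
            if (acquire("vss", 1))          /* simulate a failure here */
                    goto err_release_vdd;

            return 0;

    err_release_vdd:                        /* unwind in reverse order */
            release("vdd");
            return -1;
    }

    int main(void)
    {
            return probe() ? 1 : 0;
    }
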
index e5033b4cfba0cf3c1c88fba9d766c5d433e69a60..a884252ac66b477db3a0fce966b40fb76ae456b2 100644 (file)
@@ -173,7 +173,7 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
                        } while ((st->r1_mod > ADF4350_MAX_MODULUS) && r_cnt);
                } while (r_cnt == 0);
 
-               tmp = freq * (u64)st->r1_mod + (st->fpfd > 1);
+               tmp = freq * (u64)st->r1_mod + (st->fpfd >> 1);
                do_div(tmp, st->fpfd); /* Div round closest (n + d/2)/d */
                st->r0_fract = do_div(tmp, st->r1_mod);
                st->r0_int = tmp;
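
In the adf4350 hunk, (st->fpfd > 1) is a comparison that yields 0 or 1, whereas the intended term is half the PFD frequency, (st->fpfd >> 1), so that the subsequent do_div() rounds to the nearest integer as the comment says: (n + d/2) / d. A quick numeric check of that formula:

    #include <stdio.h>
    #include <stdint.h>

    /* round-to-nearest integer division: (n + d/2) / d */
    static uint64_t div_round_closest(uint64_t n, uint64_t d)
    {
            return (n + (d >> 1)) / d;
    }

    int main(void)
    {
            /* 10 / 4 = 2.5 -> rounds to 3; truncation would give 2 */
            printf("%llu\n", (unsigned long long)div_round_closest(10, 4));
            /* 9 / 4 = 2.25 -> rounds to 2 */
            printf("%llu\n", (unsigned long long)div_round_closest(9, 4));
            return 0;
    }
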
index 48ed1483ff27260e8ac24634cc670acf32f33172..96b68f63a902580c6a45c7265641397ffdda9d52 100644 (file)
@@ -17,6 +17,7 @@ config HID_SENSOR_GYRO_3D
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        select HID_SENSOR_IIO_COMMON
+       select HID_SENSOR_IIO_TRIGGER
        tristate "HID Gyroscope 3D"
        help
          Say yes here to build support for the HID SENSOR
index 1763c9bcb98ae19817086c4713a6067552d9e74c..dbf80abc834fd3f848a7945b9ddbf38da5a968d7 100644 (file)
@@ -47,6 +47,7 @@ config HID_SENSOR_ALS
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        select HID_SENSOR_IIO_COMMON
+       select HID_SENSOR_IIO_TRIGGER
        tristate "HID ALS"
        help
          Say yes here to build support for the HID SENSOR
index c1f0cdd57037ee863e0ba12d639c4bc536a80a5d..ff11d68225cfb6d2e8f819345eb15907857601d4 100644 (file)
@@ -8,6 +8,7 @@ config HID_SENSOR_MAGNETOMETER_3D
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        select HID_SENSOR_IIO_COMMON
+       select HID_SENSOR_IIO_TRIGGER
        tristate "HID Magenetometer 3D"
        help
          Say yes here to build support for the HID SENSOR
index 4850d03870c297667e65c96b50d5ccec50b5aa01..35275099cafd660cd2fc20ab865d08c417f51735 100644 (file)
@@ -263,20 +263,15 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
                struct qib_qp __rcu **qpp;
 
                qpp = &dev->qp_table[n];
-               q = rcu_dereference_protected(*qpp,
-                       lockdep_is_held(&dev->qpt_lock));
-               for (; q; qpp = &q->next) {
+               for (; (q = rcu_dereference_protected(*qpp,
+                               lockdep_is_held(&dev->qpt_lock))) != NULL;
+                               qpp = &q->next)
                        if (q == qp) {
                                atomic_dec(&qp->refcount);
                                *qpp = qp->next;
                                rcu_assign_pointer(qp->next, NULL);
-                               q = rcu_dereference_protected(*qpp,
-                                       lockdep_is_held(&dev->qpt_lock));
                                break;
                        }
-                       q = rcu_dereference_protected(*qpp,
-                               lockdep_is_held(&dev->qpt_lock));
-               }
        }
 
        spin_unlock_irqrestore(&dev->qpt_lock, flags);
index 03103d2bd641e715ce8ab18c147173bdc5624f21..67b0c1d23678d26565981cf9815993027c0a1b5e 100644 (file)
@@ -741,6 +741,9 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 
        tx_req->mapping = addr;
 
+       skb_orphan(skb);
+       skb_dst_drop(skb);
+
        rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
                       addr, skb->len);
        if (unlikely(rc)) {
@@ -752,9 +755,6 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
                dev->trans_start = jiffies;
                ++tx->tx_head;
 
-               skb_orphan(skb);
-               skb_dst_drop(skb);
-
                if (++priv->tx_outstanding == ipoib_sendq_size) {
                        ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
                                  tx->qp->qp_num);
index a1bca70e20aa5b0ea33d4d5410e35b79531f77b6..2cfa76f5d99eac87bf788eb41bb2a7c3815ec700 100644 (file)
@@ -600,6 +600,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                netif_stop_queue(dev);
        }
 
+       skb_orphan(skb);
+       skb_dst_drop(skb);
+
        rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
                       address->ah, qpn, tx_req, phead, hlen);
        if (unlikely(rc)) {
@@ -615,9 +618,6 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 
                address->last_send = priv->tx_head;
                ++priv->tx_head;
-
-               skb_orphan(skb);
-               skb_dst_drop(skb);
        }
 
        if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
index 55f7e57d4e4279ebe089dd687f08d9457bc65cca..38b523a1ece0634e7855b5eae62b94b6fe7fd160 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 menu "Input device support"
-       depends on !S390 && !UML
+       depends on !UML
 
 config INPUT
        tristate "Generic input layer (needed for keyboard, mouse, ...)" if EXPERT
index 47a6009dbf43f04ba6dcd883513127857591a70f..71db1930573f45a4226fcf2ed84ab0b19a4c6aeb 100644 (file)
@@ -18,6 +18,7 @@ static void copy_abs(struct input_dev *dev, unsigned int dst, unsigned int src)
 {
        if (dev->absinfo && test_bit(src, dev->absbit)) {
                dev->absinfo[dst] = dev->absinfo[src];
+               dev->absinfo[dst].fuzz = 0;
                dev->absbit[BIT_WORD(dst)] |= BIT_MASK(dst);
        }
 }
index ce01332f7b3a8c770b1c31875b4d555dd38e56a3..c0446992892533b24d3853d043cf088451db5662 100644 (file)
@@ -1785,12 +1785,13 @@ static void devm_input_device_release(struct device *dev, void *res)
  * its driver (or binding fails). Once managed input device is allocated,
  * it is ready to be set up and registered in the same fashion as regular
  * input device. There are no special devm_input_device_[un]register()
- * variants, regular ones work with both managed and unmanaged devices.
+ * variants, regular ones work with both managed and unmanaged devices,
+ * should you need them. In most cases, however, managed input devices need
+ * not be explicitly unregistered or freed.
  *
  * NOTE: the owner device is set up as parent of input device and users
  * should not override it.
  */
-
 struct input_dev *devm_input_allocate_device(struct device *dev)
 {
        struct input_dev *input;
@@ -2004,6 +2005,17 @@ static void devm_input_device_unregister(struct device *dev, void *res)
  * Once device has been successfully registered it can be unregistered
  * with input_unregister_device(); input_free_device() should not be
  * called in this case.
+ *
+ * Note that this function is also used to register managed input devices
+ * (ones allocated with devm_input_allocate_device()). Such managed input
+ * devices need not be explicitly unregistered or freed, their tear down
+ * is controlled by the devres infrastructure. It is also worth noting
+ * that tear down of managed input devices is internally a 2-step process:
+ * registered managed input device is first unregistered, but stays in
+ * memory and can still handle input_event() calls (although events will
+ * not be delivered anywhere). The freeing of managed input device will
+ * happen later, when devres stack is unwound to the point where device
+ * allocation was made.
  */
 int input_register_device(struct input_dev *dev)
 {
index 358cd7ee905b7ff4f9a7498e277341037437bf19..7cd74e29cbc87a6495277ecd74d7135ebcd75ad3 100644 (file)
@@ -162,7 +162,7 @@ static unsigned int get_time_pit(void)
 #define GET_TIME(x)    do { x = get_cycles(); } while (0)
 #define DELTA(x,y)     ((y)-(x))
 #define TIME_NAME      "PCC"
-#elif defined(CONFIG_MN10300)
+#elif defined(CONFIG_MN10300) || defined(CONFIG_TILE)
 #define GET_TIME(x)    do { x = get_cycles(); } while (0)
 #define DELTA(x, y)    ((x) - (y))
 #define TIME_NAME      "TSC"
index f8f892b076e8e229e45762fd39c5d8bea74f73c5..b76ac580703ce5dc9ef97fac6620adc47ea44273 100644 (file)
@@ -12,7 +12,7 @@
  * the Free Software Foundation.
 */
 
-/* #define WK0701_DEBUG */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #define RESERVE 20000
 #define SYNC_PULSE 1306000
@@ -67,6 +67,7 @@ static inline void walkera0701_parse_frame(struct walkera_dev *w)
 {
        int i;
        int val1, val2, val3, val4, val5, val6, val7, val8;
+       int magic, magic_bit;
        int crc1, crc2;
 
        for (crc1 = crc2 = i = 0; i < 10; i++) {
@@ -102,17 +103,12 @@ static inline void walkera0701_parse_frame(struct walkera_dev *w)
        val8 = (w->buf[18] & 1) << 8 | (w->buf[19] << 4) | w->buf[20];
        val8 *= (w->buf[18] & 2) - 1;   /*sign */
 
-#ifdef WK0701_DEBUG
-       {
-               int magic, magic_bit;
-               magic = (w->buf[21] << 4) | w->buf[22];
-               magic_bit = (w->buf[24] & 8) >> 3;
-               printk(KERN_DEBUG
-                      "walkera0701: %4d %4d %4d %4d  %4d %4d %4d %4d (magic %2x %d)\n",
-                      val1, val2, val3, val4, val5, val6, val7, val8, magic,
-                      magic_bit);
-       }
-#endif
+       magic = (w->buf[21] << 4) | w->buf[22];
+       magic_bit = (w->buf[24] & 8) >> 3;
+       pr_debug("%4d %4d %4d %4d  %4d %4d %4d %4d (magic %2x %d)\n",
+                val1, val2, val3, val4, val5, val6, val7, val8,
+                magic, magic_bit);
+
        input_report_abs(w->input_dev, ABS_X, val2);
        input_report_abs(w->input_dev, ABS_Y, val1);
        input_report_abs(w->input_dev, ABS_Z, val6);
@@ -187,6 +183,9 @@ static int walkera0701_open(struct input_dev *dev)
 {
        struct walkera_dev *w = input_get_drvdata(dev);
 
+       if (parport_claim(w->pardevice))
+               return -EBUSY;
+
        parport_enable_irq(w->parport);
        return 0;
 }
@@ -197,40 +196,51 @@ static void walkera0701_close(struct input_dev *dev)
 
        parport_disable_irq(w->parport);
        hrtimer_cancel(&w->timer);
+
+       parport_release(w->pardevice);
 }
 
 static int walkera0701_connect(struct walkera_dev *w, int parport)
 {
-       int err = -ENODEV;
+       int error;
 
        w->parport = parport_find_number(parport);
-       if (w->parport == NULL)
+       if (!w->parport) {
+               pr_err("parport %d does not exist\n", parport);
                return -ENODEV;
+       }
 
        if (w->parport->irq == -1) {
-               printk(KERN_ERR "walkera0701: parport without interrupt\n");
-               goto init_err;
+               pr_err("parport %d does not have interrupt assigned\n",
+                       parport);
+               error = -EINVAL;
+               goto err_put_parport;
        }
 
-       err = -EBUSY;
        w->pardevice = parport_register_device(w->parport, "walkera0701",
                                    NULL, NULL, walkera0701_irq_handler,
                                    PARPORT_DEV_EXCL, w);
-       if (!w->pardevice)
-               goto init_err;
-
-       if (parport_negotiate(w->pardevice->port, IEEE1284_MODE_COMPAT))
-               goto init_err1;
+       if (!w->pardevice) {
+               pr_err("failed to register parport device\n");
+               error = -EIO;
+               goto err_put_parport;
+       }
 
-       if (parport_claim(w->pardevice))
-               goto init_err1;
+       if (parport_negotiate(w->pardevice->port, IEEE1284_MODE_COMPAT)) {
+               pr_err("failed to negotiate parport mode\n");
+               error = -EIO;
+               goto err_unregister_device;
+       }
 
        hrtimer_init(&w->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        w->timer.function = timer_handler;
 
        w->input_dev = input_allocate_device();
-       if (!w->input_dev)
-               goto init_err2;
+       if (!w->input_dev) {
+               pr_err("failed to allocate input device\n");
+               error = -ENOMEM;
+               goto err_unregister_device;
+       }
 
        input_set_drvdata(w->input_dev, w);
        w->input_dev->name = "Walkera WK-0701 TX";
@@ -241,6 +251,7 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
        w->input_dev->id.vendor = 0x0001;
        w->input_dev->id.product = 0x0001;
        w->input_dev->id.version = 0x0100;
+       w->input_dev->dev.parent = w->parport->dev;
        w->input_dev->open = walkera0701_open;
        w->input_dev->close = walkera0701_close;
 
@@ -254,27 +265,26 @@ static int walkera0701_connect(struct walkera_dev *w, int parport)
        input_set_abs_params(w->input_dev, ABS_RUDDER, -512, 512, 0, 0);
        input_set_abs_params(w->input_dev, ABS_MISC, -512, 512, 0, 0);
 
-       err = input_register_device(w->input_dev);
-       if (err)
-               goto init_err3;
+       error = input_register_device(w->input_dev);
+       if (error) {
+               pr_err("failed to register input device\n");
+               goto err_free_input_dev;
+       }
 
        return 0;
 
- init_err3:
+err_free_input_dev:
        input_free_device(w->input_dev);
- init_err2:
-       parport_release(w->pardevice);
- init_err1:
+err_unregister_device:
        parport_unregister_device(w->pardevice);
- init_err:
+err_put_parport:
        parport_put_port(w->parport);
-       return err;
+       return error;
 }
 
 static void walkera0701_disconnect(struct walkera_dev *w)
 {
        input_unregister_device(w->input_dev);
-       parport_release(w->pardevice);
        parport_unregister_device(w->pardevice);
        parport_put_port(w->parport);
 }
index 5a240c60342d8a8e7c3381a576103c2949bdce1d..ac050066700086d2477d1e56f459bc6bc6f313d2 100644 (file)
@@ -224,7 +224,7 @@ config KEYBOARD_TCA6416
 
 config KEYBOARD_TCA8418
        tristate "TCA8418 Keypad Support"
-       depends on I2C
+       depends on I2C && GENERIC_HARDIRQS
        select INPUT_MATRIXKMAP
        help
          This driver implements basic keypad functionality
@@ -303,7 +303,7 @@ config KEYBOARD_HP7XX
 
 config KEYBOARD_LM8323
        tristate "LM8323 keypad chip"
-       depends on I2C
+       depends on I2C && GENERIC_HARDIRQS
        depends on LEDS_CLASS
        help
          If you say yes here you get support for the National Semiconductor
@@ -420,7 +420,7 @@ config KEYBOARD_NOMADIK
 
 config KEYBOARD_TEGRA
        tristate "NVIDIA Tegra internal matrix keyboard controller support"
-       depends on ARCH_TEGRA
+       depends on ARCH_TEGRA && OF
        select INPUT_MATRIXKMAP
        help
          Say Y here if you want to use a matrix keyboard connected directly
@@ -479,6 +479,16 @@ config KEYBOARD_SAMSUNG
          To compile this driver as a module, choose M here: the
          module will be called samsung-keypad.
 
+config KEYBOARD_GOLDFISH_EVENTS
+       depends on GOLDFISH
+       tristate "Generic Input Event device for Goldfish"
+       help
+         Say Y here to get an input event device for the Goldfish virtual
+         device emulator.
+
+         To compile this driver as a module, choose M here: the
+         module will be called goldfish-events.
+
 config KEYBOARD_STOWAWAY
        tristate "Stowaway keyboard"
        select SERIO
index 44e76002f54bf3c4d7250c3ab345b70d89195ad4..49b16453d00ef84184d2beddbee95d7c3a3e33ea 100644 (file)
@@ -13,6 +13,7 @@ obj-$(CONFIG_KEYBOARD_ATKBD)          += atkbd.o
 obj-$(CONFIG_KEYBOARD_BFIN)            += bf54x-keys.o
 obj-$(CONFIG_KEYBOARD_DAVINCI)         += davinci_keyscan.o
 obj-$(CONFIG_KEYBOARD_EP93XX)          += ep93xx_keypad.o
+obj-$(CONFIG_KEYBOARD_GOLDFISH_EVENTS) += goldfish_events.o
 obj-$(CONFIG_KEYBOARD_GPIO)            += gpio_keys.o
 obj-$(CONFIG_KEYBOARD_GPIO_POLLED)     += gpio_keys_polled.o
 obj-$(CONFIG_KEYBOARD_TCA6416)         += tca6416-keypad.o
index add5ffd9fe26e675daf2548928175cdf2b0bdb73..2626773ff29b956de97d5c62d383e36e13ee4868 100644 (file)
@@ -676,6 +676,39 @@ static inline void atkbd_disable(struct atkbd *atkbd)
        serio_continue_rx(atkbd->ps2dev.serio);
 }
 
+static int atkbd_activate(struct atkbd *atkbd)
+{
+       struct ps2dev *ps2dev = &atkbd->ps2dev;
+
+/*
+ * Enable the keyboard to receive keystrokes.
+ */
+
+       if (ps2_command(ps2dev, NULL, ATKBD_CMD_ENABLE)) {
+               dev_err(&ps2dev->serio->dev,
+                       "Failed to enable keyboard on %s\n",
+                       ps2dev->serio->phys);
+               return -1;
+       }
+
+       return 0;
+}
+
+/*
+ * atkbd_deactivate() resets and disables the keyboard from sending
+ * keystrokes.
+ */
+
+static void atkbd_deactivate(struct atkbd *atkbd)
+{
+       struct ps2dev *ps2dev = &atkbd->ps2dev;
+
+       if (ps2_command(ps2dev, NULL, ATKBD_CMD_RESET_DIS))
+               dev_err(&ps2dev->serio->dev,
+                       "Failed to deactivate keyboard on %s\n",
+                       ps2dev->serio->phys);
+}
+
 /*
  * atkbd_probe() probes for an AT keyboard on a serio port.
  */
@@ -726,11 +759,17 @@ static int atkbd_probe(struct atkbd *atkbd)
 
        if (atkbd->id == 0xaca1 && atkbd->translated) {
                dev_err(&ps2dev->serio->dev,
-                       "NCD terminal keyboards are only supported on non-translating controlelrs. "
+                       "NCD terminal keyboards are only supported on non-translating controllers. "
                        "Use i8042.direct=1 to disable translation.\n");
                return -1;
        }
 
+/*
+ * Make sure nothing is coming from the keyboard and disturbs our
+ * internal state.
+ */
+       atkbd_deactivate(atkbd);
+
        return 0;
 }
 
@@ -825,24 +864,6 @@ static int atkbd_reset_state(struct atkbd *atkbd)
        return 0;
 }
 
-static int atkbd_activate(struct atkbd *atkbd)
-{
-       struct ps2dev *ps2dev = &atkbd->ps2dev;
-
-/*
- * Enable the keyboard to receive keystrokes.
- */
-
-       if (ps2_command(ps2dev, NULL, ATKBD_CMD_ENABLE)) {
-               dev_err(&ps2dev->serio->dev,
-                       "Failed to enable keyboard on %s\n",
-                       ps2dev->serio->phys);
-               return -1;
-       }
-
-       return 0;
-}
-
 /*
  * atkbd_cleanup() restores the keyboard state so that BIOS is happy after a
  * reboot.
@@ -1150,7 +1171,6 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
 
                atkbd->set = atkbd_select_set(atkbd, atkbd_set, atkbd_extra);
                atkbd_reset_state(atkbd);
-               atkbd_activate(atkbd);
 
        } else {
                atkbd->set = 2;
@@ -1165,6 +1185,8 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
                goto fail3;
 
        atkbd_enable(atkbd);
+       if (serio->write)
+               atkbd_activate(atkbd);
 
        err = input_register_device(atkbd->dev);
        if (err)
@@ -1208,8 +1230,6 @@ static int atkbd_reconnect(struct serio *serio)
                if (atkbd->set != atkbd_select_set(atkbd, atkbd->set, atkbd->extra))
                        goto out;
 
-               atkbd_activate(atkbd);
-
                /*
                 * Restore LED state and repeat rate. While input core
                 * will do this for us at resume time reconnect may happen
@@ -1223,7 +1243,17 @@ static int atkbd_reconnect(struct serio *serio)
 
        }
 
+       /*
+        * Reset our state machine in case reconnect happened in the middle
+        * of a multi-byte scancode.
+        */
+       atkbd->xl_bit = 0;
+       atkbd->emul = 0;
+
        atkbd_enable(atkbd);
+       if (atkbd->write)
+               atkbd_activate(atkbd);
+
        retval = 0;
 
  out:
diff --git a/drivers/input/keyboard/goldfish_events.c b/drivers/input/keyboard/goldfish_events.c
new file mode 100644 (file)
index 0000000..9f60a2e
--- /dev/null
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (C) 2012 Intel, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+enum {
+       REG_READ        = 0x00,
+       REG_SET_PAGE    = 0x00,
+       REG_LEN         = 0x04,
+       REG_DATA        = 0x08,
+
+       PAGE_NAME       = 0x00000,
+       PAGE_EVBITS     = 0x10000,
+       PAGE_ABSDATA    = 0x20000 | EV_ABS,
+};
+
+struct event_dev {
+       struct input_dev *input;
+       int irq;
+       void __iomem *addr;
+       char name[0];
+};
+
+static irqreturn_t events_interrupt(int irq, void *dev_id)
+{
+       struct event_dev *edev = dev_id;
+       unsigned type, code, value;
+
+       type = __raw_readl(edev->addr + REG_READ);
+       code = __raw_readl(edev->addr + REG_READ);
+       value = __raw_readl(edev->addr + REG_READ);
+
+       input_event(edev->input, type, code, value);
+       input_sync(edev->input);
+       return IRQ_HANDLED;
+}
+
+static void events_import_bits(struct event_dev *edev,
+                       unsigned long bits[], unsigned type, size_t count)
+{
+       void __iomem *addr = edev->addr;
+       int i, j;
+       size_t size;
+       uint8_t val;
+
+       __raw_writel(PAGE_EVBITS | type, addr + REG_SET_PAGE);
+
+       size = __raw_readl(addr + REG_LEN) * 8;
+       if (size < count)
+               count = size;
+
+       addr += REG_DATA;
+       for (i = 0; i < count; i += 8) {
+               val = __raw_readb(addr++);
+               for (j = 0; j < 8; j++)
+                       if (val & 1 << j)
+                               set_bit(i + j, bits);
+       }
+}
+
+static void events_import_abs_params(struct event_dev *edev)
+{
+       struct input_dev *input_dev = edev->input;
+       void __iomem *addr = edev->addr;
+       u32 val[4];
+       int count;
+       int i, j;
+
+       __raw_writel(PAGE_ABSDATA, addr + REG_SET_PAGE);
+
+       count = __raw_readl(addr + REG_LEN) / sizeof(val);
+       if (count > ABS_MAX)
+               count = ABS_MAX;
+
+       for (i = 0; i < count; i++) {
+               if (!test_bit(i, input_dev->absbit))
+                       continue;
+
+               for (j = 0; j < ARRAY_SIZE(val); j++) {
+                       int offset = (i * ARRAY_SIZE(val) + j) * sizeof(u32);
+                       val[j] = __raw_readl(edev->addr + REG_DATA + offset);
+               }
+
+               input_set_abs_params(input_dev, i,
+                                    val[0], val[1], val[2], val[3]);
+       }
+}
+
+static int events_probe(struct platform_device *pdev)
+{
+       struct input_dev *input_dev;
+       struct event_dev *edev;
+       struct resource *res;
+       unsigned keymapnamelen;
+       void __iomem *addr;
+       int irq;
+       int i;
+       int error;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return -EINVAL;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -EINVAL;
+
+       addr = devm_ioremap(&pdev->dev, res->start, 4096);
+       if (!addr)
+               return -ENOMEM;
+
+       __raw_writel(PAGE_NAME, addr + REG_SET_PAGE);
+       keymapnamelen = __raw_readl(addr + REG_LEN);
+
+       edev = devm_kzalloc(&pdev->dev,
+                           sizeof(struct event_dev) + keymapnamelen + 1,
+                           GFP_KERNEL);
+       if (!edev)
+               return -ENOMEM;
+
+       input_dev = devm_input_allocate_device(&pdev->dev);
+       if (!input_dev)
+               return -ENOMEM;
+
+       edev->input = input_dev;
+       edev->addr = addr;
+       edev->irq = irq;
+
+       for (i = 0; i < keymapnamelen; i++)
+               edev->name[i] = __raw_readb(edev->addr + REG_DATA + i);
+
+       pr_debug("events_probe() keymap=%s\n", edev->name);
+
+       input_dev->name = edev->name;
+       input_dev->id.bustype = BUS_HOST;
+
+       events_import_bits(edev, input_dev->evbit, EV_SYN, EV_MAX);
+       events_import_bits(edev, input_dev->keybit, EV_KEY, KEY_MAX);
+       events_import_bits(edev, input_dev->relbit, EV_REL, REL_MAX);
+       events_import_bits(edev, input_dev->absbit, EV_ABS, ABS_MAX);
+       events_import_bits(edev, input_dev->mscbit, EV_MSC, MSC_MAX);
+       events_import_bits(edev, input_dev->ledbit, EV_LED, LED_MAX);
+       events_import_bits(edev, input_dev->sndbit, EV_SND, SND_MAX);
+       events_import_bits(edev, input_dev->ffbit, EV_FF, FF_MAX);
+       events_import_bits(edev, input_dev->swbit, EV_SW, SW_MAX);
+
+       events_import_abs_params(edev);
+
+       error = devm_request_irq(&pdev->dev, edev->irq, events_interrupt, 0,
+                                "goldfish-events-keypad", edev);
+       if (error)
+               return error;
+
+       error = input_register_device(input_dev);
+       if (error)
+               return error;
+
+       return 0;
+}
+
+static struct platform_driver events_driver = {
+       .probe  = events_probe,
+       .driver = {
+               .owner  = THIS_MODULE,
+               .name   = "goldfish_events",
+       },
+};
+
+module_platform_driver(events_driver);
+
+MODULE_AUTHOR("Brian Swetland");
+MODULE_DESCRIPTION("Goldfish Event Device");
+MODULE_LICENSE("GPL");
index 6d150e3e1f5594dac7726ea25ec51dc0a61f8137..98f9113251d2bf31bd8763a537c8795306040455 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/timer.h>
@@ -414,15 +415,23 @@ open_err:
        return -EIO;
 }
 
+#ifdef CONFIG_OF
+static struct of_device_id imx_keypad_of_match[] = {
+       { .compatible = "fsl,imx21-kpp", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_keypad_of_match);
+#endif
+
 static int imx_keypad_probe(struct platform_device *pdev)
 {
        const struct matrix_keymap_data *keymap_data = pdev->dev.platform_data;
        struct imx_keypad *keypad;
        struct input_dev *input_dev;
        struct resource *res;
-       int irq, error, i;
+       int irq, error, i, row, col;
 
-       if (keymap_data == NULL) {
+       if (!keymap_data && !pdev->dev.of_node) {
                dev_err(&pdev->dev, "no keymap defined\n");
                return -EINVAL;
        }
@@ -480,22 +489,6 @@ static int imx_keypad_probe(struct platform_device *pdev)
                goto failed_unmap;
        }
 
-       /* Search for rows and cols enabled */
-       for (i = 0; i < keymap_data->keymap_size; i++) {
-               keypad->rows_en_mask |= 1 << KEY_ROW(keymap_data->keymap[i]);
-               keypad->cols_en_mask |= 1 << KEY_COL(keymap_data->keymap[i]);
-       }
-
-       if (keypad->rows_en_mask > ((1 << MAX_MATRIX_KEY_ROWS) - 1) ||
-           keypad->cols_en_mask > ((1 << MAX_MATRIX_KEY_COLS) - 1)) {
-               dev_err(&pdev->dev,
-                       "invalid key data (too many rows or colums)\n");
-               error = -EINVAL;
-               goto failed_clock_put;
-       }
-       dev_dbg(&pdev->dev, "enabled rows mask: %x\n", keypad->rows_en_mask);
-       dev_dbg(&pdev->dev, "enabled cols mask: %x\n", keypad->cols_en_mask);
-
        /* Init the Input device */
        input_dev->name = pdev->name;
        input_dev->id.bustype = BUS_HOST;
@@ -512,6 +505,19 @@ static int imx_keypad_probe(struct platform_device *pdev)
                goto failed_clock_put;
        }
 
+       /* Search for rows and cols enabled */
+       for (row = 0; row < MAX_MATRIX_KEY_ROWS; row++) {
+               for (col = 0; col < MAX_MATRIX_KEY_COLS; col++) {
+                       i = MATRIX_SCAN_CODE(row, col, MATRIX_ROW_SHIFT);
+                       if (keypad->keycodes[i] != KEY_RESERVED) {
+                               keypad->rows_en_mask |= 1 << row;
+                               keypad->cols_en_mask |= 1 << col;
+                       }
+               }
+       }
+       dev_dbg(&pdev->dev, "enabled rows mask: %x\n", keypad->rows_en_mask);
+       dev_dbg(&pdev->dev, "enabled cols mask: %x\n", keypad->cols_en_mask);
+
        __set_bit(EV_REP, input_dev->evbit);
        input_set_capability(input_dev, EV_MSC, MSC_SCAN);
        input_set_drvdata(input_dev, keypad);
@@ -631,6 +637,7 @@ static struct platform_driver imx_keypad_driver = {
                .name   = "imx-keypad",
                .owner  = THIS_MODULE,
                .pm     = &imx_kbd_pm_ops,
+               .of_match_table = of_match_ptr(imx_keypad_of_match),
        },
        .probe          = imx_keypad_probe,
        .remove         = imx_keypad_remove,
index 93c812662134392aa973d9dcff4ffec97b601205..0de23f41b2d316364b8a0f50b8b1b72ca5c9555b 100644 (file)
@@ -398,7 +398,7 @@ static irqreturn_t lm8323_irq(int irq, void *_lm)
                        lm8323_configure(lm);
                }
                for (i = 0; i < LM8323_NUM_PWMS; i++) {
-                       if (ints & (1 << (INT_PWM1 + i))) {
+                       if (ints & (INT_PWM1 << i)) {
                                dev_vdbg(&lm->client->dev,
                                         "pwm%d engine completed\n", i);
                                pwm_done(&lm->pwm[i]);
index 3dc2b0f27b0c48386cb13bd367df67d5b9cbe214..1c0ddad0a1ccdeb6444c80ff3443f7f3d40c079b 100644 (file)
@@ -20,6 +20,7 @@
 
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/leds.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/jiffies.h>
 #define QT2160_CMD_GPIOS      6
 #define QT2160_CMD_SUBVER     7
 #define QT2160_CMD_CALIBRATE  10
+#define QT2160_CMD_DRIVE_X    70
+#define QT2160_CMD_PWMEN_X    74
+#define QT2160_CMD_PWM_DUTY   76
+
+#define QT2160_NUM_LEDS_X      8
 
 #define QT2160_CYCLE_INTERVAL  (2*HZ)
 
@@ -49,6 +55,17 @@ static unsigned char qt2160_key2code[] = {
        KEY_C, KEY_D, KEY_E, KEY_F,
 };
 
+#ifdef CONFIG_LEDS_CLASS
+struct qt2160_led {
+       struct qt2160_data *qt2160;
+       struct led_classdev cdev;
+       struct work_struct work;
+       char name[32];
+       int id;
+       enum led_brightness new_brightness;
+};
+#endif
+
 struct qt2160_data {
        struct i2c_client *client;
        struct input_dev *input;
@@ -56,8 +73,61 @@ struct qt2160_data {
        spinlock_t lock;        /* Protects canceling/rescheduling of dwork */
        unsigned short keycodes[ARRAY_SIZE(qt2160_key2code)];
        u16 key_matrix;
+#ifdef CONFIG_LEDS_CLASS
+       struct qt2160_led leds[QT2160_NUM_LEDS_X];
+       struct mutex led_lock;
+#endif
 };
 
+static int qt2160_read(struct i2c_client *client, u8 reg);
+static int qt2160_write(struct i2c_client *client, u8 reg, u8 data);
+
+#ifdef CONFIG_LEDS_CLASS
+
+static void qt2160_led_work(struct work_struct *work)
+{
+       struct qt2160_led *led = container_of(work, struct qt2160_led, work);
+       struct qt2160_data *qt2160 = led->qt2160;
+       struct i2c_client *client = qt2160->client;
+       int value = led->new_brightness;
+       u32 drive, pwmen;
+
+       mutex_lock(&qt2160->led_lock);
+
+       drive = qt2160_read(client, QT2160_CMD_DRIVE_X);
+       pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X);
+       if (value != LED_OFF) {
+               drive |= (1 << led->id);
+               pwmen |= (1 << led->id);
+
+       } else {
+               drive &= ~(1 << led->id);
+               pwmen &= ~(1 << led->id);
+       }
+       qt2160_write(client, QT2160_CMD_DRIVE_X, drive);
+       qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen);
+
+       /*
+        * Changing this register will change the brightness
+        * of every LED in the qt2160. It's a HW limitation.
+        */
+       if (value != LED_OFF)
+               qt2160_write(client, QT2160_CMD_PWM_DUTY, value);
+
+       mutex_unlock(&qt2160->led_lock);
+}
+
+static void qt2160_led_set(struct led_classdev *cdev,
+                          enum led_brightness value)
+{
+       struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev);
+
+       led->new_brightness = value;
+       schedule_work(&led->work);
+}
+
+#endif /* CONFIG_LEDS_CLASS */
+
 static int qt2160_read_block(struct i2c_client *client,
                             u8 inireg, u8 *buffer, unsigned int count)
 {
@@ -216,6 +286,63 @@ static int qt2160_write(struct i2c_client *client, u8 reg, u8 data)
        return ret;
 }
 
+#ifdef CONFIG_LEDS_CLASS
+
+static int qt2160_register_leds(struct qt2160_data *qt2160)
+{
+       struct i2c_client *client = qt2160->client;
+       int ret;
+       int i;
+
+       mutex_init(&qt2160->led_lock);
+
+       for (i = 0; i < QT2160_NUM_LEDS_X; i++) {
+               struct qt2160_led *led = &qt2160->leds[i];
+
+               snprintf(led->name, sizeof(led->name), "qt2160:x%d", i);
+               led->cdev.name = led->name;
+               led->cdev.brightness_set = qt2160_led_set;
+               led->cdev.brightness = LED_OFF;
+               led->id = i;
+               led->qt2160 = qt2160;
+
+               INIT_WORK(&led->work, qt2160_led_work);
+
+               ret = led_classdev_register(&client->dev, &led->cdev);
+               if (ret < 0)
+                       return ret;
+       }
+
+       /* Turn off LEDs */
+       qt2160_write(client, QT2160_CMD_DRIVE_X, 0);
+       qt2160_write(client, QT2160_CMD_PWMEN_X, 0);
+       qt2160_write(client, QT2160_CMD_PWM_DUTY, 0);
+
+       return 0;
+}
+
+static void qt2160_unregister_leds(struct qt2160_data *qt2160)
+{
+       int i;
+
+       for (i = 0; i < QT2160_NUM_LEDS_X; i++) {
+               led_classdev_unregister(&qt2160->leds[i].cdev);
+               cancel_work_sync(&qt2160->leds[i].work);
+       }
+}
+
+#else
+
+static inline int qt2160_register_leds(struct qt2160_data *qt2160)
+{
+       return 0;
+}
+
+static inline void qt2160_unregister_leds(struct qt2160_data *qt2160)
+{
+}
+
+#endif
 
 static bool qt2160_identify(struct i2c_client *client)
 {
@@ -249,7 +376,7 @@ static bool qt2160_identify(struct i2c_client *client)
 }
 
 static int qt2160_probe(struct i2c_client *client,
-                                 const struct i2c_device_id *id)
+                       const struct i2c_device_id *id)
 {
        struct qt2160_data *qt2160;
        struct input_dev *input;
@@ -314,11 +441,17 @@ static int qt2160_probe(struct i2c_client *client,
                }
        }
 
+       error = qt2160_register_leds(qt2160);
+       if (error) {
+               dev_err(&client->dev, "Failed to register leds\n");
+               goto err_free_irq;
+       }
+
        error = input_register_device(qt2160->input);
        if (error) {
                dev_err(&client->dev,
                        "Failed to register input device\n");
-               goto err_free_irq;
+               goto err_unregister_leds;
        }
 
        i2c_set_clientdata(client, qt2160);
@@ -326,6 +459,8 @@ static int qt2160_probe(struct i2c_client *client,
 
        return 0;
 
+err_unregister_leds:
+       qt2160_unregister_leds(qt2160);
 err_free_irq:
        if (client->irq)
                free_irq(client->irq, qt2160);
@@ -339,6 +474,8 @@ static int qt2160_remove(struct i2c_client *client)
 {
        struct qt2160_data *qt2160 = i2c_get_clientdata(client);
 
+       qt2160_unregister_leds(qt2160);
+
        /* Release IRQ so no queue will be scheduled */
        if (client->irq)
                free_irq(client->irq, qt2160);
index c76f96872d313f558eb9a75e6305a7d14e123d2c..d89e7d392d1ef38131a45e832f0e7596aa77cf68 100644 (file)
 #include <linux/of.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
-#include <linux/input/tegra_kbc.h>
+#include <linux/input/matrix_keypad.h>
 #include <mach/clk.h>
 
+#define KBC_MAX_GPIO   24
+#define KBC_MAX_KPENT  8
+
+#define KBC_MAX_ROW    16
+#define KBC_MAX_COL    8
+#define KBC_MAX_KEY    (KBC_MAX_ROW * KBC_MAX_COL)
+
 #define KBC_MAX_DEBOUNCE_CNT   0x3ffu
 
 /* KBC row scan time and delay for beginning the row scan. */
 
 #define KBC_ROW_SHIFT  3
 
+enum tegra_pin_type {
+       PIN_CFG_IGNORE,
+       PIN_CFG_COL,
+       PIN_CFG_ROW,
+};
+
+struct tegra_kbc_pin_cfg {
+       enum tegra_pin_type type;
+       unsigned char num;
+};
+
 struct tegra_kbc {
+       struct device *dev;
+       unsigned int debounce_cnt;
+       unsigned int repeat_cnt;
+       struct tegra_kbc_pin_cfg pin_cfg[KBC_MAX_GPIO];
+       const struct matrix_keymap_data *keymap_data;
+       bool wakeup;
        void __iomem *mmio;
        struct input_dev *idev;
-       unsigned int irq;
+       int irq;
        spinlock_t lock;
        unsigned int repoll_dly;
        unsigned long cp_dly_jiffies;
@@ -78,7 +102,6 @@ struct tegra_kbc {
        bool use_fn_map;
        bool use_ghost_filter;
        bool keypress_caused_wake;
-       const struct tegra_kbc_platform_data *pdata;
        unsigned short keycode[KBC_MAX_KEY * 2];
        unsigned short current_keys[KBC_MAX_KPENT];
        unsigned int num_pressed_keys;
@@ -87,147 +110,6 @@ struct tegra_kbc {
        struct clk *clk;
 };
 
-static const u32 tegra_kbc_default_keymap[] = {
-       KEY(0, 2, KEY_W),
-       KEY(0, 3, KEY_S),
-       KEY(0, 4, KEY_A),
-       KEY(0, 5, KEY_Z),
-       KEY(0, 7, KEY_FN),
-
-       KEY(1, 7, KEY_LEFTMETA),
-
-       KEY(2, 6, KEY_RIGHTALT),
-       KEY(2, 7, KEY_LEFTALT),
-
-       KEY(3, 0, KEY_5),
-       KEY(3, 1, KEY_4),
-       KEY(3, 2, KEY_R),
-       KEY(3, 3, KEY_E),
-       KEY(3, 4, KEY_F),
-       KEY(3, 5, KEY_D),
-       KEY(3, 6, KEY_X),
-
-       KEY(4, 0, KEY_7),
-       KEY(4, 1, KEY_6),
-       KEY(4, 2, KEY_T),
-       KEY(4, 3, KEY_H),
-       KEY(4, 4, KEY_G),
-       KEY(4, 5, KEY_V),
-       KEY(4, 6, KEY_C),
-       KEY(4, 7, KEY_SPACE),
-
-       KEY(5, 0, KEY_9),
-       KEY(5, 1, KEY_8),
-       KEY(5, 2, KEY_U),
-       KEY(5, 3, KEY_Y),
-       KEY(5, 4, KEY_J),
-       KEY(5, 5, KEY_N),
-       KEY(5, 6, KEY_B),
-       KEY(5, 7, KEY_BACKSLASH),
-
-       KEY(6, 0, KEY_MINUS),
-       KEY(6, 1, KEY_0),
-       KEY(6, 2, KEY_O),
-       KEY(6, 3, KEY_I),
-       KEY(6, 4, KEY_L),
-       KEY(6, 5, KEY_K),
-       KEY(6, 6, KEY_COMMA),
-       KEY(6, 7, KEY_M),
-
-       KEY(7, 1, KEY_EQUAL),
-       KEY(7, 2, KEY_RIGHTBRACE),
-       KEY(7, 3, KEY_ENTER),
-       KEY(7, 7, KEY_MENU),
-
-       KEY(8, 4, KEY_RIGHTSHIFT),
-       KEY(8, 5, KEY_LEFTSHIFT),
-
-       KEY(9, 5, KEY_RIGHTCTRL),
-       KEY(9, 7, KEY_LEFTCTRL),
-
-       KEY(11, 0, KEY_LEFTBRACE),
-       KEY(11, 1, KEY_P),
-       KEY(11, 2, KEY_APOSTROPHE),
-       KEY(11, 3, KEY_SEMICOLON),
-       KEY(11, 4, KEY_SLASH),
-       KEY(11, 5, KEY_DOT),
-
-       KEY(12, 0, KEY_F10),
-       KEY(12, 1, KEY_F9),
-       KEY(12, 2, KEY_BACKSPACE),
-       KEY(12, 3, KEY_3),
-       KEY(12, 4, KEY_2),
-       KEY(12, 5, KEY_UP),
-       KEY(12, 6, KEY_PRINT),
-       KEY(12, 7, KEY_PAUSE),
-
-       KEY(13, 0, KEY_INSERT),
-       KEY(13, 1, KEY_DELETE),
-       KEY(13, 3, KEY_PAGEUP),
-       KEY(13, 4, KEY_PAGEDOWN),
-       KEY(13, 5, KEY_RIGHT),
-       KEY(13, 6, KEY_DOWN),
-       KEY(13, 7, KEY_LEFT),
-
-       KEY(14, 0, KEY_F11),
-       KEY(14, 1, KEY_F12),
-       KEY(14, 2, KEY_F8),
-       KEY(14, 3, KEY_Q),
-       KEY(14, 4, KEY_F4),
-       KEY(14, 5, KEY_F3),
-       KEY(14, 6, KEY_1),
-       KEY(14, 7, KEY_F7),
-
-       KEY(15, 0, KEY_ESC),
-       KEY(15, 1, KEY_GRAVE),
-       KEY(15, 2, KEY_F5),
-       KEY(15, 3, KEY_TAB),
-       KEY(15, 4, KEY_F1),
-       KEY(15, 5, KEY_F2),
-       KEY(15, 6, KEY_CAPSLOCK),
-       KEY(15, 7, KEY_F6),
-
-       /* Software Handled Function Keys */
-       KEY(20, 0, KEY_KP7),
-
-       KEY(21, 0, KEY_KP9),
-       KEY(21, 1, KEY_KP8),
-       KEY(21, 2, KEY_KP4),
-       KEY(21, 4, KEY_KP1),
-
-       KEY(22, 1, KEY_KPSLASH),
-       KEY(22, 2, KEY_KP6),
-       KEY(22, 3, KEY_KP5),
-       KEY(22, 4, KEY_KP3),
-       KEY(22, 5, KEY_KP2),
-       KEY(22, 7, KEY_KP0),
-
-       KEY(27, 1, KEY_KPASTERISK),
-       KEY(27, 3, KEY_KPMINUS),
-       KEY(27, 4, KEY_KPPLUS),
-       KEY(27, 5, KEY_KPDOT),
-
-       KEY(28, 5, KEY_VOLUMEUP),
-
-       KEY(29, 3, KEY_HOME),
-       KEY(29, 4, KEY_END),
-       KEY(29, 5, KEY_BRIGHTNESSDOWN),
-       KEY(29, 6, KEY_VOLUMEDOWN),
-       KEY(29, 7, KEY_BRIGHTNESSUP),
-
-       KEY(30, 0, KEY_NUMLOCK),
-       KEY(30, 1, KEY_SCROLLLOCK),
-       KEY(30, 2, KEY_MUTE),
-
-       KEY(31, 4, KEY_HELP),
-};
-
-static const
-struct matrix_keymap_data tegra_kbc_default_keymap_data = {
-       .keymap         = tegra_kbc_default_keymap,
-       .keymap_size    = ARRAY_SIZE(tegra_kbc_default_keymap),
-};
-
 static void tegra_kbc_report_released_keys(struct input_dev *input,
                                           unsigned short old_keycodes[],
                                           unsigned int old_num_keys,
@@ -357,18 +239,6 @@ static void tegra_kbc_set_fifo_interrupt(struct tegra_kbc *kbc, bool enable)
        writel(val, kbc->mmio + KBC_CONTROL_0);
 }
 
-static void tegra_kbc_set_keypress_interrupt(struct tegra_kbc *kbc, bool enable)
-{
-       u32 val;
-
-       val = readl(kbc->mmio + KBC_CONTROL_0);
-       if (enable)
-               val |= KBC_CONTROL_KEYPRESS_INT_EN;
-       else
-               val &= ~KBC_CONTROL_KEYPRESS_INT_EN;
-       writel(val, kbc->mmio + KBC_CONTROL_0);
-}
-
 static void tegra_kbc_keypress_timer(unsigned long data)
 {
        struct tegra_kbc *kbc = (struct tegra_kbc *)data;
@@ -439,12 +309,11 @@ static irqreturn_t tegra_kbc_isr(int irq, void *args)
 
 static void tegra_kbc_setup_wakekeys(struct tegra_kbc *kbc, bool filter)
 {
-       const struct tegra_kbc_platform_data *pdata = kbc->pdata;
        int i;
        unsigned int rst_val;
 
        /* Either mask all keys or none. */
-       rst_val = (filter && !pdata->wakeup) ? ~0 : 0;
+       rst_val = (filter && !kbc->wakeup) ? ~0 : 0;
 
        for (i = 0; i < KBC_MAX_ROW; i++)
                writel(rst_val, kbc->mmio + KBC_ROW0_MASK_0 + i * 4);
@@ -452,7 +321,6 @@ static void tegra_kbc_setup_wakekeys(struct tegra_kbc *kbc, bool filter)
 
 static void tegra_kbc_config_pins(struct tegra_kbc *kbc)
 {
-       const struct tegra_kbc_platform_data *pdata = kbc->pdata;
        int i;
 
        for (i = 0; i < KBC_MAX_GPIO; i++) {
@@ -468,13 +336,13 @@ static void tegra_kbc_config_pins(struct tegra_kbc *kbc)
                row_cfg &= ~r_mask;
                col_cfg &= ~c_mask;
 
-               switch (pdata->pin_cfg[i].type) {
+               switch (kbc->pin_cfg[i].type) {
                case PIN_CFG_ROW:
-                       row_cfg |= ((pdata->pin_cfg[i].num << 1) | 1) << r_shft;
+                       row_cfg |= ((kbc->pin_cfg[i].num << 1) | 1) << r_shft;
                        break;
 
                case PIN_CFG_COL:
-                       col_cfg |= ((pdata->pin_cfg[i].num << 1) | 1) << c_shft;
+                       col_cfg |= ((kbc->pin_cfg[i].num << 1) | 1) << c_shft;
                        break;
 
                case PIN_CFG_IGNORE:
@@ -488,7 +356,6 @@ static void tegra_kbc_config_pins(struct tegra_kbc *kbc)
 
 static int tegra_kbc_start(struct tegra_kbc *kbc)
 {
-       const struct tegra_kbc_platform_data *pdata = kbc->pdata;
        unsigned int debounce_cnt;
        u32 val = 0;
 
@@ -503,10 +370,10 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
        tegra_kbc_config_pins(kbc);
        tegra_kbc_setup_wakekeys(kbc, false);
 
-       writel(pdata->repeat_cnt, kbc->mmio + KBC_RPT_DLY_0);
+       writel(kbc->repeat_cnt, kbc->mmio + KBC_RPT_DLY_0);
 
        /* Keyboard debounce count is maximum of 12 bits. */
-       debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
+       debounce_cnt = min(kbc->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
        val = KBC_DEBOUNCE_CNT_SHIFT(debounce_cnt);
        val |= KBC_FIFO_TH_CNT_SHIFT(1); /* set fifo interrupt threshold to 1 */
        val |= KBC_CONTROL_FIFO_CNT_INT_EN;  /* interrupt on FIFO threshold */
@@ -573,21 +440,20 @@ static void tegra_kbc_close(struct input_dev *dev)
        return tegra_kbc_stop(kbc);
 }
 
-static bool
-tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
-                       struct device *dev, unsigned int *num_rows)
+static bool tegra_kbc_check_pin_cfg(const struct tegra_kbc *kbc,
+                                       unsigned int *num_rows)
 {
        int i;
 
        *num_rows = 0;
 
        for (i = 0; i < KBC_MAX_GPIO; i++) {
-               const struct tegra_kbc_pin_cfg *pin_cfg = &pdata->pin_cfg[i];
+               const struct tegra_kbc_pin_cfg *pin_cfg = &kbc->pin_cfg[i];
 
                switch (pin_cfg->type) {
                case PIN_CFG_ROW:
                        if (pin_cfg->num >= KBC_MAX_ROW) {
-                               dev_err(dev,
+                               dev_err(kbc->dev,
                                        "pin_cfg[%d]: invalid row number %d\n",
                                        i, pin_cfg->num);
                                return false;
@@ -597,7 +463,7 @@ tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
 
                case PIN_CFG_COL:
                        if (pin_cfg->num >= KBC_MAX_COL) {
-                               dev_err(dev,
+                               dev_err(kbc->dev,
                                        "pin_cfg[%d]: invalid column number %d\n",
                                        i, pin_cfg->num);
                                return false;
@@ -608,7 +474,7 @@ tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
                        break;
 
                default:
-                       dev_err(dev,
+                       dev_err(kbc->dev,
                                "pin_cfg[%d]: invalid entry type %d\n",
                                pin_cfg->type, pin_cfg->num);
                        return false;
@@ -618,154 +484,140 @@ tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
        return true;
 }
 
-#ifdef CONFIG_OF
-static struct tegra_kbc_platform_data *tegra_kbc_dt_parse_pdata(
-       struct platform_device *pdev)
+static int tegra_kbc_parse_dt(struct tegra_kbc *kbc)
 {
-       struct tegra_kbc_platform_data *pdata;
-       struct device_node *np = pdev->dev.of_node;
+       struct device_node *np = kbc->dev->of_node;
        u32 prop;
        int i;
-
-       if (!np)
-               return NULL;
-
-       pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
-       if (!pdata)
-               return NULL;
+       u32 num_rows = 0;
+       u32 num_cols = 0;
+       u32 cols_cfg[KBC_MAX_GPIO];
+       u32 rows_cfg[KBC_MAX_GPIO];
+       int proplen;
+       int ret;
 
        if (!of_property_read_u32(np, "nvidia,debounce-delay-ms", &prop))
-               pdata->debounce_cnt = prop;
+               kbc->debounce_cnt = prop;
 
        if (!of_property_read_u32(np, "nvidia,repeat-delay-ms", &prop))
-               pdata->repeat_cnt = prop;
+               kbc->repeat_cnt = prop;
 
        if (of_find_property(np, "nvidia,needs-ghost-filter", NULL))
-               pdata->use_ghost_filter = true;
+               kbc->use_ghost_filter = true;
 
        if (of_find_property(np, "nvidia,wakeup-source", NULL))
-               pdata->wakeup = true;
+               kbc->wakeup = true;
 
-       /*
-        * All currently known keymaps with device tree support use the same
-        * pin_cfg, so set it up here.
-        */
-       for (i = 0; i < KBC_MAX_ROW; i++) {
-               pdata->pin_cfg[i].num = i;
-               pdata->pin_cfg[i].type = PIN_CFG_ROW;
+       if (!of_get_property(np, "nvidia,kbc-row-pins", &proplen)) {
+               dev_err(kbc->dev, "property nvidia,kbc-row-pins not found\n");
+               return -ENOENT;
        }
+       num_rows = proplen / sizeof(u32);
 
-       for (i = 0; i < KBC_MAX_COL; i++) {
-               pdata->pin_cfg[KBC_MAX_ROW + i].num = i;
-               pdata->pin_cfg[KBC_MAX_ROW + i].type = PIN_CFG_COL;
+       if (!of_get_property(np, "nvidia,kbc-col-pins", &proplen)) {
+               dev_err(kbc->dev, "property nvidia,kbc-col-pins not found\n");
+               return -ENOENT;
        }
+       num_cols = proplen / sizeof(u32);
 
-       return pdata;
-}
-#else
-static inline struct tegra_kbc_platform_data *tegra_kbc_dt_parse_pdata(
-       struct platform_device *pdev)
-{
-       return NULL;
-}
-#endif
+       if (!of_get_property(np, "linux,keymap", &proplen)) {
+               dev_err(kbc->dev, "property linux,keymap not found\n");
+               return -ENOENT;
+       }
 
-static int tegra_kbd_setup_keymap(struct tegra_kbc *kbc)
-{
-       const struct tegra_kbc_platform_data *pdata = kbc->pdata;
-       const struct matrix_keymap_data *keymap_data = pdata->keymap_data;
-       unsigned int keymap_rows = KBC_MAX_KEY;
-       int retval;
+       if (!num_rows || !num_cols || ((num_rows + num_cols) > KBC_MAX_GPIO)) {
+               dev_err(kbc->dev,
+                       "keypad rows/columns not properly specified\n");
+               return -EINVAL;
+       }
 
-       if (keymap_data && pdata->use_fn_map)
-               keymap_rows *= 2;
+       /* Set all pins as non-configured */
+       for (i = 0; i < KBC_MAX_GPIO; i++)
+               kbc->pin_cfg[i].type = PIN_CFG_IGNORE;
 
-       retval = matrix_keypad_build_keymap(keymap_data, NULL,
-                                           keymap_rows, KBC_MAX_COL,
-                                           kbc->keycode, kbc->idev);
-       if (retval == -ENOSYS || retval == -ENOENT) {
-               /*
-                * If there is no OF support in kernel or keymap
-                * property is missing, use default keymap.
-                */
-               retval = matrix_keypad_build_keymap(
-                                       &tegra_kbc_default_keymap_data, NULL,
-                                       keymap_rows, KBC_MAX_COL,
-                                       kbc->keycode, kbc->idev);
+       ret = of_property_read_u32_array(np, "nvidia,kbc-row-pins",
+                               rows_cfg, num_rows);
+       if (ret < 0) {
+               dev_err(kbc->dev, "Rows configurations are not proper\n");
+               return -EINVAL;
+       }
+
+       ret = of_property_read_u32_array(np, "nvidia,kbc-col-pins",
+                               cols_cfg, num_cols);
+       if (ret < 0) {
+               dev_err(kbc->dev, "Cols configurations are not proper\n");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < num_rows; i++) {
+               kbc->pin_cfg[rows_cfg[i]].type = PIN_CFG_ROW;
+               kbc->pin_cfg[rows_cfg[i]].num = i;
        }
 
-       return retval;
+       for (i = 0; i < num_cols; i++) {
+               kbc->pin_cfg[cols_cfg[i]].type = PIN_CFG_COL;
+               kbc->pin_cfg[cols_cfg[i]].num = i;
+       }
+
+       return 0;
 }
 
 static int tegra_kbc_probe(struct platform_device *pdev)
 {
-       const struct tegra_kbc_platform_data *pdata = pdev->dev.platform_data;
        struct tegra_kbc *kbc;
-       struct input_dev *input_dev;
        struct resource *res;
-       int irq;
        int err;
        int num_rows = 0;
        unsigned int debounce_cnt;
        unsigned int scan_time_rows;
+       unsigned int keymap_rows = KBC_MAX_KEY;
 
-       if (!pdata)
-               pdata = tegra_kbc_dt_parse_pdata(pdev);
+       kbc = devm_kzalloc(&pdev->dev, sizeof(*kbc), GFP_KERNEL);
+       if (!kbc) {
+               dev_err(&pdev->dev, "failed to alloc memory for kbc\n");
+               return -ENOMEM;
+       }
 
-       if (!pdata)
-               return -EINVAL;
+       kbc->dev = &pdev->dev;
+       spin_lock_init(&kbc->lock);
 
-       if (!tegra_kbc_check_pin_cfg(pdata, &pdev->dev, &num_rows)) {
-               err = -EINVAL;
-               goto err_free_pdata;
-       }
+       err = tegra_kbc_parse_dt(kbc);
+       if (err)
+               return err;
+
+       if (!tegra_kbc_check_pin_cfg(kbc, &num_rows))
+               return -EINVAL;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&pdev->dev, "failed to get I/O memory\n");
-               err = -ENXIO;
-               goto err_free_pdata;
+               return -ENXIO;
        }
 
-       irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
+       kbc->irq = platform_get_irq(pdev, 0);
+       if (kbc->irq < 0) {
                dev_err(&pdev->dev, "failed to get keyboard IRQ\n");
-               err = -ENXIO;
-               goto err_free_pdata;
+               return -ENXIO;
        }
 
-       kbc = kzalloc(sizeof(*kbc), GFP_KERNEL);
-       input_dev = input_allocate_device();
-       if (!kbc || !input_dev) {
-               err = -ENOMEM;
-               goto err_free_mem;
+       kbc->idev = devm_input_allocate_device(&pdev->dev);
+       if (!kbc->idev) {
+               dev_err(&pdev->dev, "failed to allocate input device\n");
+               return -ENOMEM;
        }
 
-       kbc->pdata = pdata;
-       kbc->idev = input_dev;
-       kbc->irq = irq;
-       spin_lock_init(&kbc->lock);
        setup_timer(&kbc->timer, tegra_kbc_keypress_timer, (unsigned long)kbc);
 
-       res = request_mem_region(res->start, resource_size(res), pdev->name);
-       if (!res) {
-               dev_err(&pdev->dev, "failed to request I/O memory\n");
-               err = -EBUSY;
-               goto err_free_mem;
-       }
-
-       kbc->mmio = ioremap(res->start, resource_size(res));
+       kbc->mmio = devm_request_and_ioremap(&pdev->dev, res);
        if (!kbc->mmio) {
-               dev_err(&pdev->dev, "failed to remap I/O memory\n");
-               err = -ENXIO;
-               goto err_free_mem_region;
+               dev_err(&pdev->dev, "Cannot request memregion/iomap address\n");
+               return -EBUSY;
        }
 
-       kbc->clk = clk_get(&pdev->dev, NULL);
+       kbc->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(kbc->clk)) {
                dev_err(&pdev->dev, "failed to get keyboard clock\n");
-               err = PTR_ERR(kbc->clk);
-               goto err_iounmap;
+               return PTR_ERR(kbc->clk);
        }
 
        /*
@@ -774,37 +626,38 @@ static int tegra_kbc_probe(struct platform_device *pdev)
         * the rows. There is an additional delay before the row scanning
         * starts. The repoll delay is computed in milliseconds.
         */
-       debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
+       debounce_cnt = min(kbc->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
        scan_time_rows = (KBC_ROW_SCAN_TIME + debounce_cnt) * num_rows;
-       kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + pdata->repeat_cnt;
+       kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + kbc->repeat_cnt;
        kbc->repoll_dly = DIV_ROUND_UP(kbc->repoll_dly, KBC_CYCLE_MS);
 
-       kbc->wakeup_key = pdata->wakeup_key;
-       kbc->use_fn_map = pdata->use_fn_map;
-       kbc->use_ghost_filter = pdata->use_ghost_filter;
+       kbc->idev->name = pdev->name;
+       kbc->idev->id.bustype = BUS_HOST;
+       kbc->idev->dev.parent = &pdev->dev;
+       kbc->idev->open = tegra_kbc_open;
+       kbc->idev->close = tegra_kbc_close;
 
-       input_dev->name = pdev->name;
-       input_dev->id.bustype = BUS_HOST;
-       input_dev->dev.parent = &pdev->dev;
-       input_dev->open = tegra_kbc_open;
-       input_dev->close = tegra_kbc_close;
+       if (kbc->keymap_data && kbc->use_fn_map)
+               keymap_rows *= 2;
 
-       err = tegra_kbd_setup_keymap(kbc);
+       err = matrix_keypad_build_keymap(kbc->keymap_data, NULL,
+                                        keymap_rows, KBC_MAX_COL,
+                                        kbc->keycode, kbc->idev);
        if (err) {
                dev_err(&pdev->dev, "failed to setup keymap\n");
-               goto err_put_clk;
+               return err;
        }
 
-       __set_bit(EV_REP, input_dev->evbit);
-       input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+       __set_bit(EV_REP, kbc->idev->evbit);
+       input_set_capability(kbc->idev, EV_MSC, MSC_SCAN);
 
-       input_set_drvdata(input_dev, kbc);
+       input_set_drvdata(kbc->idev, kbc);
 
-       err = request_irq(kbc->irq, tegra_kbc_isr,
+       err = devm_request_irq(&pdev->dev, kbc->irq, tegra_kbc_isr,
                          IRQF_NO_SUSPEND | IRQF_TRIGGER_HIGH, pdev->name, kbc);
        if (err) {
                dev_err(&pdev->dev, "failed to request keyboard IRQ\n");
-               goto err_put_clk;
+               return err;
        }
 
        disable_irq(kbc->irq);
@@ -812,60 +665,28 @@ static int tegra_kbc_probe(struct platform_device *pdev)
        err = input_register_device(kbc->idev);
        if (err) {
                dev_err(&pdev->dev, "failed to register input device\n");
-               goto err_free_irq;
+               return err;
        }
 
        platform_set_drvdata(pdev, kbc);
-       device_init_wakeup(&pdev->dev, pdata->wakeup);
+       device_init_wakeup(&pdev->dev, kbc->wakeup);
 
        return 0;
-
-err_free_irq:
-       free_irq(kbc->irq, pdev);
-err_put_clk:
-       clk_put(kbc->clk);
-err_iounmap:
-       iounmap(kbc->mmio);
-err_free_mem_region:
-       release_mem_region(res->start, resource_size(res));
-err_free_mem:
-       input_free_device(input_dev);
-       kfree(kbc);
-err_free_pdata:
-       if (!pdev->dev.platform_data)
-               kfree(pdata);
-
-       return err;
 }
 
-static int tegra_kbc_remove(struct platform_device *pdev)
+#ifdef CONFIG_PM_SLEEP
+static void tegra_kbc_set_keypress_interrupt(struct tegra_kbc *kbc, bool enable)
 {
-       struct tegra_kbc *kbc = platform_get_drvdata(pdev);
-       struct resource *res;
-
-       platform_set_drvdata(pdev, NULL);
-
-       free_irq(kbc->irq, pdev);
-       clk_put(kbc->clk);
-
-       input_unregister_device(kbc->idev);
-       iounmap(kbc->mmio);
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(res->start, resource_size(res));
-
-       /*
-        * If we do not have platform data attached to the device we
-        * allocated it ourselves and thus need to free it.
-        */
-       if (!pdev->dev.platform_data)
-               kfree(kbc->pdata);
-
-       kfree(kbc);
+       u32 val;
 
-       return 0;
+       val = readl(kbc->mmio + KBC_CONTROL_0);
+       if (enable)
+               val |= KBC_CONTROL_KEYPRESS_INT_EN;
+       else
+               val &= ~KBC_CONTROL_KEYPRESS_INT_EN;
+       writel(val, kbc->mmio + KBC_CONTROL_0);
 }
 
-#ifdef CONFIG_PM_SLEEP
 static int tegra_kbc_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -954,7 +775,6 @@ MODULE_DEVICE_TABLE(of, tegra_kbc_of_match);
 
 static struct platform_driver tegra_kbc_driver = {
        .probe          = tegra_kbc_probe,
-       .remove         = tegra_kbc_remove,
        .driver = {
                .name   = "tegra-kbc",
                .owner  = THIS_MODULE,
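
The tegra-kbc conversion above drops the platform-data path entirely, reads the pin layout from the device tree with the generic OF helpers, and lets devm_* variants take over resource cleanup. A minimal sketch of the property-reading pattern it now uses (property names taken from the diff; what the caller does with rows[] is omitted):

    #include <linux/of.h>

    #define EXAMPLE_MAX_PINS 24     /* mirrors KBC_MAX_GPIO above */

    static int example_parse_pins(struct device_node *np, u32 *debounce_ms)
    {
            u32 rows[EXAMPLE_MAX_PINS];     /* caller would map these to pin_cfg[] */
            int len;

            /* Optional scalar: *debounce_ms keeps its default when absent. */
            of_property_read_u32(np, "nvidia,debounce-delay-ms", debounce_ms);

            /* Mandatory array: size it first, then read the whole thing. */
            if (!of_get_property(np, "nvidia,kbc-row-pins", &len))
                    return -ENOENT;
            if (len / sizeof(u32) > EXAMPLE_MAX_PINS)
                    return -EINVAL;

            return of_property_read_u32_array(np, "nvidia,kbc-row-pins",
                                              rows, len / sizeof(u32));
    }
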
index 1cf72fe513e6934af7fb4ab0d5742c3155537146..0735de3a6468f85cad2c08a1dc601d5a67a67a23 100644 (file)
@@ -232,7 +232,7 @@ static const struct adxl34x_platform_data adxl34x_default_init = {
 
        .ev_code_tap = {BTN_TOUCH, BTN_TOUCH, BTN_TOUCH}, /* EV_KEY {x,y,z} */
        .power_mode = ADXL_AUTO_SLEEP | ADXL_LINK,
-       .fifo_mode = FIFO_STREAM,
+       .fifo_mode = ADXL_FIFO_STREAM,
        .watermark = 0,
 };
 
@@ -732,7 +732,7 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
        mutex_init(&ac->mutex);
 
        input_dev->name = "ADXL34x accelerometer";
-       revid = ac->bops->read(dev, DEVID);
+       revid = AC_READ(ac, DEVID);
 
        switch (revid) {
        case ID_ADXL345:
@@ -809,7 +809,7 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
        if (FIFO_MODE(pdata->fifo_mode) == FIFO_BYPASS)
                ac->fifo_delay = false;
 
-       ac->bops->write(dev, POWER_CTL, 0);
+       AC_WRITE(ac, POWER_CTL, 0);
 
        err = request_threaded_irq(ac->irq, NULL, adxl34x_irq,
                                   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
@@ -827,7 +827,6 @@ struct adxl34x *adxl34x_probe(struct device *dev, int irq,
        if (err)
                goto err_remove_attr;
 
-       AC_WRITE(ac, THRESH_TAP, pdata->tap_threshold);
        AC_WRITE(ac, OFSX, pdata->x_axis_offset);
        ac->hwcal.x = pdata->x_axis_offset;
        AC_WRITE(ac, OFSY, pdata->y_axis_offset);
index 08ffcabd7220f5f1f5e98c16abc9f953963ddd61..865c2f9d25b9ff9cb55f63505852d89c06cf9687 100644 (file)
 #define BMA150_POLL_MAX                200
 #define BMA150_POLL_MIN                0
 
-#define BMA150_BW_25HZ         0
-#define BMA150_BW_50HZ         1
-#define BMA150_BW_100HZ                2
-#define BMA150_BW_190HZ                3
-#define BMA150_BW_375HZ                4
-#define BMA150_BW_750HZ                5
-#define BMA150_BW_1500HZ       6
-
-#define BMA150_RANGE_2G                0
-#define BMA150_RANGE_4G                1
-#define BMA150_RANGE_8G                2
-
 #define BMA150_MODE_NORMAL     0
 #define BMA150_MODE_SLEEP      2
 #define BMA150_MODE_WAKE_UP    3
@@ -372,7 +360,7 @@ static int bma150_open(struct bma150_data *bma150)
        int error;
 
        error = pm_runtime_get_sync(&bma150->client->dev);
-       if (error && error != -ENOSYS)
+       if (error < 0 && error != -ENOSYS)
                return error;
 
        /*
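
The bma150 change is a one-liner: pm_runtime_get_sync() may legitimately return a positive value (the device was already active), so only negative returns should be treated as errors. The pattern, roughly:

    #include <linux/pm_runtime.h>

    static int example_runtime_open(struct device *dev)
    {
            int ret = pm_runtime_get_sync(dev);

            /* 0 or a positive value means the device is (or already was) resumed;
             * the driver additionally tolerates -ENOSYS when runtime PM is absent. */
            if (ret < 0 && ret != -ENOSYS)
                    return ret;

            return 0;
    }
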
index 78eb6b30580acdf5aa86096ecef01b653ffec3f6..68a5f33152a8cd8914d39245257735d0e5ecae38 100644 (file)
@@ -43,7 +43,6 @@ struct vibra_info {
        struct device           *dev;
        struct input_dev        *input_dev;
 
-       struct workqueue_struct *workqueue;
        struct work_struct      play_work;
 
        bool                    enabled;
@@ -143,19 +142,7 @@ static int vibra_play(struct input_dev *input, void *data,
        if (!info->speed)
                info->speed = effect->u.rumble.weak_magnitude >> 9;
        info->direction = effect->direction < EFFECT_DIR_180_DEG ? 0 : 1;
-       queue_work(info->workqueue, &info->play_work);
-       return 0;
-}
-
-static int twl4030_vibra_open(struct input_dev *input)
-{
-       struct vibra_info *info = input_get_drvdata(input);
-
-       info->workqueue = create_singlethread_workqueue("vibra");
-       if (info->workqueue == NULL) {
-               dev_err(&input->dev, "couldn't create workqueue\n");
-               return -ENOMEM;
-       }
+       schedule_work(&info->play_work);
        return 0;
 }
 
@@ -164,9 +151,6 @@ static void twl4030_vibra_close(struct input_dev *input)
        struct vibra_info *info = input_get_drvdata(input);
 
        cancel_work_sync(&info->play_work);
-       INIT_WORK(&info->play_work, vibra_play_work); /* cleanup */
-       destroy_workqueue(info->workqueue);
-       info->workqueue = NULL;
 
        if (info->enabled)
                vibra_disable(info);
@@ -219,7 +203,7 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
@@ -227,11 +211,10 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
        info->coexist = twl4030_vibra_check_coexist(pdata, twl4030_core_node);
        INIT_WORK(&info->play_work, vibra_play_work);
 
-       info->input_dev = input_allocate_device();
+       info->input_dev = devm_input_allocate_device(&pdev->dev);
        if (info->input_dev == NULL) {
                dev_err(&pdev->dev, "couldn't allocate input device\n");
-               ret = -ENOMEM;
-               goto err_kzalloc;
+               return -ENOMEM;
        }
 
        input_set_drvdata(info->input_dev, info);
@@ -239,14 +222,13 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
        info->input_dev->name = "twl4030:vibrator";
        info->input_dev->id.version = 1;
        info->input_dev->dev.parent = pdev->dev.parent;
-       info->input_dev->open = twl4030_vibra_open;
        info->input_dev->close = twl4030_vibra_close;
        __set_bit(FF_RUMBLE, info->input_dev->ffbit);
 
        ret = input_ff_create_memless(info->input_dev, NULL, vibra_play);
        if (ret < 0) {
                dev_dbg(&pdev->dev, "couldn't register vibrator to FF\n");
-               goto err_ialloc;
+               return ret;
        }
 
        ret = input_register_device(info->input_dev);
@@ -262,28 +244,11 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
 
 err_iff:
        input_ff_destroy(info->input_dev);
-err_ialloc:
-       input_free_device(info->input_dev);
-err_kzalloc:
-       kfree(info);
        return ret;
 }
 
-static int twl4030_vibra_remove(struct platform_device *pdev)
-{
-       struct vibra_info *info = platform_get_drvdata(pdev);
-
-       /* this also free ff-memless and calls close if needed */
-       input_unregister_device(info->input_dev);
-       kfree(info);
-       platform_set_drvdata(pdev, NULL);
-
-       return 0;
-}
-
 static struct platform_driver twl4030_vibra_driver = {
        .probe          = twl4030_vibra_probe,
-       .remove         = twl4030_vibra_remove,
        .driver         = {
                .name   = "twl4030-vibra",
                .owner  = THIS_MODULE,
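
In twl4030-vibra the private single-threaded workqueue disappears: the play work is queued on the shared system workqueue with schedule_work() and flushed with cancel_work_sync() in close(), which also removes the need for an open() callback. The shape of that pattern, in a reduced sketch:

    #include <linux/workqueue.h>

    static void example_play_work(struct work_struct *work)
    {
            /* program the vibration effect into the hardware here */
    }

    static DECLARE_WORK(example_play, example_play_work);

    static void example_queue_effect(void)
    {
            schedule_work(&example_play);       /* was queue_work() on a private wq */
    }

    static void example_shutdown(void)
    {
            cancel_work_sync(&example_play);    /* wait for an in-flight run to finish */
    }
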
index 71a28ee699f3ef52732e7cd601f26d1ed2eb72c0..0c2dfc8e96918c3e022aad1920ece05e1115abd8 100644 (file)
@@ -275,7 +275,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
        if (!info) {
                dev_err(&pdev->dev, "couldn't allocate memory\n");
                return -ENOMEM;
@@ -309,53 +309,23 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
        if ((!info->vibldrv_res && !info->viblmotor_res) ||
            (!info->vibrdrv_res && !info->vibrmotor_res)) {
                dev_err(info->dev, "invalid vibra driver/motor resistance\n");
-               ret = -EINVAL;
-               goto err_kzalloc;
+               return -EINVAL;
        }
 
        info->irq = platform_get_irq(pdev, 0);
        if (info->irq < 0) {
                dev_err(info->dev, "invalid irq\n");
-               ret = -EINVAL;
-               goto err_kzalloc;
+               return -EINVAL;
        }
 
        mutex_init(&info->mutex);
 
-       info->input_dev = input_allocate_device();
-       if (info->input_dev == NULL) {
-               dev_err(info->dev, "couldn't allocate input device\n");
-               ret = -ENOMEM;
-               goto err_kzalloc;
-       }
-
-       input_set_drvdata(info->input_dev, info);
-
-       info->input_dev->name = "twl6040:vibrator";
-       info->input_dev->id.version = 1;
-       info->input_dev->dev.parent = pdev->dev.parent;
-       info->input_dev->close = twl6040_vibra_close;
-       __set_bit(FF_RUMBLE, info->input_dev->ffbit);
-
-       ret = input_ff_create_memless(info->input_dev, NULL, vibra_play);
-       if (ret < 0) {
-               dev_err(info->dev, "couldn't register vibrator to FF\n");
-               goto err_ialloc;
-       }
-
-       ret = input_register_device(info->input_dev);
-       if (ret < 0) {
-               dev_err(info->dev, "couldn't register input device\n");
-               goto err_iff;
-       }
-
-       platform_set_drvdata(pdev, info);
-
-       ret = request_threaded_irq(info->irq, NULL, twl6040_vib_irq_handler, 0,
-                                  "twl6040_irq_vib", info);
+       ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
+                                       twl6040_vib_irq_handler, 0,
+                                       "twl6040_irq_vib", info);
        if (ret) {
                dev_err(info->dev, "VIB IRQ request failed: %d\n", ret);
-               goto err_irq;
+               return ret;
        }
 
        info->supplies[0].supply = "vddvibl";
@@ -368,7 +338,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
                                 ARRAY_SIZE(info->supplies), info->supplies);
        if (ret) {
                dev_err(info->dev, "couldn't get regulators %d\n", ret);
-               goto err_regulator;
+               return ret;
        }
 
        if (vddvibl_uV) {
@@ -377,7 +347,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
                if (ret) {
                        dev_err(info->dev, "failed to set VDDVIBL volt %d\n",
                                ret);
-                       goto err_voltage;
+                       goto err_regulator;
                }
        }
 
@@ -387,34 +357,49 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
                if (ret) {
                        dev_err(info->dev, "failed to set VDDVIBR volt %d\n",
                                ret);
-                       goto err_voltage;
+                       goto err_regulator;
                }
        }
 
-       info->workqueue = alloc_workqueue("twl6040-vibra", 0, 0);
-       if (info->workqueue == NULL) {
-               dev_err(info->dev, "couldn't create workqueue\n");
+       INIT_WORK(&info->play_work, vibra_play_work);
+
+       info->input_dev = input_allocate_device();
+       if (info->input_dev == NULL) {
+               dev_err(info->dev, "couldn't allocate input device\n");
                ret = -ENOMEM;
-               goto err_voltage;
+               goto err_regulator;
        }
-       INIT_WORK(&info->play_work, vibra_play_work);
+
+       input_set_drvdata(info->input_dev, info);
+
+       info->input_dev->name = "twl6040:vibrator";
+       info->input_dev->id.version = 1;
+       info->input_dev->dev.parent = pdev->dev.parent;
+       info->input_dev->close = twl6040_vibra_close;
+       __set_bit(FF_RUMBLE, info->input_dev->ffbit);
+
+       ret = input_ff_create_memless(info->input_dev, NULL, vibra_play);
+       if (ret < 0) {
+               dev_err(info->dev, "couldn't register vibrator to FF\n");
+               goto err_ialloc;
+       }
+
+       ret = input_register_device(info->input_dev);
+       if (ret < 0) {
+               dev_err(info->dev, "couldn't register input device\n");
+               goto err_iff;
+       }
+
+       platform_set_drvdata(pdev, info);
 
        return 0;
 
-err_voltage:
-       regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies);
-err_regulator:
-       free_irq(info->irq, info);
-err_irq:
-       input_unregister_device(info->input_dev);
-       info->input_dev = NULL;
 err_iff:
-       if (info->input_dev)
-               input_ff_destroy(info->input_dev);
+       input_ff_destroy(info->input_dev);
 err_ialloc:
        input_free_device(info->input_dev);
-err_kzalloc:
-       kfree(info);
+err_regulator:
+       regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies);
        return ret;
 }
 
@@ -423,10 +408,7 @@ static int twl6040_vibra_remove(struct platform_device *pdev)
        struct vibra_info *info = platform_get_drvdata(pdev);
 
        input_unregister_device(info->input_dev);
-       free_irq(info->irq, info);
        regulator_bulk_free(ARRAY_SIZE(info->supplies), info->supplies);
-       destroy_workqueue(info->workqueue);
-       kfree(info);
 
        return 0;
 }
index 558767d8ebf43d501d77cb3277a706a722a92681..caa2c4068f0924f8070c705501f6771e6f0152fe 100644 (file)
@@ -86,7 +86,7 @@ static int wm831x_on_probe(struct platform_device *pdev)
        wm831x_on->wm831x = wm831x;
        INIT_DELAYED_WORK(&wm831x_on->work, wm831x_poll_on);
 
-       wm831x_on->dev = input_allocate_device();
+       wm831x_on->dev = devm_input_allocate_device(&pdev->dev);
        if (!wm831x_on->dev) {
                dev_err(&pdev->dev, "Can't allocate input dev\n");
                ret = -ENOMEM;
@@ -119,7 +119,6 @@ static int wm831x_on_probe(struct platform_device *pdev)
 err_irq:
        free_irq(irq, wm831x_on);
 err_input_dev:
-       input_free_device(wm831x_on->dev);
 err:
        return ret;
 }
@@ -131,7 +130,6 @@ static int wm831x_on_remove(struct platform_device *pdev)
 
        free_irq(irq, wm831x_on);
        cancel_delayed_work_sync(&wm831x_on->work);
-       input_unregister_device(wm831x_on->dev);
 
        return 0;
 }
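
The wm831x-on change switches to devm_input_allocate_device(), tying the input device's lifetime to the platform device: no input_free_device() on the error path and no input_unregister_device() in remove(). A minimal sketch, assuming a driver with no other unwinding to do:

    #include <linux/input.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct input_dev *idev;

            idev = devm_input_allocate_device(&pdev->dev);  /* freed on detach */
            if (!idev)
                    return -ENOMEM;

            idev->name = "example-button";
            idev->id.bustype = BUS_HOST;

            /* If registration succeeds, devm also unregisters it on detach. */
            return input_register_device(idev);
    }
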
index cd6268cf7cd5d2f99115eba326eb65763236507d..802bd6a72d736422ec21e2c4d655b78635d014cf 100644 (file)
@@ -68,6 +68,16 @@ config MOUSE_PS2_SYNAPTICS
 
          If unsure, say Y.
 
+config MOUSE_PS2_CYPRESS
+       bool "Cypress PS/2 mouse protocol extension" if EXPERT
+       default y
+       depends on MOUSE_PS2
+       help
+         Say Y here if you have a Cypress PS/2 Trackpad connected to
+         your system.
+
+         If unsure, say Y.
+
 config MOUSE_PS2_LIFEBOOK
        bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EXPERT
        default y
@@ -193,6 +203,18 @@ config MOUSE_BCM5974
          To compile this driver as a module, choose M here: the
          module will be called bcm5974.
 
+config MOUSE_CYAPA
+       tristate "Cypress APA I2C Trackpad support"
+       depends on I2C
+       help
+         This driver adds support for Cypress All Points Addressable (APA)
+         I2C Trackpads, including the ones used in 2012 Samsung Chromebooks.
+
+         Say Y here if you have a Cypress APA I2C Trackpad.
+
+         To compile this driver as a module, choose M here: the module will be
+         called cyapa.
+
 config MOUSE_INPORT
        tristate "InPort/MS/ATIXL busmouse"
        depends on ISA
index 46ba7556fd4fea020f97c7cb6ac290140d5f828d..c25efdb3f288b0284e041c82dca3cbf42b122b9a 100644 (file)
@@ -8,6 +8,7 @@ obj-$(CONFIG_MOUSE_AMIGA)               += amimouse.o
 obj-$(CONFIG_MOUSE_APPLETOUCH)         += appletouch.o
 obj-$(CONFIG_MOUSE_ATARI)              += atarimouse.o
 obj-$(CONFIG_MOUSE_BCM5974)            += bcm5974.o
+obj-$(CONFIG_MOUSE_CYAPA)              += cyapa.o
 obj-$(CONFIG_MOUSE_GPIO)               += gpio_mouse.o
 obj-$(CONFIG_MOUSE_INPORT)             += inport.o
 obj-$(CONFIG_MOUSE_LOGIBM)             += logibm.o
@@ -32,3 +33,4 @@ psmouse-$(CONFIG_MOUSE_PS2_LIFEBOOK)  += lifebook.o
 psmouse-$(CONFIG_MOUSE_PS2_SENTELIC)   += sentelic.o
 psmouse-$(CONFIG_MOUSE_PS2_TRACKPOINT) += trackpoint.o
 psmouse-$(CONFIG_MOUSE_PS2_TOUCHKIT)   += touchkit_ps2.o
+psmouse-$(CONFIG_MOUSE_PS2_CYPRESS)    += cypress_ps2.o
index e229fa3cad965e7bdbe8ae1b5358f2335be5394f..7b99fc7c9438dbcbcc115248403e4b9704611f29 100644 (file)
 /*
  * Definitions for ALPS version 3 and 4 command mode protocol
  */
-#define ALPS_V3_X_MAX  2000
-#define ALPS_V3_Y_MAX  1400
-
-#define ALPS_BITMAP_X_BITS     15
-#define ALPS_BITMAP_Y_BITS     11
-
 #define ALPS_CMD_NIBBLE_10     0x01f2
 
+#define ALPS_REG_BASE_RUSHMORE 0xc2c0
+#define ALPS_REG_BASE_PINNACLE 0x0000
+
 static const struct alps_nibble_commands alps_v3_nibble_commands[] = {
        { PSMOUSE_CMD_SETPOLL,          0x00 }, /* 0 */
        { PSMOUSE_CMD_RESET_DIS,        0x00 }, /* 1 */
@@ -109,11 +106,14 @@ static const struct alps_model_info alps_model_data[] = {
        { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS },           /* Dell Vostro 1400 */
        { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
                ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },                            /* Toshiba Tecra A11-11L */
-       { { 0x73, 0x02, 0x64 }, 0x9b, ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT },
-       { { 0x73, 0x02, 0x64 }, 0x9d, ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT },
        { { 0x73, 0x02, 0x64 }, 0x8a, ALPS_PROTO_V4, 0x8f, 0x8f, 0 },
 };
 
+static void alps_set_abs_params_st(struct alps_data *priv,
+                                  struct input_dev *dev1);
+static void alps_set_abs_params_mt(struct alps_data *priv,
+                                  struct input_dev *dev1);
+
 /*
  * XXX - this entry is suspicious. First byte has zero lower nibble,
  * which is what a normal mouse would report. Also, the value 0x0e
@@ -122,10 +122,10 @@ static const struct alps_model_info alps_model_data[] = {
 
 /* Packet formats are described in Documentation/input/alps.txt */
 
-static bool alps_is_valid_first_byte(const struct alps_model_info *model,
+static bool alps_is_valid_first_byte(struct alps_data *priv,
                                     unsigned char data)
 {
-       return (data & model->mask0) == model->byte0;
+       return (data & priv->mask0) == priv->byte0;
 }
 
 static void alps_report_buttons(struct psmouse *psmouse,
@@ -158,14 +158,13 @@ static void alps_report_buttons(struct psmouse *psmouse,
 static void alps_process_packet_v1_v2(struct psmouse *psmouse)
 {
        struct alps_data *priv = psmouse->private;
-       const struct alps_model_info *model = priv->i;
        unsigned char *packet = psmouse->packet;
        struct input_dev *dev = psmouse->dev;
        struct input_dev *dev2 = priv->dev2;
        int x, y, z, ges, fin, left, right, middle;
        int back = 0, forward = 0;
 
-       if (model->proto_version == ALPS_PROTO_V1) {
+       if (priv->proto_version == ALPS_PROTO_V1) {
                left = packet[2] & 0x10;
                right = packet[2] & 0x08;
                middle = 0;
@@ -181,12 +180,12 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
                z = packet[5];
        }
 
-       if (model->flags & ALPS_FW_BK_1) {
+       if (priv->flags & ALPS_FW_BK_1) {
                back = packet[0] & 0x10;
                forward = packet[2] & 4;
        }
 
-       if (model->flags & ALPS_FW_BK_2) {
+       if (priv->flags & ALPS_FW_BK_2) {
                back = packet[3] & 4;
                forward = packet[2] & 4;
                if ((middle = forward && back))
@@ -196,7 +195,7 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
        ges = packet[2] & 1;
        fin = packet[2] & 2;
 
-       if ((model->flags & ALPS_DUALPOINT) && z == 127) {
+       if ((priv->flags & ALPS_DUALPOINT) && z == 127) {
                input_report_rel(dev2, REL_X,  (x > 383 ? (x - 768) : x));
                input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y));
 
@@ -239,15 +238,15 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
        input_report_abs(dev, ABS_PRESSURE, z);
        input_report_key(dev, BTN_TOOL_FINGER, z > 0);
 
-       if (model->flags & ALPS_WHEEL)
+       if (priv->flags & ALPS_WHEEL)
                input_report_rel(dev, REL_WHEEL, ((packet[2] << 1) & 0x08) - ((packet[0] >> 4) & 0x07));
 
-       if (model->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) {
+       if (priv->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) {
                input_report_key(dev, BTN_FORWARD, forward);
                input_report_key(dev, BTN_BACK, back);
        }
 
-       if (model->flags & ALPS_FOUR_BUTTONS) {
+       if (priv->flags & ALPS_FOUR_BUTTONS) {
                input_report_key(dev, BTN_0, packet[2] & 4);
                input_report_key(dev, BTN_1, packet[0] & 0x10);
                input_report_key(dev, BTN_2, packet[3] & 4);
@@ -267,7 +266,8 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
  * These points are returned in x1, y1, x2, and y2 when the return value
  * is greater than 0.
  */
-static int alps_process_bitmap(unsigned int x_map, unsigned int y_map,
+static int alps_process_bitmap(struct alps_data *priv,
+                              unsigned int x_map, unsigned int y_map,
                               int *x1, int *y1, int *x2, int *y2)
 {
        struct alps_bitmap_point {
@@ -309,7 +309,7 @@ static int alps_process_bitmap(unsigned int x_map, unsigned int y_map,
         * y bitmap is reversed for what we need (lower positions are in
         * higher bits), so we process from the top end.
         */
-       y_map = y_map << (sizeof(y_map) * BITS_PER_BYTE - ALPS_BITMAP_Y_BITS);
+       y_map = y_map << (sizeof(y_map) * BITS_PER_BYTE - priv->y_bits);
        prev_bit = 0;
        point = &y_low;
        for (i = 0; y_map != 0; i++, y_map <<= 1) {
@@ -355,16 +355,18 @@ static int alps_process_bitmap(unsigned int x_map, unsigned int y_map,
                }
        }
 
-       *x1 = (ALPS_V3_X_MAX * (2 * x_low.start_bit + x_low.num_bits - 1)) /
-             (2 * (ALPS_BITMAP_X_BITS - 1));
-       *y1 = (ALPS_V3_Y_MAX * (2 * y_low.start_bit + y_low.num_bits - 1)) /
-             (2 * (ALPS_BITMAP_Y_BITS - 1));
+       *x1 = (priv->x_max * (2 * x_low.start_bit + x_low.num_bits - 1)) /
+             (2 * (priv->x_bits - 1));
+       *y1 = (priv->y_max * (2 * y_low.start_bit + y_low.num_bits - 1)) /
+             (2 * (priv->y_bits - 1));
 
        if (fingers > 1) {
-               *x2 = (ALPS_V3_X_MAX * (2 * x_high.start_bit + x_high.num_bits - 1)) /
-                     (2 * (ALPS_BITMAP_X_BITS - 1));
-               *y2 = (ALPS_V3_Y_MAX * (2 * y_high.start_bit + y_high.num_bits - 1)) /
-                     (2 * (ALPS_BITMAP_Y_BITS - 1));
+               *x2 = (priv->x_max *
+                      (2 * x_high.start_bit + x_high.num_bits - 1)) /
+                     (2 * (priv->x_bits - 1));
+               *y2 = (priv->y_max *
+                      (2 * y_high.start_bit + y_high.num_bits - 1)) /
+                     (2 * (priv->y_bits - 1));
        }
 
        return fingers;
@@ -448,17 +450,57 @@ static void alps_process_trackstick_packet_v3(struct psmouse *psmouse)
        return;
 }
 
+static void alps_decode_buttons_v3(struct alps_fields *f, unsigned char *p)
+{
+       f->left = !!(p[3] & 0x01);
+       f->right = !!(p[3] & 0x02);
+       f->middle = !!(p[3] & 0x04);
+
+       f->ts_left = !!(p[3] & 0x10);
+       f->ts_right = !!(p[3] & 0x20);
+       f->ts_middle = !!(p[3] & 0x40);
+}
+
+static void alps_decode_pinnacle(struct alps_fields *f, unsigned char *p)
+{
+       f->first_mp = !!(p[4] & 0x40);
+       f->is_mp = !!(p[0] & 0x40);
+
+       f->fingers = (p[5] & 0x3) + 1;
+       f->x_map = ((p[4] & 0x7e) << 8) |
+                  ((p[1] & 0x7f) << 2) |
+                  ((p[0] & 0x30) >> 4);
+       f->y_map = ((p[3] & 0x70) << 4) |
+                  ((p[2] & 0x7f) << 1) |
+                  (p[4] & 0x01);
+
+       f->x = ((p[1] & 0x7f) << 4) | ((p[4] & 0x30) >> 2) |
+              ((p[0] & 0x30) >> 4);
+       f->y = ((p[2] & 0x7f) << 4) | (p[4] & 0x0f);
+       f->z = p[5] & 0x7f;
+
+       alps_decode_buttons_v3(f, p);
+}
+
+static void alps_decode_rushmore(struct alps_fields *f, unsigned char *p)
+{
+       alps_decode_pinnacle(f, p);
+
+       f->x_map |= (p[5] & 0x10) << 11;
+       f->y_map |= (p[5] & 0x20) << 6;
+}
+
 static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
 {
        struct alps_data *priv = psmouse->private;
        unsigned char *packet = psmouse->packet;
        struct input_dev *dev = psmouse->dev;
        struct input_dev *dev2 = priv->dev2;
-       int x, y, z;
-       int left, right, middle;
        int x1 = 0, y1 = 0, x2 = 0, y2 = 0;
        int fingers = 0, bmap_fingers;
-       unsigned int x_bitmap, y_bitmap;
+       struct alps_fields f;
+
+       priv->decode_fields(&f, packet);
 
        /*
         * There's no single feature of touchpad position and bitmap packets
@@ -473,16 +515,10 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
                 * packet. Check for this, and when it happens process the
                 * position packet as usual.
                 */
-               if (packet[0] & 0x40) {
-                       fingers = (packet[5] & 0x3) + 1;
-                       x_bitmap = ((packet[4] & 0x7e) << 8) |
-                                  ((packet[1] & 0x7f) << 2) |
-                                  ((packet[0] & 0x30) >> 4);
-                       y_bitmap = ((packet[3] & 0x70) << 4) |
-                                  ((packet[2] & 0x7f) << 1) |
-                                  (packet[4] & 0x01);
-
-                       bmap_fingers = alps_process_bitmap(x_bitmap, y_bitmap,
+               if (f.is_mp) {
+                       fingers = f.fingers;
+                       bmap_fingers = alps_process_bitmap(priv,
+                                                          f.x_map, f.y_map,
                                                           &x1, &y1, &x2, &y2);
 
                        /*
@@ -493,7 +529,7 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
                                fingers = bmap_fingers;
 
                        /* Now process position packet */
-                       packet = priv->multi_data;
+                       priv->decode_fields(&f, priv->multi_data);
                } else {
                        priv->multi_packet = 0;
                }
@@ -507,10 +543,10 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
         * out misidentified bitmap packets, we reject anything with this
         * bit set.
         */
-       if (packet[0] & 0x40)
+       if (f.is_mp)
                return;
 
-       if (!priv->multi_packet && (packet[4] & 0x40)) {
+       if (!priv->multi_packet && f.first_mp) {
                priv->multi_packet = 1;
                memcpy(priv->multi_data, packet, sizeof(priv->multi_data));
                return;
@@ -518,22 +554,13 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
 
        priv->multi_packet = 0;
 
-       left = packet[3] & 0x01;
-       right = packet[3] & 0x02;
-       middle = packet[3] & 0x04;
-
-       x = ((packet[1] & 0x7f) << 4) | ((packet[4] & 0x30) >> 2) |
-           ((packet[0] & 0x30) >> 4);
-       y = ((packet[2] & 0x7f) << 4) | (packet[4] & 0x0f);
-       z = packet[5] & 0x7f;
-
        /*
         * Sometimes the hardware sends a single packet with z = 0
         * in the middle of a stream. Real releases generate packets
         * with x, y, and z all zero, so these seem to be flukes.
         * Ignore them.
         */
-       if (x && y && !z)
+       if (f.x && f.y && !f.z)
                return;
 
        /*
@@ -541,12 +568,12 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
         * to rely on ST data.
         */
        if (!fingers) {
-               x1 = x;
-               y1 = y;
-               fingers = z > 0 ? 1 : 0;
+               x1 = f.x;
+               y1 = f.y;
+               fingers = f.z > 0 ? 1 : 0;
        }
 
-       if (z >= 64)
+       if (f.z >= 64)
                input_report_key(dev, BTN_TOUCH, 1);
        else
                input_report_key(dev, BTN_TOUCH, 0);
@@ -555,26 +582,22 @@ static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
 
        input_mt_report_finger_count(dev, fingers);
 
-       input_report_key(dev, BTN_LEFT, left);
-       input_report_key(dev, BTN_RIGHT, right);
-       input_report_key(dev, BTN_MIDDLE, middle);
+       input_report_key(dev, BTN_LEFT, f.left);
+       input_report_key(dev, BTN_RIGHT, f.right);
+       input_report_key(dev, BTN_MIDDLE, f.middle);
 
-       if (z > 0) {
-               input_report_abs(dev, ABS_X, x);
-               input_report_abs(dev, ABS_Y, y);
+       if (f.z > 0) {
+               input_report_abs(dev, ABS_X, f.x);
+               input_report_abs(dev, ABS_Y, f.y);
        }
-       input_report_abs(dev, ABS_PRESSURE, z);
+       input_report_abs(dev, ABS_PRESSURE, f.z);
 
        input_sync(dev);
 
        if (!(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS)) {
-               left = packet[3] & 0x10;
-               right = packet[3] & 0x20;
-               middle = packet[3] & 0x40;
-
-               input_report_key(dev2, BTN_LEFT, left);
-               input_report_key(dev2, BTN_RIGHT, right);
-               input_report_key(dev2, BTN_MIDDLE, middle);
+               input_report_key(dev2, BTN_LEFT, f.ts_left);
+               input_report_key(dev2, BTN_RIGHT, f.ts_right);
+               input_report_key(dev2, BTN_MIDDLE, f.ts_middle);
                input_sync(dev2);
        }
 }
@@ -639,7 +662,7 @@ static void alps_process_packet_v4(struct psmouse *psmouse)
                           ((priv->multi_data[3] & 0x1f) << 5) |
                            (priv->multi_data[1] & 0x1f);
 
-               fingers = alps_process_bitmap(x_bitmap, y_bitmap,
+               fingers = alps_process_bitmap(priv, x_bitmap, y_bitmap,
                                              &x1, &y1, &x2, &y2);
 
                /* Store MT data.*/
@@ -696,25 +719,6 @@ static void alps_process_packet_v4(struct psmouse *psmouse)
        input_sync(dev);
 }
 
-static void alps_process_packet(struct psmouse *psmouse)
-{
-       struct alps_data *priv = psmouse->private;
-       const struct alps_model_info *model = priv->i;
-
-       switch (model->proto_version) {
-       case ALPS_PROTO_V1:
-       case ALPS_PROTO_V2:
-               alps_process_packet_v1_v2(psmouse);
-               break;
-       case ALPS_PROTO_V3:
-               alps_process_packet_v3(psmouse);
-               break;
-       case ALPS_PROTO_V4:
-               alps_process_packet_v4(psmouse);
-               break;
-       }
-}
-
 static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
                                        unsigned char packet[],
                                        bool report_buttons)
@@ -765,14 +769,14 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
                if (((psmouse->packet[3] |
                      psmouse->packet[4] |
                      psmouse->packet[5]) & 0x80) ||
-                   (!alps_is_valid_first_byte(priv->i, psmouse->packet[6]))) {
+                   (!alps_is_valid_first_byte(priv, psmouse->packet[6]))) {
                        psmouse_dbg(psmouse,
                                    "refusing packet %4ph (suspected interleaved ps/2)\n",
                                    psmouse->packet + 3);
                        return PSMOUSE_BAD_DATA;
                }
 
-               alps_process_packet(psmouse);
+               priv->process_packet(psmouse);
 
                /* Continue with the next packet */
                psmouse->packet[0] = psmouse->packet[6];
@@ -816,6 +820,7 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
 static void alps_flush_packet(unsigned long data)
 {
        struct psmouse *psmouse = (struct psmouse *)data;
+       struct alps_data *priv = psmouse->private;
 
        serio_pause_rx(psmouse->ps2dev.serio);
 
@@ -833,7 +838,7 @@ static void alps_flush_packet(unsigned long data)
                                    "refusing packet %3ph (suspected interleaved ps/2)\n",
                                    psmouse->packet + 3);
                } else {
-                       alps_process_packet(psmouse);
+                       priv->process_packet(psmouse);
                }
                psmouse->pktcnt = 0;
        }
@@ -844,7 +849,6 @@ static void alps_flush_packet(unsigned long data)
 static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
 {
        struct alps_data *priv = psmouse->private;
-       const struct alps_model_info *model = priv->i;
 
        if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */
                if (psmouse->pktcnt == 3) {
@@ -857,15 +861,15 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
 
        /* Check for PS/2 packet stuffed in the middle of ALPS packet. */
 
-       if ((model->flags & ALPS_PS2_INTERLEAVED) &&
+       if ((priv->flags & ALPS_PS2_INTERLEAVED) &&
            psmouse->pktcnt >= 4 && (psmouse->packet[3] & 0x0f) == 0x0f) {
                return alps_handle_interleaved_ps2(psmouse);
        }
 
-       if (!alps_is_valid_first_byte(model, psmouse->packet[0])) {
+       if (!alps_is_valid_first_byte(priv, psmouse->packet[0])) {
                psmouse_dbg(psmouse,
                            "refusing packet[0] = %x (mask0 = %x, byte0 = %x)\n",
-                           psmouse->packet[0], model->mask0, model->byte0);
+                           psmouse->packet[0], priv->mask0, priv->byte0);
                return PSMOUSE_BAD_DATA;
        }
 
@@ -879,7 +883,7 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
        }
 
        if (psmouse->pktcnt == psmouse->pktsize) {
-               alps_process_packet(psmouse);
+               priv->process_packet(psmouse);
                return PSMOUSE_FULL_PACKET;
        }
 
@@ -967,24 +971,42 @@ static int alps_command_mode_write_reg(struct psmouse *psmouse, int addr,
        return __alps_command_mode_write_reg(psmouse, value);
 }
 
+static int alps_rpt_cmd(struct psmouse *psmouse, int init_command,
+                       int repeated_command, unsigned char *param)
+{
+       struct ps2dev *ps2dev = &psmouse->ps2dev;
+
+       param[0] = 0;
+       if (init_command && ps2_command(ps2dev, param, init_command))
+               return -EIO;
+
+       if (ps2_command(ps2dev,  NULL, repeated_command) ||
+           ps2_command(ps2dev,  NULL, repeated_command) ||
+           ps2_command(ps2dev,  NULL, repeated_command))
+               return -EIO;
+
+       param[0] = param[1] = param[2] = 0xff;
+       if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
+               return -EIO;
+
+       psmouse_dbg(psmouse, "%2.2X report: %2.2x %2.2x %2.2x\n",
+                   repeated_command, param[0], param[1], param[2]);
+       return 0;
+}
+
 static int alps_enter_command_mode(struct psmouse *psmouse,
                                   unsigned char *resp)
 {
        unsigned char param[4];
-       struct ps2dev *ps2dev = &psmouse->ps2dev;
 
-       if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
-           ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
-           ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
-           ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
+       if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_RESET_WRAP, param)) {
                psmouse_err(psmouse, "failed to enter command mode\n");
                return -1;
        }
 
-       if (param[0] != 0x88 && param[1] != 0x07) {
+       if (param[0] != 0x88 || (param[1] != 0x07 && param[1] != 0x08)) {
                psmouse_dbg(psmouse,
-                           "unknown response while entering command mode: %2.2x %2.2x %2.2x\n",
-                           param[0], param[1], param[2]);
+                           "unknown response while entering command mode\n");
                return -1;
        }
 
@@ -1001,99 +1023,6 @@ static inline int alps_exit_command_mode(struct psmouse *psmouse)
        return 0;
 }
 
-static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int *version)
-{
-       struct ps2dev *ps2dev = &psmouse->ps2dev;
-       static const unsigned char rates[] = { 0, 10, 20, 40, 60, 80, 100, 200 };
-       unsigned char param[4];
-       const struct alps_model_info *model = NULL;
-       int i;
-
-       /*
-        * First try "E6 report".
-        * ALPS should return 0,0,10 or 0,0,100 if no buttons are pressed.
-        * The bits 0-2 of the first byte will be 1s if some buttons are
-        * pressed.
-        */
-       param[0] = 0;
-       if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES) ||
-           ps2_command(ps2dev,  NULL, PSMOUSE_CMD_SETSCALE11) ||
-           ps2_command(ps2dev,  NULL, PSMOUSE_CMD_SETSCALE11) ||
-           ps2_command(ps2dev,  NULL, PSMOUSE_CMD_SETSCALE11))
-               return NULL;
-
-       param[0] = param[1] = param[2] = 0xff;
-       if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
-               return NULL;
-
-       psmouse_dbg(psmouse, "E6 report: %2.2x %2.2x %2.2x",
-                   param[0], param[1], param[2]);
-
-       if ((param[0] & 0xf8) != 0 || param[1] != 0 ||
-           (param[2] != 10 && param[2] != 100))
-               return NULL;
-
-       /*
-        * Now try "E7 report". Allowed responses are in
-        * alps_model_data[].signature
-        */
-       param[0] = 0;
-       if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES) ||
-           ps2_command(ps2dev,  NULL, PSMOUSE_CMD_SETSCALE21) ||
-           ps2_command(ps2dev,  NULL, PSMOUSE_CMD_SETSCALE21) ||
-           ps2_command(ps2dev,  NULL, PSMOUSE_CMD_SETSCALE21))
-               return NULL;
-
-       param[0] = param[1] = param[2] = 0xff;
-       if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
-               return NULL;
-
-       psmouse_dbg(psmouse, "E7 report: %2.2x %2.2x %2.2x",
-                   param[0], param[1], param[2]);
-
-       if (version) {
-               for (i = 0; i < ARRAY_SIZE(rates) && param[2] != rates[i]; i++)
-                       /* empty */;
-               *version = (param[0] << 8) | (param[1] << 4) | i;
-       }
-
-       for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) {
-               if (!memcmp(param, alps_model_data[i].signature,
-                           sizeof(alps_model_data[i].signature))) {
-                       model = alps_model_data + i;
-                       break;
-               }
-       }
-
-       if (model && model->proto_version > ALPS_PROTO_V2) {
-               /*
-                * Need to check command mode response to identify
-                * model
-                */
-               model = NULL;
-               if (alps_enter_command_mode(psmouse, param)) {
-                       psmouse_warn(psmouse,
-                                    "touchpad failed to enter command mode\n");
-               } else {
-                       for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) {
-                               if (alps_model_data[i].proto_version > ALPS_PROTO_V2 &&
-                                   alps_model_data[i].command_mode_resp == param[0]) {
-                                       model = alps_model_data + i;
-                                       break;
-                               }
-                       }
-                       alps_exit_command_mode(psmouse);
-
-                       if (!model)
-                               psmouse_dbg(psmouse,
-                                           "Unknown command mode response %2.2x\n",
-                                           param[0]);
-               }
-       }
-
-       return model;
-}
-
 /*
  * For DualPoint devices select the device that should respond to
  * subsequent commands. It looks like glidepad is behind stickpointer,
@@ -1137,18 +1066,10 @@ static int alps_absolute_mode_v1_v2(struct psmouse *psmouse)
 
 static int alps_get_status(struct psmouse *psmouse, char *param)
 {
-       struct ps2dev *ps2dev = &psmouse->ps2dev;
-
        /* Get status: 0xF5 0xF5 0xF5 0xE9 */
-       if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
-           ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
-           ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) ||
-           ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
+       if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_DISABLE, param))
                return -1;
 
-       psmouse_dbg(psmouse, "Status: %2.2x %2.2x %2.2x",
-                   param[0], param[1], param[2]);
-
        return 0;
 }
 
@@ -1190,16 +1111,16 @@ static int alps_poll(struct psmouse *psmouse)
        unsigned char buf[sizeof(psmouse->packet)];
        bool poll_failed;
 
-       if (priv->i->flags & ALPS_PASS)
+       if (priv->flags & ALPS_PASS)
                alps_passthrough_mode_v2(psmouse, true);
 
        poll_failed = ps2_command(&psmouse->ps2dev, buf,
                                  PSMOUSE_CMD_POLL | (psmouse->pktsize << 8)) < 0;
 
-       if (priv->i->flags & ALPS_PASS)
+       if (priv->flags & ALPS_PASS)
                alps_passthrough_mode_v2(psmouse, false);
 
-       if (poll_failed || (buf[0] & priv->i->mask0) != priv->i->byte0)
+       if (poll_failed || (buf[0] & priv->mask0) != priv->byte0)
                return -1;
 
        if ((psmouse->badbyte & 0xc8) == 0x08) {
@@ -1217,9 +1138,8 @@ static int alps_poll(struct psmouse *psmouse)
 static int alps_hw_init_v1_v2(struct psmouse *psmouse)
 {
        struct alps_data *priv = psmouse->private;
-       const struct alps_model_info *model = priv->i;
 
-       if ((model->flags & ALPS_PASS) &&
+       if ((priv->flags & ALPS_PASS) &&
            alps_passthrough_mode_v2(psmouse, true)) {
                return -1;
        }
@@ -1234,7 +1154,7 @@ static int alps_hw_init_v1_v2(struct psmouse *psmouse)
                return -1;
        }
 
-       if ((model->flags & ALPS_PASS) &&
+       if ((priv->flags & ALPS_PASS) &&
            alps_passthrough_mode_v2(psmouse, false)) {
                return -1;
        }
@@ -1249,26 +1169,31 @@ static int alps_hw_init_v1_v2(struct psmouse *psmouse)
 }
 
 /*
- * Enable or disable passthrough mode to the trackstick. Must be in
- * command mode when calling this function.
+ * Enable or disable passthrough mode to the trackstick.
  */
-static int alps_passthrough_mode_v3(struct psmouse *psmouse, bool enable)
+static int alps_passthrough_mode_v3(struct psmouse *psmouse,
+                                   int reg_base, bool enable)
 {
-       int reg_val;
+       int reg_val, ret = -1;
 
-       reg_val = alps_command_mode_read_reg(psmouse, 0x0008);
-       if (reg_val == -1)
+       if (alps_enter_command_mode(psmouse, NULL))
                return -1;
 
+       reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x0008);
+       if (reg_val == -1)
+               goto error;
+
        if (enable)
                reg_val |= 0x01;
        else
                reg_val &= ~0x01;
 
-       if (__alps_command_mode_write_reg(psmouse, reg_val))
-               return -1;
+       ret = __alps_command_mode_write_reg(psmouse, reg_val);
 
-       return 0;
+error:
+       if (alps_exit_command_mode(psmouse))
+               ret = -1;
+       return ret;
 }
 
 /* Must be in command mode when calling this function */
@@ -1287,73 +1212,102 @@ static int alps_absolute_mode_v3(struct psmouse *psmouse)
        return 0;
 }
 
-static int alps_hw_init_v3(struct psmouse *psmouse)
+static int alps_probe_trackstick_v3(struct psmouse *psmouse, int reg_base)
 {
-       struct alps_data *priv = psmouse->private;
-       struct ps2dev *ps2dev = &psmouse->ps2dev;
-       int reg_val;
-       unsigned char param[4];
-
-       priv->nibble_commands = alps_v3_nibble_commands;
-       priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
+       int ret = -EIO, reg_val;
 
        if (alps_enter_command_mode(psmouse, NULL))
                goto error;
 
-       /* Check for trackstick */
-       reg_val = alps_command_mode_read_reg(psmouse, 0x0008);
+       reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x08);
        if (reg_val == -1)
                goto error;
-       if (reg_val & 0x80) {
-               if (alps_passthrough_mode_v3(psmouse, true))
-                       goto error;
-               if (alps_exit_command_mode(psmouse))
-                       goto error;
+
+       /* bit 7: trackstick is present */
+       ret = reg_val & 0x80 ? 0 : -ENODEV;
+
+error:
+       alps_exit_command_mode(psmouse);
+       return ret;
+}
+
+static int alps_setup_trackstick_v3(struct psmouse *psmouse, int reg_base)
+{
+       struct ps2dev *ps2dev = &psmouse->ps2dev;
+       int ret = 0;
+       unsigned char param[4];
+
+       if (alps_passthrough_mode_v3(psmouse, reg_base, true))
+               return -EIO;
+
+       /*
+        * E7 report for the trackstick
+        *
+        * There have been reports of failures that seem to trace back
+        * to the above trackstick check failing. When these occur
+        * this E7 report fails, so when that happens we continue
+        * with the assumption that there isn't a trackstick after
+        * all.
+        */
+       if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_SETSCALE21, param)) {
+               psmouse_warn(psmouse, "trackstick E7 report failed\n");
+               ret = -ENODEV;
+       } else {
+               psmouse_dbg(psmouse,
+                           "trackstick E7 report: %2.2x %2.2x %2.2x\n",
+                           param[0], param[1], param[2]);
 
                /*
-                * E7 report for the trackstick
-                *
-                * There have been reports of failures to seem to trace back
-                * to the above trackstick check failing. When these occur
-                * this E7 report fails, so when that happens we continue
-                * with the assumption that there isn't a trackstick after
-                * all.
+                * Not sure what this does, but it is absolutely
+                * essential. Without it, the touchpad does not
+                * work at all and the trackstick just emits normal
+                * PS/2 packets.
                 */
-               param[0] = 0x64;
-               if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
-                   ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
-                   ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
-                   ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
-                       psmouse_warn(psmouse, "trackstick E7 report failed\n");
-               } else {
-                       psmouse_dbg(psmouse,
-                                   "trackstick E7 report: %2.2x %2.2x %2.2x\n",
-                                   param[0], param[1], param[2]);
-
-                       /*
-                        * Not sure what this does, but it is absolutely
-                        * essential. Without it, the touchpad does not
-                        * work at all and the trackstick just emits normal
-                        * PS/2 packets.
-                        */
-                       if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
-                           ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
-                           ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
-                           alps_command_mode_send_nibble(psmouse, 0x9) ||
-                           alps_command_mode_send_nibble(psmouse, 0x4)) {
-                               psmouse_err(psmouse,
-                                           "Error sending magic E6 sequence\n");
-                               goto error_passthrough;
-                       }
+               if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+                   ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+                   ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+                   alps_command_mode_send_nibble(psmouse, 0x9) ||
+                   alps_command_mode_send_nibble(psmouse, 0x4)) {
+                       psmouse_err(psmouse,
+                                   "Error sending magic E6 sequence\n");
+                       ret = -EIO;
+                       goto error;
                }
 
-               if (alps_enter_command_mode(psmouse, NULL))
-                       goto error_passthrough;
-               if (alps_passthrough_mode_v3(psmouse, false))
-                       goto error;
+               /*
+                * This ensures the trackstick packets are in the format
+                * supported by this driver. If bit 1 isn't set the packet
+                * format is different.
+                */
+               if (alps_enter_command_mode(psmouse, NULL) ||
+                   alps_command_mode_write_reg(psmouse,
+                                               reg_base + 0x08, 0x82) ||
+                   alps_exit_command_mode(psmouse))
+                       ret = -EIO;
        }
 
-       if (alps_absolute_mode_v3(psmouse)) {
+error:
+       if (alps_passthrough_mode_v3(psmouse, reg_base, false))
+               ret = -EIO;
+
+       return ret;
+}
+
+static int alps_hw_init_v3(struct psmouse *psmouse)
+{
+       struct ps2dev *ps2dev = &psmouse->ps2dev;
+       int reg_val;
+       unsigned char param[4];
+
+       reg_val = alps_probe_trackstick_v3(psmouse, ALPS_REG_BASE_PINNACLE);
+       if (reg_val == -EIO)
+               goto error;
+       if (reg_val == 0 &&
+           alps_setup_trackstick_v3(psmouse, ALPS_REG_BASE_PINNACLE) == -EIO)
+               goto error;
+
+       if (alps_enter_command_mode(psmouse, NULL) ||
+           alps_absolute_mode_v3(psmouse)) {
                psmouse_err(psmouse, "Failed to enter absolute mode\n");
                goto error;
        }
@@ -1390,14 +1344,6 @@ static int alps_hw_init_v3(struct psmouse *psmouse)
        if (alps_command_mode_write_reg(psmouse, 0x0162, 0x04))
                goto error;
 
-       /*
-        * This ensures the trackstick packets are in the format
-        * supported by this driver. If bit 1 isn't set the packet
-        * format is different.
-        */
-       if (alps_command_mode_write_reg(psmouse, 0x0008, 0x82))
-               goto error;
-
        alps_exit_command_mode(psmouse);
 
        /* Set rate and enable data reporting */
@@ -1410,10 +1356,6 @@ static int alps_hw_init_v3(struct psmouse *psmouse)
 
        return 0;
 
-error_passthrough:
-       /* Something failed while in passthrough mode, so try to get out */
-       if (!alps_enter_command_mode(psmouse, NULL))
-               alps_passthrough_mode_v3(psmouse, false);
 error:
        /*
         * Leaving the touchpad in command mode will essentially render
@@ -1424,6 +1366,50 @@ error:
        return -1;
 }
 
+static int alps_hw_init_rushmore_v3(struct psmouse *psmouse)
+{
+       struct alps_data *priv = psmouse->private;
+       struct ps2dev *ps2dev = &psmouse->ps2dev;
+       int reg_val, ret = -1;
+
+       if (priv->flags & ALPS_DUALPOINT) {
+               reg_val = alps_setup_trackstick_v3(psmouse,
+                                                  ALPS_REG_BASE_RUSHMORE);
+               if (reg_val == -EIO)
+                       goto error;
+               if (reg_val == -ENODEV)
+                       priv->flags &= ~ALPS_DUALPOINT;
+       }
+
+       if (alps_enter_command_mode(psmouse, NULL) ||
+           alps_command_mode_read_reg(psmouse, 0xc2d9) == -1 ||
+           alps_command_mode_write_reg(psmouse, 0xc2cb, 0x00))
+               goto error;
+
+       reg_val = alps_command_mode_read_reg(psmouse, 0xc2c6);
+       if (reg_val == -1)
+               goto error;
+       if (__alps_command_mode_write_reg(psmouse, reg_val & 0xfd))
+               goto error;
+
+       if (alps_command_mode_write_reg(psmouse, 0xc2c9, 0x64))
+               goto error;
+
+       /* enter absolute mode */
+       reg_val = alps_command_mode_read_reg(psmouse, 0xc2c4);
+       if (reg_val == -1)
+               goto error;
+       if (__alps_command_mode_write_reg(psmouse, reg_val | 0x02))
+               goto error;
+
+       alps_exit_command_mode(psmouse);
+       return ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE);
+
+error:
+       alps_exit_command_mode(psmouse);
+       return ret;
+}
+
 /* Must be in command mode when calling this function */
 static int alps_absolute_mode_v4(struct psmouse *psmouse)
 {
@@ -1442,13 +1428,9 @@ static int alps_absolute_mode_v4(struct psmouse *psmouse)
 
 static int alps_hw_init_v4(struct psmouse *psmouse)
 {
-       struct alps_data *priv = psmouse->private;
        struct ps2dev *ps2dev = &psmouse->ps2dev;
        unsigned char param[4];
 
-       priv->nibble_commands = alps_v4_nibble_commands;
-       priv->addr_command = PSMOUSE_CMD_DISABLE;
-
        if (alps_enter_command_mode(psmouse, NULL))
                goto error;
 
@@ -1517,39 +1499,140 @@ error:
        return -1;
 }
 
-static int alps_hw_init(struct psmouse *psmouse)
+static void alps_set_defaults(struct alps_data *priv)
 {
-       struct alps_data *priv = psmouse->private;
-       const struct alps_model_info *model = priv->i;
-       int ret = -1;
+       priv->byte0 = 0x8f;
+       priv->mask0 = 0x8f;
+       priv->flags = ALPS_DUALPOINT;
+
+       priv->x_max = 2000;
+       priv->y_max = 1400;
+       priv->x_bits = 15;
+       priv->y_bits = 11;
 
-       switch (model->proto_version) {
+       switch (priv->proto_version) {
        case ALPS_PROTO_V1:
        case ALPS_PROTO_V2:
-               ret = alps_hw_init_v1_v2(psmouse);
+               priv->hw_init = alps_hw_init_v1_v2;
+               priv->process_packet = alps_process_packet_v1_v2;
+               priv->set_abs_params = alps_set_abs_params_st;
                break;
        case ALPS_PROTO_V3:
-               ret = alps_hw_init_v3(psmouse);
+               priv->hw_init = alps_hw_init_v3;
+               priv->process_packet = alps_process_packet_v3;
+               priv->set_abs_params = alps_set_abs_params_mt;
+               priv->decode_fields = alps_decode_pinnacle;
+               priv->nibble_commands = alps_v3_nibble_commands;
+               priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
                break;
        case ALPS_PROTO_V4:
-               ret = alps_hw_init_v4(psmouse);
+               priv->hw_init = alps_hw_init_v4;
+               priv->process_packet = alps_process_packet_v4;
+               priv->set_abs_params = alps_set_abs_params_mt;
+               priv->nibble_commands = alps_v4_nibble_commands;
+               priv->addr_command = PSMOUSE_CMD_DISABLE;
                break;
        }
+}
 
-       return ret;
+static int alps_match_table(struct psmouse *psmouse, struct alps_data *priv,
+                           unsigned char *e7, unsigned char *ec)
+{
+       const struct alps_model_info *model;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) {
+               model = &alps_model_data[i];
+
+               if (!memcmp(e7, model->signature, sizeof(model->signature)) &&
+                   (!model->command_mode_resp ||
+                    model->command_mode_resp == ec[2])) {
+
+                       priv->proto_version = model->proto_version;
+                       alps_set_defaults(priv);
+
+                       priv->flags = model->flags;
+                       priv->byte0 = model->byte0;
+                       priv->mask0 = model->mask0;
+
+                       return 0;
+               }
+       }
+
+       return -EINVAL;
+}
+
+static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
+{
+       unsigned char e6[4], e7[4], ec[4];
+
+       /*
+        * First try "E6 report".
+        * ALPS should return 0,0,10 or 0,0,100 if no buttons are pressed.
+        * The bits 0-2 of the first byte will be 1s if some buttons are
+        * pressed.
+        */
+       if (alps_rpt_cmd(psmouse, PSMOUSE_CMD_SETRES,
+                        PSMOUSE_CMD_SETSCALE11, e6))
+               return -EIO;
+
+       if ((e6[0] & 0xf8) != 0 || e6[1] != 0 || (e6[2] != 10 && e6[2] != 100))
+               return -EINVAL;
+
+       /*
+        * Now get the "E7" and "EC" reports.  These will uniquely identify
+        * most ALPS touchpads.
+        */
+       if (alps_rpt_cmd(psmouse, PSMOUSE_CMD_SETRES,
+                        PSMOUSE_CMD_SETSCALE21, e7) ||
+           alps_rpt_cmd(psmouse, PSMOUSE_CMD_SETRES,
+                        PSMOUSE_CMD_RESET_WRAP, ec) ||
+           alps_exit_command_mode(psmouse))
+               return -EIO;
+
+       if (alps_match_table(psmouse, priv, e7, ec) == 0) {
+               return 0;
+       } else if (ec[0] == 0x88 && ec[1] == 0x08) {
+               priv->proto_version = ALPS_PROTO_V3;
+               alps_set_defaults(priv);
+
+               priv->hw_init = alps_hw_init_rushmore_v3;
+               priv->decode_fields = alps_decode_rushmore;
+               priv->x_bits = 16;
+               priv->y_bits = 12;
+
+       /* hack to make addr_command, nibble_commands available */
+               psmouse->private = priv;
+
+               if (alps_probe_trackstick_v3(psmouse, ALPS_REG_BASE_RUSHMORE))
+                       priv->flags &= ~ALPS_DUALPOINT;
+
+               return 0;
+       } else if (ec[0] == 0x88 && ec[1] == 0x07 &&
+                  ec[2] >= 0x90 && ec[2] <= 0x9d) {
+               priv->proto_version = ALPS_PROTO_V3;
+               alps_set_defaults(priv);
+
+               return 0;
+       }
+
+       psmouse_info(psmouse,
+               "Unknown ALPS touchpad: E7=%2.2x %2.2x %2.2x, EC=%2.2x %2.2x %2.2x\n",
+               e7[0], e7[1], e7[2], ec[0], ec[1], ec[2]);
+
+       return -EINVAL;
 }
 
 static int alps_reconnect(struct psmouse *psmouse)
 {
-       const struct alps_model_info *model;
+       struct alps_data *priv = psmouse->private;
 
        psmouse_reset(psmouse);
 
-       model = alps_get_model(psmouse, NULL);
-       if (!model)
+       if (alps_identify(psmouse, priv) < 0)
                return -1;
 
-       return alps_hw_init(psmouse);
+       return priv->hw_init(psmouse);
 }
 
 static void alps_disconnect(struct psmouse *psmouse)
@@ -1562,12 +1645,33 @@ static void alps_disconnect(struct psmouse *psmouse)
        kfree(priv);
 }
 
+static void alps_set_abs_params_st(struct alps_data *priv,
+                                  struct input_dev *dev1)
+{
+       input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0);
+       input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0);
+}
+
+static void alps_set_abs_params_mt(struct alps_data *priv,
+                                  struct input_dev *dev1)
+{
+       set_bit(INPUT_PROP_SEMI_MT, dev1->propbit);
+       input_mt_init_slots(dev1, 2, 0);
+       input_set_abs_params(dev1, ABS_MT_POSITION_X, 0, priv->x_max, 0, 0);
+       input_set_abs_params(dev1, ABS_MT_POSITION_Y, 0, priv->y_max, 0, 0);
+
+       set_bit(BTN_TOOL_DOUBLETAP, dev1->keybit);
+       set_bit(BTN_TOOL_TRIPLETAP, dev1->keybit);
+       set_bit(BTN_TOOL_QUADTAP, dev1->keybit);
+
+       input_set_abs_params(dev1, ABS_X, 0, priv->x_max, 0, 0);
+       input_set_abs_params(dev1, ABS_Y, 0, priv->y_max, 0, 0);
+}
+
 int alps_init(struct psmouse *psmouse)
 {
        struct alps_data *priv;
-       const struct alps_model_info *model;
        struct input_dev *dev1 = psmouse->dev, *dev2;
-       int version;
 
        priv = kzalloc(sizeof(struct alps_data), GFP_KERNEL);
        dev2 = input_allocate_device();
@@ -1581,13 +1685,10 @@ int alps_init(struct psmouse *psmouse)
 
        psmouse_reset(psmouse);
 
-       model = alps_get_model(psmouse, &version);
-       if (!model)
+       if (alps_identify(psmouse, priv) < 0)
                goto init_fail;
 
-       priv->i = model;
-
-       if (alps_hw_init(psmouse))
+       if (priv->hw_init(psmouse))
                goto init_fail;
 
        /*
@@ -1609,41 +1710,20 @@ int alps_init(struct psmouse *psmouse)
 
        dev1->evbit[BIT_WORD(EV_ABS)] |= BIT_MASK(EV_ABS);
 
-       switch (model->proto_version) {
-       case ALPS_PROTO_V1:
-       case ALPS_PROTO_V2:
-               input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0);
-               input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0);
-               break;
-       case ALPS_PROTO_V3:
-       case ALPS_PROTO_V4:
-               set_bit(INPUT_PROP_SEMI_MT, dev1->propbit);
-               input_mt_init_slots(dev1, 2, 0);
-               input_set_abs_params(dev1, ABS_MT_POSITION_X, 0, ALPS_V3_X_MAX, 0, 0);
-               input_set_abs_params(dev1, ABS_MT_POSITION_Y, 0, ALPS_V3_Y_MAX, 0, 0);
-
-               set_bit(BTN_TOOL_DOUBLETAP, dev1->keybit);
-               set_bit(BTN_TOOL_TRIPLETAP, dev1->keybit);
-               set_bit(BTN_TOOL_QUADTAP, dev1->keybit);
-
-               input_set_abs_params(dev1, ABS_X, 0, ALPS_V3_X_MAX, 0, 0);
-               input_set_abs_params(dev1, ABS_Y, 0, ALPS_V3_Y_MAX, 0, 0);
-               break;
-       }
-
+       priv->set_abs_params(priv, dev1);
        input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0);
 
-       if (model->flags & ALPS_WHEEL) {
+       if (priv->flags & ALPS_WHEEL) {
                dev1->evbit[BIT_WORD(EV_REL)] |= BIT_MASK(EV_REL);
                dev1->relbit[BIT_WORD(REL_WHEEL)] |= BIT_MASK(REL_WHEEL);
        }
 
-       if (model->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) {
+       if (priv->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) {
                dev1->keybit[BIT_WORD(BTN_FORWARD)] |= BIT_MASK(BTN_FORWARD);
                dev1->keybit[BIT_WORD(BTN_BACK)] |= BIT_MASK(BTN_BACK);
        }
 
-       if (model->flags & ALPS_FOUR_BUTTONS) {
+       if (priv->flags & ALPS_FOUR_BUTTONS) {
                dev1->keybit[BIT_WORD(BTN_0)] |= BIT_MASK(BTN_0);
                dev1->keybit[BIT_WORD(BTN_1)] |= BIT_MASK(BTN_1);
                dev1->keybit[BIT_WORD(BTN_2)] |= BIT_MASK(BTN_2);
@@ -1654,7 +1734,8 @@ int alps_init(struct psmouse *psmouse)
 
        snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys);
        dev2->phys = priv->phys;
-       dev2->name = (model->flags & ALPS_DUALPOINT) ? "DualPoint Stick" : "PS/2 Mouse";
+       dev2->name = (priv->flags & ALPS_DUALPOINT) ?
+                    "DualPoint Stick" : "PS/2 Mouse";
        dev2->id.bustype = BUS_I8042;
        dev2->id.vendor  = 0x0002;
        dev2->id.product = PSMOUSE_ALPS;
@@ -1673,7 +1754,7 @@ int alps_init(struct psmouse *psmouse)
        psmouse->poll = alps_poll;
        psmouse->disconnect = alps_disconnect;
        psmouse->reconnect = alps_reconnect;
-       psmouse->pktsize = model->proto_version == ALPS_PROTO_V4 ? 8 : 6;
+       psmouse->pktsize = priv->proto_version == ALPS_PROTO_V4 ? 8 : 6;
 
        /* We are having trouble resyncing ALPS touchpads so disable it for now */
        psmouse->resync_time = 0;
@@ -1690,18 +1771,16 @@ init_fail:
 
 int alps_detect(struct psmouse *psmouse, bool set_properties)
 {
-       int version;
-       const struct alps_model_info *model;
+       struct alps_data dummy;
 
-       model = alps_get_model(psmouse, &version);
-       if (!model)
+       if (alps_identify(psmouse, &dummy) < 0)
                return -1;
 
        if (set_properties) {
                psmouse->vendor = "ALPS";
-               psmouse->name = model->flags & ALPS_DUALPOINT ?
+               psmouse->name = dummy.flags & ALPS_DUALPOINT ?
                                "DualPoint TouchPad" : "GlidePoint";
-               psmouse->model = version;
+               psmouse->model = dummy.proto_version << 8;
        }
        return 0;
 }
index ae1ac354c7787a6a9fd2c5430debcfead8b7155b..970480551b6e67e540fdb76b2865e8dd573d550a 100644 (file)
 #ifndef _ALPS_H
 #define _ALPS_H
 
-#define ALPS_PROTO_V1  0
-#define ALPS_PROTO_V2  1
-#define ALPS_PROTO_V3  2
-#define ALPS_PROTO_V4  3
+#define ALPS_PROTO_V1  1
+#define ALPS_PROTO_V2  2
+#define ALPS_PROTO_V3  3
+#define ALPS_PROTO_V4  4
 
+/**
+ * struct alps_model_info - touchpad ID table
+ * @signature: E7 response string to match.
+ * @command_mode_resp: For V3/V4 touchpads, the final byte of the EC response
+ *   (aka command mode response) identifies the firmware minor version.  This
+ *   can be used to distinguish different hardware models which are not
+ *   uniquely identifiable through their E7 responses.
+ * @proto_version: Indicates V1/V2/V3/...
+ * @byte0: Helps figure out whether a position report packet matches the
+ *   known format for this model.  The first byte of the report, ANDed with
+ *   mask0, should match byte0.
+ * @mask0: The mask used to check the first byte of the report.
+ * @flags: Additional device capabilities (passthrough port, trackstick, etc.).
+ *
+ * Many (but not all) ALPS touchpads can be identified by looking at the
+ * values returned in the "E7 report" and/or the "EC report."  This table
+ * lists a number of such touchpads.
+ */
 struct alps_model_info {
-        unsigned char signature[3];
-       unsigned char command_mode_resp; /* v3/v4 only */
+       unsigned char signature[3];
+       unsigned char command_mode_resp;
        unsigned char proto_version;
-        unsigned char byte0, mask0;
-        unsigned char flags;
+       unsigned char byte0, mask0;
+       unsigned char flags;
 };
 
+/**
+ * struct alps_nibble_commands - encodings for register accesses
+ * @command: PS/2 command used for the nibble
+ * @data: Data supplied as an argument to the PS/2 command, if applicable
+ *
+ * The ALPS protocol uses magic sequences to transmit binary data to the
+ * touchpad, as it is generally not OK to send arbitrary bytes out the
+ * PS/2 port.  Each of the sequences in this table sends one nibble of the
+ * register address or (write) data.  Different versions of the ALPS protocol
+ * use slightly different encodings.
+ */
 struct alps_nibble_commands {
        int command;
        unsigned char data;
 };
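A simplified, hypothetical sketch of how one nibble is sent using this table; the driver's real helper, alps_command_mode_send_nibble(), additionally handles entries whose PS/2 command carries no data byte:

        static int send_nibble_sketch(struct psmouse *psmouse, int nibble)
        {
                struct alps_data *priv = psmouse->private;
                const struct alps_nibble_commands *cmd =
                                        &priv->nibble_commands[nibble];
                unsigned char data = cmd->data;

                /* the command word encodes how many bytes accompany it */
                return ps2_command(&psmouse->ps2dev, &data, cmd->command);
        }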
 
+/**
+ * struct alps_fields - decoded version of the report packet
+ * @x_map: Bitmap of active X positions for MT.
+ * @y_map: Bitmap of active Y positions for MT.
+ * @fingers: Number of fingers for MT.
+ * @x: X position for ST.
+ * @y: Y position for ST.
+ * @z: Z position for ST.
+ * @first_mp: Packet is the first of a multi-packet report.
+ * @is_mp: Packet is part of a multi-packet report.
+ * @left: Left touchpad button is active.
+ * @right: Right touchpad button is active.
+ * @middle: Middle touchpad button is active.
+ * @ts_left: Left trackstick button is active.
+ * @ts_right: Right trackstick button is active.
+ * @ts_middle: Middle trackstick button is active.
+ */
+struct alps_fields {
+       unsigned int x_map;
+       unsigned int y_map;
+       unsigned int fingers;
+       unsigned int x;
+       unsigned int y;
+       unsigned int z;
+       unsigned int first_mp:1;
+       unsigned int is_mp:1;
+
+       unsigned int left:1;
+       unsigned int right:1;
+       unsigned int middle:1;
+
+       unsigned int ts_left:1;
+       unsigned int ts_right:1;
+       unsigned int ts_middle:1;
+};
+
+/**
+ * struct alps_data - private data structure for the ALPS driver
+ * @dev2: "Relative" device used to report trackstick or mouse activity.
+ * @phys: Physical path for the relative device.
+ * @nibble_commands: Command mapping used for touchpad register accesses.
+ * @addr_command: Command used to tell the touchpad that a register address
+ *   follows.
+ * @proto_version: Indicates V1/V2/V3/...
+ * @byte0: Helps figure out whether a position report packet matches the
+ *   known format for this model.  The first byte of the report, ANDed with
+ *   mask0, should match byte0.
+ * @mask0: The mask used to check the first byte of the report.
+ * @flags: Additional device capabilities (passthrough port, trackstick, etc.).
+ * @x_max: Largest possible X position value.
+ * @y_max: Largest possible Y position value.
+ * @x_bits: Number of X bits in the MT bitmap.
+ * @y_bits: Number of Y bits in the MT bitmap.
+ * @hw_init: Protocol-specific hardware init function.
+ * @process_packet: Protocol-specific function to process a report packet.
+ * @decode_fields: Protocol-specific function to read packet bitfields.
+ * @set_abs_params: Protocol-specific function to configure the input_dev.
+ * @prev_fin: Finger bit from previous packet.
+ * @multi_packet: Multi-packet data in progress.
+ * @multi_data: Saved multi-packet data.
+ * @x1: First X coordinate from last MT report.
+ * @x2: Second X coordinate from last MT report.
+ * @y1: First Y coordinate from last MT report.
+ * @y2: Second Y coordinate from last MT report.
+ * @fingers: Number of fingers from last MT report.
+ * @quirks: Bitmap of ALPS_QUIRK_*.
+ * @timer: Timer for flushing out the final report packet in the stream.
+ */
 struct alps_data {
-       struct input_dev *dev2;         /* Relative device */
-       char phys[32];                  /* Phys */
-       const struct alps_model_info *i;/* Info */
+       struct input_dev *dev2;
+       char phys[32];
+
+       /* these are autodetected when the device is identified */
        const struct alps_nibble_commands *nibble_commands;
-       int addr_command;               /* Command to set register address */
-       int prev_fin;                   /* Finger bit from previous packet */
-       int multi_packet;               /* Multi-packet data in progress */
-       unsigned char multi_data[6];    /* Saved multi-packet data */
-       int x1, x2, y1, y2;             /* Coordinates from last MT report */
-       int fingers;                    /* Number of fingers from MT report */
+       int addr_command;
+       unsigned char proto_version;
+       unsigned char byte0, mask0;
+       unsigned char flags;
+       int x_max;
+       int y_max;
+       int x_bits;
+       int y_bits;
+
+       int (*hw_init)(struct psmouse *psmouse);
+       void (*process_packet)(struct psmouse *psmouse);
+       void (*decode_fields)(struct alps_fields *f, unsigned char *p);
+       void (*set_abs_params)(struct alps_data *priv, struct input_dev *dev1);
+
+       int prev_fin;
+       int multi_packet;
+       unsigned char multi_data[6];
+       int x1, x2, y1, y2;
+       int fingers;
        u8 quirks;
        struct timer_list timer;
 };
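Taken together, the refactor selects protocol-specific behaviour once at identification time and then dispatches through these hooks; a condensed view of the flow in alps_init() and alps_process_byte() from the .c diff above (error handling omitted):

        struct alps_data *priv = psmouse->private;

        alps_identify(psmouse, priv);          /* fills proto_version, hooks, flags */
        priv->hw_init(psmouse);                /* e.g. alps_hw_init_v3 */
        priv->set_abs_params(priv, psmouse->dev);
        /* ...and for every completed report, from alps_process_byte(): */
        priv->process_packet(psmouse);         /* e.g. alps_process_packet_v3 */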
diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
new file mode 100644 (file)
index 0000000..b409c3d
--- /dev/null
@@ -0,0 +1,973 @@
+/*
+ * Cypress APA trackpad with I2C interface
+ *
+ * Author: Dudley Du <dudl@cypress.com>
+ * Further cleanup and restructuring by:
+ *   Daniel Kurtz <djkurtz@chromium.org>
+ *   Benson Leung <bleung@chromium.org>
+ *
+ * Copyright (C) 2011-2012 Cypress Semiconductor, Inc.
+ * Copyright (C) 2011-2012 Google, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+/* APA trackpad firmware generation */
+#define CYAPA_GEN3   0x03   /* support MT-protocol B with tracking ID. */
+
+#define CYAPA_NAME   "Cypress APA Trackpad (cyapa)"
+
+/* commands for read/write registers of Cypress trackpad */
+#define CYAPA_CMD_SOFT_RESET       0x00
+#define CYAPA_CMD_POWER_MODE       0x01
+#define CYAPA_CMD_DEV_STATUS       0x02
+#define CYAPA_CMD_GROUP_DATA       0x03
+#define CYAPA_CMD_GROUP_CMD        0x04
+#define CYAPA_CMD_GROUP_QUERY      0x05
+#define CYAPA_CMD_BL_STATUS        0x06
+#define CYAPA_CMD_BL_HEAD          0x07
+#define CYAPA_CMD_BL_CMD           0x08
+#define CYAPA_CMD_BL_DATA          0x09
+#define CYAPA_CMD_BL_ALL           0x0a
+#define CYAPA_CMD_BLK_PRODUCT_ID   0x0b
+#define CYAPA_CMD_BLK_HEAD         0x0c
+
+/* report data start reg offset address. */
+#define DATA_REG_START_OFFSET  0x0000
+
+#define BL_HEAD_OFFSET 0x00
+#define BL_DATA_OFFSET 0x10
+
+/*
+ * Operational Device Status Register
+ *
+ * bit 7: Valid interrupt source
+ * bit 6 - 4: Reserved
+ * bit 3 - 2: Power status
+ * bit 1 - 0: Device status
+ */
+#define REG_OP_STATUS     0x00
+#define OP_STATUS_SRC     0x80
+#define OP_STATUS_POWER   0x0c
+#define OP_STATUS_DEV     0x03
+#define OP_STATUS_MASK (OP_STATUS_SRC | OP_STATUS_POWER | OP_STATUS_DEV)
+
+/*
+ * Operational Finger Count/Button Flags Register
+ *
+ * bit 7 - 4: Number of touched fingers
+ * bit 3: Valid data
+ * bit 2: Middle Physical Button
+ * bit 1: Right Physical Button
+ * bit 0: Left physical Button
+ */
+#define REG_OP_DATA1       0x01
+#define OP_DATA_VALID      0x08
+#define OP_DATA_MIDDLE_BTN 0x04
+#define OP_DATA_RIGHT_BTN  0x02
+#define OP_DATA_LEFT_BTN   0x01
+#define OP_DATA_BTN_MASK (OP_DATA_MIDDLE_BTN | OP_DATA_RIGHT_BTN | \
+                         OP_DATA_LEFT_BTN)
+
+/*
+ * Bootloader Status Register
+ *
+ * bit 7: Busy
+ * bit 6 - 5: Reserved
+ * bit 4: Bootloader running
+ * bit 3 - 1: Reserved
+ * bit 0: Checksum valid
+ */
+#define REG_BL_STATUS        0x01
+#define BL_STATUS_BUSY       0x80
+#define BL_STATUS_RUNNING    0x10
+#define BL_STATUS_DATA_VALID 0x08
+#define BL_STATUS_CSUM_VALID 0x01
+
+/*
+ * Bootloader Error Register
+ *
+ * bit 7: Invalid
+ * bit 6: Invalid security key
+ * bit 5: Bootloading
+ * bit 4: Command checksum
+ * bit 3: Flash protection error
+ * bit 2: Flash checksum error
+ * bit 1 - 0: Reserved
+ */
+#define REG_BL_ERROR         0x02
+#define BL_ERROR_INVALID     0x80
+#define BL_ERROR_INVALID_KEY 0x40
+#define BL_ERROR_BOOTLOADING 0x20
+#define BL_ERROR_CMD_CSUM    0x10
+#define BL_ERROR_FLASH_PROT  0x08
+#define BL_ERROR_FLASH_CSUM  0x04
+
+#define BL_STATUS_SIZE  3  /* length of bootloader status registers */
+#define BLK_HEAD_BYTES 32
+
+#define PRODUCT_ID_SIZE  16
+#define QUERY_DATA_SIZE  27
+#define REG_PROTOCOL_GEN_QUERY_OFFSET  20
+
+#define REG_OFFSET_DATA_BASE     0x0000
+#define REG_OFFSET_COMMAND_BASE  0x0028
+#define REG_OFFSET_QUERY_BASE    0x002a
+
+#define CAPABILITY_LEFT_BTN_MASK       (0x01 << 3)
+#define CAPABILITY_RIGHT_BTN_MASK      (0x01 << 4)
+#define CAPABILITY_MIDDLE_BTN_MASK     (0x01 << 5)
+#define CAPABILITY_BTN_MASK  (CAPABILITY_LEFT_BTN_MASK | \
+                             CAPABILITY_RIGHT_BTN_MASK | \
+                             CAPABILITY_MIDDLE_BTN_MASK)
+
+#define CYAPA_OFFSET_SOFT_RESET  REG_OFFSET_COMMAND_BASE
+
+#define REG_OFFSET_POWER_MODE (REG_OFFSET_COMMAND_BASE + 1)
+
+#define PWR_MODE_MASK   0xfc
+#define PWR_MODE_FULL_ACTIVE (0x3f << 2)
+#define PWR_MODE_IDLE        (0x05 << 2) /* default sleep time is 50 ms. */
+#define PWR_MODE_OFF         (0x00 << 2)
+
+#define PWR_STATUS_MASK      0x0c
+#define PWR_STATUS_ACTIVE    (0x03 << 2)
+#define PWR_STATUS_IDLE      (0x02 << 2)
+#define PWR_STATUS_OFF       (0x00 << 2)
+
+/*
+ * CYAPA trackpad device states.
+ * Used in register 0x00, bit1-0, DeviceStatus field.
+ * Other values indicate device is in an abnormal state and must be reset.
+ */
+#define CYAPA_DEV_NORMAL  0x03
+#define CYAPA_DEV_BUSY    0x01
+
+enum cyapa_state {
+       CYAPA_STATE_OP,
+       CYAPA_STATE_BL_IDLE,
+       CYAPA_STATE_BL_ACTIVE,
+       CYAPA_STATE_BL_BUSY,
+       CYAPA_STATE_NO_DEVICE,
+};
+
+
+struct cyapa_touch {
+       /*
+        * high bits of x/y position value
+        * bit 7 - 4: high 4 bits of x position value
+        * bit 3 - 0: high 4 bits of y position value
+        */
+       u8 xy_hi;
+       u8 x_lo;  /* low 8 bits of x position value. */
+       u8 y_lo;  /* low 8 bits of y position value. */
+       u8 pressure;
+       /* id range is 1 - 15.  It is incremented with every new touch. */
+       u8 id;
+} __packed;
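A worked example of the packing described above, matching the decode done later in cyapa_irq(); the byte values are illustrative only:

        /* 12-bit coordinates: the high nibbles live in xy_hi */
        int x = ((touch->xy_hi & 0xf0) << 4) | touch->x_lo;
        int y = ((touch->xy_hi & 0x0f) << 8) | touch->y_lo;
        /* e.g. xy_hi = 0x2a, x_lo = 0x10, y_lo = 0x80  =>  x = 0x210, y = 0xa80 */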
+
+/* The touch.id is used as the MT slot id, thus max MT slot is 15 */
+#define CYAPA_MAX_MT_SLOTS  15
+
+struct cyapa_reg_data {
+       /*
+        * bit 0 - 1: device status
+        * bit 3 - 2: power mode
+        * bit 6 - 4: reserved
+        * bit 7: interrupt valid bit
+        */
+       u8 device_status;
+       /*
+        * bit 7 - 4: number of fingers currently touching pad
+        * bit 3: valid data check bit
+        * bit 2: middle mechanism button state if exists
+        * bit 1: right mechanism button state if exists
+        * bit 0: left mechanism button state if exists
+        */
+       u8 finger_btn;
+       /* CYAPA reports up to 5 touches per packet. */
+       struct cyapa_touch touches[5];
+} __packed;
+
+/* The main device structure */
+struct cyapa {
+       enum cyapa_state state;
+
+       struct i2c_client *client;
+       struct input_dev *input;
+       char phys[32];  /* device physical location */
+       int irq;
+       bool irq_wake;  /* irq wake is enabled */
+       bool smbus;
+
+       /* read from query data region. */
+       char product_id[16];
+       u8 btn_capability;
+       u8 gen;
+       int max_abs_x;
+       int max_abs_y;
+       int physical_size_x;
+       int physical_size_y;
+};
+
+static const u8 bl_deactivate[] = { 0x00, 0xff, 0x3b, 0x00, 0x01, 0x02, 0x03,
+               0x04, 0x05, 0x06, 0x07 };
+static const u8 bl_exit[] = { 0x00, 0xff, 0xa5, 0x00, 0x01, 0x02, 0x03, 0x04,
+               0x05, 0x06, 0x07 };
+
+struct cyapa_cmd_len {
+       u8 cmd;
+       u8 len;
+};
+
+#define CYAPA_ADAPTER_FUNC_NONE   0
+#define CYAPA_ADAPTER_FUNC_I2C    1
+#define CYAPA_ADAPTER_FUNC_SMBUS  2
+#define CYAPA_ADAPTER_FUNC_BOTH   3
+
+/*
+ * macros for SMBus communication
+ */
+#define SMBUS_READ   0x01
+#define SMBUS_WRITE 0x00
+#define SMBUS_ENCODE_IDX(cmd, idx) ((cmd) | (((idx) & 0x03) << 1))
+#define SMBUS_ENCODE_RW(cmd, rw) ((cmd) | ((rw) & 0x01))
+#define SMBUS_BYTE_BLOCK_CMD_MASK 0x80
+#define SMBUS_GROUP_BLOCK_CMD_MASK 0x40
+
+ /* for byte read/write command */
+#define CMD_RESET 0
+#define CMD_POWER_MODE 1
+#define CMD_DEV_STATUS 2
+#define SMBUS_BYTE_CMD(cmd) (((cmd) & 0x3f) << 1)
+#define CYAPA_SMBUS_RESET SMBUS_BYTE_CMD(CMD_RESET)
+#define CYAPA_SMBUS_POWER_MODE SMBUS_BYTE_CMD(CMD_POWER_MODE)
+#define CYAPA_SMBUS_DEV_STATUS SMBUS_BYTE_CMD(CMD_DEV_STATUS)
+
+ /* for group registers read/write command */
+#define REG_GROUP_DATA 0
+#define REG_GROUP_CMD 2
+#define REG_GROUP_QUERY 3
+#define SMBUS_GROUP_CMD(grp) (0x80 | (((grp) & 0x07) << 3))
+#define CYAPA_SMBUS_GROUP_DATA SMBUS_GROUP_CMD(REG_GROUP_DATA)
+#define CYAPA_SMBUS_GROUP_CMD SMBUS_GROUP_CMD(REG_GROUP_CMD)
+#define CYAPA_SMBUS_GROUP_QUERY SMBUS_GROUP_CMD(REG_GROUP_QUERY)
+
+ /* for register block read/write command */
+#define CMD_BL_STATUS 0
+#define CMD_BL_HEAD 1
+#define CMD_BL_CMD 2
+#define CMD_BL_DATA 3
+#define CMD_BL_ALL 4
+#define CMD_BLK_PRODUCT_ID 5
+#define CMD_BLK_HEAD 6
+#define SMBUS_BLOCK_CMD(cmd) (0xc0 | (((cmd) & 0x1f) << 1))
+
+/* register block read/write command in bootloader mode */
+#define CYAPA_SMBUS_BL_STATUS SMBUS_BLOCK_CMD(CMD_BL_STATUS)
+#define CYAPA_SMBUS_BL_HEAD SMBUS_BLOCK_CMD(CMD_BL_HEAD)
+#define CYAPA_SMBUS_BL_CMD SMBUS_BLOCK_CMD(CMD_BL_CMD)
+#define CYAPA_SMBUS_BL_DATA SMBUS_BLOCK_CMD(CMD_BL_DATA)
+#define CYAPA_SMBUS_BL_ALL SMBUS_BLOCK_CMD(CMD_BL_ALL)
+
+/* register block read/write command in operational mode */
+#define CYAPA_SMBUS_BLK_PRODUCT_ID SMBUS_BLOCK_CMD(CMD_BLK_PRODUCT_ID)
+#define CYAPA_SMBUS_BLK_HEAD SMBUS_BLOCK_CMD(CMD_BLK_HEAD)
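A worked example of how these macros compose a single SMBus command byte, matching what cyapa_read_byte() does further down; the values follow directly from the definitions above:

        /* device-status read: bits 6:1 carry the command, bit 0 the R/W flag */
        u8 cmd = CYAPA_SMBUS_DEV_STATUS;         /* SMBUS_BYTE_CMD(CMD_DEV_STATUS) = 0x04 */
        cmd = SMBUS_ENCODE_RW(cmd, SMBUS_READ);  /* 0x05 */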
+
+static const struct cyapa_cmd_len cyapa_i2c_cmds[] = {
+       { CYAPA_OFFSET_SOFT_RESET, 1 },
+       { REG_OFFSET_COMMAND_BASE + 1, 1 },
+       { REG_OFFSET_DATA_BASE, 1 },
+       { REG_OFFSET_DATA_BASE, sizeof(struct cyapa_reg_data) },
+       { REG_OFFSET_COMMAND_BASE, 0 },
+       { REG_OFFSET_QUERY_BASE, QUERY_DATA_SIZE },
+       { BL_HEAD_OFFSET, 3 },
+       { BL_HEAD_OFFSET, 16 },
+       { BL_HEAD_OFFSET, 16 },
+       { BL_DATA_OFFSET, 16 },
+       { BL_HEAD_OFFSET, 32 },
+       { REG_OFFSET_QUERY_BASE, PRODUCT_ID_SIZE },
+       { REG_OFFSET_DATA_BASE, 32 }
+};
+
+static const struct cyapa_cmd_len cyapa_smbus_cmds[] = {
+       { CYAPA_SMBUS_RESET, 1 },
+       { CYAPA_SMBUS_POWER_MODE, 1 },
+       { CYAPA_SMBUS_DEV_STATUS, 1 },
+       { CYAPA_SMBUS_GROUP_DATA, sizeof(struct cyapa_reg_data) },
+       { CYAPA_SMBUS_GROUP_CMD, 2 },
+       { CYAPA_SMBUS_GROUP_QUERY, QUERY_DATA_SIZE },
+       { CYAPA_SMBUS_BL_STATUS, 3 },
+       { CYAPA_SMBUS_BL_HEAD, 16 },
+       { CYAPA_SMBUS_BL_CMD, 16 },
+       { CYAPA_SMBUS_BL_DATA, 16 },
+       { CYAPA_SMBUS_BL_ALL, 32 },
+       { CYAPA_SMBUS_BLK_PRODUCT_ID, PRODUCT_ID_SIZE },
+       { CYAPA_SMBUS_BLK_HEAD, 16 },
+};
+
+static ssize_t cyapa_i2c_reg_read_block(struct cyapa *cyapa, u8 reg, size_t len,
+                                       u8 *values)
+{
+       return i2c_smbus_read_i2c_block_data(cyapa->client, reg, len, values);
+}
+
+static ssize_t cyapa_i2c_reg_write_block(struct cyapa *cyapa, u8 reg,
+                                        size_t len, const u8 *values)
+{
+       return i2c_smbus_write_i2c_block_data(cyapa->client, reg, len, values);
+}
+
+/*
+ * cyapa_smbus_read_block - perform smbus block read command
+ * @cyapa  - private data structure of the driver
+ * @cmd    - the properly encoded smbus command
+ * @len    - expected length of smbus command result
+ * @values - buffer to store smbus command result
+ *
+ * Returns negative errno, else the number of bytes read.
+ *
+ * Note:
+ * In the trackpad device, the memory block allocated for the I2C register
+ * map is 256 bytes, so the maximum read block over the I2C bus is 256 bytes.
+ */
+static ssize_t cyapa_smbus_read_block(struct cyapa *cyapa, u8 cmd, size_t len,
+                                     u8 *values)
+{
+       ssize_t ret;
+       u8 index;
+       u8 smbus_cmd;
+       u8 *buf;
+       struct i2c_client *client = cyapa->client;
+
+       if (!(SMBUS_BYTE_BLOCK_CMD_MASK & cmd))
+               return -EINVAL;
+
+       if (SMBUS_GROUP_BLOCK_CMD_MASK & cmd) {
+               /* read specific block registers command. */
+               smbus_cmd = SMBUS_ENCODE_RW(cmd, SMBUS_READ);
+               ret = i2c_smbus_read_block_data(client, smbus_cmd, values);
+               goto out;
+       }
+
+       ret = 0;
+       for (index = 0; index * I2C_SMBUS_BLOCK_MAX < len; index++) {
+               smbus_cmd = SMBUS_ENCODE_IDX(cmd, index);
+               smbus_cmd = SMBUS_ENCODE_RW(smbus_cmd, SMBUS_READ);
+               buf = values + I2C_SMBUS_BLOCK_MAX * index;
+               ret = i2c_smbus_read_block_data(client, smbus_cmd, buf);
+               if (ret < 0)
+                       goto out;
+       }
+
+out:
+       return ret > 0 ? len : ret;
+}
+
+static s32 cyapa_read_byte(struct cyapa *cyapa, u8 cmd_idx)
+{
+       u8 cmd;
+
+       if (cyapa->smbus) {
+               cmd = cyapa_smbus_cmds[cmd_idx].cmd;
+               cmd = SMBUS_ENCODE_RW(cmd, SMBUS_READ);
+       } else {
+               cmd = cyapa_i2c_cmds[cmd_idx].cmd;
+       }
+       return i2c_smbus_read_byte_data(cyapa->client, cmd);
+}
+
+static s32 cyapa_write_byte(struct cyapa *cyapa, u8 cmd_idx, u8 value)
+{
+       u8 cmd;
+
+       if (cyapa->smbus) {
+               cmd = cyapa_smbus_cmds[cmd_idx].cmd;
+               cmd = SMBUS_ENCODE_RW(cmd, SMBUS_WRITE);
+       } else {
+               cmd = cyapa_i2c_cmds[cmd_idx].cmd;
+       }
+       return i2c_smbus_write_byte_data(cyapa->client, cmd, value);
+}
+
+static ssize_t cyapa_read_block(struct cyapa *cyapa, u8 cmd_idx, u8 *values)
+{
+       u8 cmd;
+       size_t len;
+
+       if (cyapa->smbus) {
+               cmd = cyapa_smbus_cmds[cmd_idx].cmd;
+               len = cyapa_smbus_cmds[cmd_idx].len;
+               return cyapa_smbus_read_block(cyapa, cmd, len, values);
+       } else {
+               cmd = cyapa_i2c_cmds[cmd_idx].cmd;
+               len = cyapa_i2c_cmds[cmd_idx].len;
+               return cyapa_i2c_reg_read_block(cyapa, cmd, len, values);
+       }
+}
+
+/*
+ * Query device for its current operating state.
+ */
+static int cyapa_get_state(struct cyapa *cyapa)
+{
+       int ret;
+       u8 status[BL_STATUS_SIZE];
+
+       cyapa->state = CYAPA_STATE_NO_DEVICE;
+
+       /*
+        * Get trackpad status by reading 3 registers starting from 0.
+        * If the device is in the bootloader, this will be BL_HEAD.
+        * If the device is in operation mode, this will be the DATA regs.
+        */
+       ret = cyapa_i2c_reg_read_block(cyapa, BL_HEAD_OFFSET, BL_STATUS_SIZE,
+                                      status);
+
+       /*
+        * On smbus systems in OP mode, the i2c_reg_read will fail with
+        * -ETIMEDOUT.  In this case, try again using the smbus equivalent
+        * command.  This should return a BL_HEAD indicating CYAPA_STATE_OP.
+        */
+       if (cyapa->smbus && (ret == -ETIMEDOUT || ret == -ENXIO))
+               ret = cyapa_read_block(cyapa, CYAPA_CMD_BL_STATUS, status);
+
+       if (ret != BL_STATUS_SIZE)
+               goto error;
+
+       if ((status[REG_OP_STATUS] & OP_STATUS_SRC) == OP_STATUS_SRC) {
+               switch (status[REG_OP_STATUS] & OP_STATUS_DEV) {
+               case CYAPA_DEV_NORMAL:
+               case CYAPA_DEV_BUSY:
+                       cyapa->state = CYAPA_STATE_OP;
+                       break;
+               default:
+                       ret = -EAGAIN;
+                       goto error;
+               }
+       } else {
+               if (status[REG_BL_STATUS] & BL_STATUS_BUSY)
+                       cyapa->state = CYAPA_STATE_BL_BUSY;
+               else if (status[REG_BL_ERROR] & BL_ERROR_BOOTLOADING)
+                       cyapa->state = CYAPA_STATE_BL_ACTIVE;
+               else
+                       cyapa->state = CYAPA_STATE_BL_IDLE;
+       }
+
+       return 0;
+error:
+       return (ret < 0) ? ret : -EAGAIN;
+}
+
+/*
+ * Poll device for its status in a loop, waiting up to timeout for a response.
+ *
+ * When the device switches state, it usually takes ~300 ms.
+ * However, when running a new firmware image, the device must calibrate its
+ * sensors, which can take as long as 2 seconds.
+ *
+ * Note: The timeout has granularity of the polling rate, which is 100 ms.
+ *
+ * Returns:
+ *   0 when the device eventually responds with a valid non-busy state.
+ *   -ETIMEDOUT if device never responds (too many -EAGAIN)
+ *   < 0    other errors
+ */
+static int cyapa_poll_state(struct cyapa *cyapa, unsigned int timeout)
+{
+       int ret;
+       int tries = timeout / 100;
+
+       ret = cyapa_get_state(cyapa);
+       while ((ret || cyapa->state >= CYAPA_STATE_BL_BUSY) && tries--) {
+               msleep(100);
+               ret = cyapa_get_state(cyapa);
+       }
+       return (ret == -EAGAIN || ret == -ETIMEDOUT) ? -ETIMEDOUT : ret;
+}
+
+static int cyapa_bl_deactivate(struct cyapa *cyapa)
+{
+       int ret;
+
+       ret = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_deactivate),
+                                       bl_deactivate);
+       if (ret < 0)
+               return ret;
+
+       /* wait for bootloader to switch to idle state; should take < 100ms */
+       msleep(100);
+       ret = cyapa_poll_state(cyapa, 500);
+       if (ret < 0)
+               return ret;
+       if (cyapa->state != CYAPA_STATE_BL_IDLE)
+               return -EAGAIN;
+       return 0;
+}
+
+/*
+ * Exit bootloader
+ *
+ * Send bl_exit command, then wait 50 - 100 ms to let device transition to
+ * operational mode.  If this is the first time the device's firmware is
+ * running, it can take up to 2 seconds to calibrate its sensors.  So, poll
+ * the device's new state for up to 2 seconds.
+ *
+ * Returns:
+ *   -EIO    failure while reading from device
+ *   -EAGAIN device is stuck in bootloader because it has invalid firmware
+ *   0       device is supported and in operational mode
+ */
+static int cyapa_bl_exit(struct cyapa *cyapa)
+{
+       int ret;
+
+       ret = cyapa_i2c_reg_write_block(cyapa, 0, sizeof(bl_exit), bl_exit);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Wait for bootloader to exit, and operation mode to start.
+        * Normally, this takes at least 50 ms.
+        */
+       usleep_range(50000, 100000);
+       /*
+        * In addition, when a device boots for the first time after being
+        * updated to new firmware, it must first calibrate its sensors, which
+        * can take up to an additional 2 seconds.
+        */
+       ret = cyapa_poll_state(cyapa, 2000);
+       if (ret < 0)
+               return ret;
+       if (cyapa->state != CYAPA_STATE_OP)
+               return -EAGAIN;
+
+       return 0;
+}
+
+/*
+ * Set device power mode
+ */
+static int cyapa_set_power_mode(struct cyapa *cyapa, u8 power_mode)
+{
+       struct device *dev = &cyapa->client->dev;
+       int ret;
+       u8 power;
+
+       if (cyapa->state != CYAPA_STATE_OP)
+               return 0;
+
+       ret = cyapa_read_byte(cyapa, CYAPA_CMD_POWER_MODE);
+       if (ret < 0)
+               return ret;
+
+       power = ret & ~PWR_MODE_MASK;
+       power |= power_mode & PWR_MODE_MASK;
+       ret = cyapa_write_byte(cyapa, CYAPA_CMD_POWER_MODE, power);
+       if (ret < 0)
+               dev_err(dev, "failed to set power_mode 0x%02x err = %d\n",
+                       power_mode, ret);
+       return ret;
+}
+
+static int cyapa_get_query_data(struct cyapa *cyapa)
+{
+       u8 query_data[QUERY_DATA_SIZE];
+       int ret;
+
+       if (cyapa->state != CYAPA_STATE_OP)
+               return -EBUSY;
+
+       ret = cyapa_read_block(cyapa, CYAPA_CMD_GROUP_QUERY, query_data);
+       if (ret < 0)
+               return ret;
+       if (ret != QUERY_DATA_SIZE)
+               return -EIO;
+
+       memcpy(&cyapa->product_id[0], &query_data[0], 5);
+       cyapa->product_id[5] = '-';
+       memcpy(&cyapa->product_id[6], &query_data[5], 6);
+       cyapa->product_id[12] = '-';
+       memcpy(&cyapa->product_id[13], &query_data[11], 2);
+       cyapa->product_id[15] = '\0';
+
+       cyapa->btn_capability = query_data[19] & CAPABILITY_BTN_MASK;
+
+       cyapa->gen = query_data[20] & 0x0f;
+
+       cyapa->max_abs_x = ((query_data[21] & 0xf0) << 4) | query_data[22];
+       cyapa->max_abs_y = ((query_data[21] & 0x0f) << 8) | query_data[23];
+
+       cyapa->physical_size_x =
+               ((query_data[24] & 0xf0) << 4) | query_data[25];
+       cyapa->physical_size_y =
+               ((query_data[24] & 0x0f) << 8) | query_data[26];
+
+       return 0;
+}
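A worked example of the 12-bit unpacking above, using illustrative query bytes rather than values read from a real device:

        /* suppose query_data[21] = 0x45, [22] = 0xc0, [23] = 0x00 */
        max_abs_x = ((0x45 & 0xf0) << 4) | 0xc0;   /* 0x4c0 = 1216 */
        max_abs_y = ((0x45 & 0x0f) << 8) | 0x00;   /* 0x500 = 1280 */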
+
+/*
+ * Check if device is operational.
+ *
+ * An operational device is responding, has exited bootloader, and has
+ * firmware supported by this driver.
+ *
+ * Returns:
+ *   -EBUSY  no device or in bootloader
+ *   -EIO    failure while reading from device
+ *   -EAGAIN device is still in bootloader
+ *           if ->state = CYAPA_STATE_BL_IDLE, device has invalid firmware
+ *   -EINVAL device is in operational mode, but not supported by this driver
+ *   0       device is supported
+ */
+static int cyapa_check_is_operational(struct cyapa *cyapa)
+{
+       struct device *dev = &cyapa->client->dev;
+       static const char unique_str[] = "CYTRA";
+       int ret;
+
+       ret = cyapa_poll_state(cyapa, 2000);
+       if (ret < 0)
+               return ret;
+       switch (cyapa->state) {
+       case CYAPA_STATE_BL_ACTIVE:
+               ret = cyapa_bl_deactivate(cyapa);
+               if (ret)
+                       return ret;
+
+       /* Fallthrough state */
+       case CYAPA_STATE_BL_IDLE:
+               ret = cyapa_bl_exit(cyapa);
+               if (ret)
+                       return ret;
+
+       /* Fallthrough state */
+       case CYAPA_STATE_OP:
+               ret = cyapa_get_query_data(cyapa);
+               if (ret < 0)
+                       return ret;
+
+               /* only support firmware protocol gen3 */
+               if (cyapa->gen != CYAPA_GEN3) {
+                       dev_err(dev, "unsupported protocol version (%d)",
+                               cyapa->gen);
+                       return -EINVAL;
+               }
+
+               /* only support product ID starting with CYTRA */
+               if (memcmp(cyapa->product_id, unique_str,
+                          sizeof(unique_str) - 1) != 0) {
+                       dev_err(dev, "unsupported product ID (%s)\n",
+                               cyapa->product_id);
+                       return -EINVAL;
+               }
+               return 0;
+
+       default:
+               return -EIO;
+       }
+       return 0;
+}
+
+static irqreturn_t cyapa_irq(int irq, void *dev_id)
+{
+       struct cyapa *cyapa = dev_id;
+       struct device *dev = &cyapa->client->dev;
+       struct input_dev *input = cyapa->input;
+       struct cyapa_reg_data data;
+       int i;
+       int ret;
+       int num_fingers;
+
+       if (device_may_wakeup(dev))
+               pm_wakeup_event(dev, 0);
+
+       ret = cyapa_read_block(cyapa, CYAPA_CMD_GROUP_DATA, (u8 *)&data);
+       if (ret != sizeof(data))
+               goto out;
+
+       if ((data.device_status & OP_STATUS_SRC) != OP_STATUS_SRC ||
+           (data.device_status & OP_STATUS_DEV) != CYAPA_DEV_NORMAL ||
+           (data.finger_btn & OP_DATA_VALID) != OP_DATA_VALID) {
+               goto out;
+       }
+
+       num_fingers = (data.finger_btn >> 4) & 0x0f;
+       for (i = 0; i < num_fingers; i++) {
+               const struct cyapa_touch *touch = &data.touches[i];
+               /* Note: touch->id range is 1 to 15; slots are 0 to 14. */
+               int slot = touch->id - 1;
+
+               input_mt_slot(input, slot);
+               input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+               input_report_abs(input, ABS_MT_POSITION_X,
+                                ((touch->xy_hi & 0xf0) << 4) | touch->x_lo);
+               input_report_abs(input, ABS_MT_POSITION_Y,
+                                ((touch->xy_hi & 0x0f) << 8) | touch->y_lo);
+               input_report_abs(input, ABS_MT_PRESSURE, touch->pressure);
+       }
+
+       input_mt_sync_frame(input);
+
+       if (cyapa->btn_capability & CAPABILITY_LEFT_BTN_MASK)
+               input_report_key(input, BTN_LEFT,
+                                data.finger_btn & OP_DATA_LEFT_BTN);
+
+       if (cyapa->btn_capability & CAPABILITY_MIDDLE_BTN_MASK)
+               input_report_key(input, BTN_MIDDLE,
+                                data.finger_btn & OP_DATA_MIDDLE_BTN);
+
+       if (cyapa->btn_capability & CAPABILITY_RIGHT_BTN_MASK)
+               input_report_key(input, BTN_RIGHT,
+                                data.finger_btn & OP_DATA_RIGHT_BTN);
+
+       input_sync(input);
+
+out:
+       return IRQ_HANDLED;
+}
+
+static u8 cyapa_check_adapter_functionality(struct i2c_client *client)
+{
+       u8 ret = CYAPA_ADAPTER_FUNC_NONE;
+
+       if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+               ret |= CYAPA_ADAPTER_FUNC_I2C;
+       if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+                                    I2C_FUNC_SMBUS_BLOCK_DATA |
+                                    I2C_FUNC_SMBUS_I2C_BLOCK))
+               ret |= CYAPA_ADAPTER_FUNC_SMBUS;
+       return ret;
+}
+
+static int cyapa_create_input_dev(struct cyapa *cyapa)
+{
+       struct device *dev = &cyapa->client->dev;
+       int ret;
+       struct input_dev *input;
+
+       if (!cyapa->physical_size_x || !cyapa->physical_size_y)
+               return -EINVAL;
+
+       input = cyapa->input = input_allocate_device();
+       if (!input) {
+               dev_err(dev, "allocate memory for input device failed\n");
+               return -ENOMEM;
+       }
+
+       input->name = CYAPA_NAME;
+       input->phys = cyapa->phys;
+       input->id.bustype = BUS_I2C;
+       input->id.version = 1;
+       input->id.product = 0;  /* means any product in eventcomm. */
+       input->dev.parent = &cyapa->client->dev;
+
+       input_set_drvdata(input, cyapa);
+
+       __set_bit(EV_ABS, input->evbit);
+
+       /* finger position */
+       input_set_abs_params(input, ABS_MT_POSITION_X, 0, cyapa->max_abs_x, 0,
+                            0);
+       input_set_abs_params(input, ABS_MT_POSITION_Y, 0, cyapa->max_abs_y, 0,
+                            0);
+       input_set_abs_params(input, ABS_MT_PRESSURE, 0, 255, 0, 0);
+
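+       /*
+        * Derive the resolution from the reported maxima and the physical
+        * size queried from the device (units per mm, assuming
+        * physical_size_* is reported in mm).
+        */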
+       input_abs_set_res(input, ABS_MT_POSITION_X,
+                         cyapa->max_abs_x / cyapa->physical_size_x);
+       input_abs_set_res(input, ABS_MT_POSITION_Y,
+                         cyapa->max_abs_y / cyapa->physical_size_y);
+
+       if (cyapa->btn_capability & CAPABILITY_LEFT_BTN_MASK)
+               __set_bit(BTN_LEFT, input->keybit);
+       if (cyapa->btn_capability & CAPABILITY_MIDDLE_BTN_MASK)
+               __set_bit(BTN_MIDDLE, input->keybit);
+       if (cyapa->btn_capability & CAPABILITY_RIGHT_BTN_MASK)
+               __set_bit(BTN_RIGHT, input->keybit);
+
+       if (cyapa->btn_capability == CAPABILITY_LEFT_BTN_MASK)
+               __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+
+       /* handle pointer emulation and unused slots in core */
+       ret = input_mt_init_slots(input, CYAPA_MAX_MT_SLOTS,
+                                 INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED);
+       if (ret) {
+               dev_err(dev, "allocate memory for MT slots failed, %d\n", ret);
+               goto err_free_device;
+       }
+
+       /* Register the device in input subsystem */
+       ret = input_register_device(input);
+       if (ret) {
+               dev_err(dev, "input device register failed, %d\n", ret);
+               goto err_free_device;
+       }
+       return 0;
+
+err_free_device:
+       input_free_device(input);
+       cyapa->input = NULL;
+       return ret;
+}
+
+static int cyapa_probe(struct i2c_client *client,
+                      const struct i2c_device_id *dev_id)
+{
+       int ret;
+       u8 adapter_func;
+       struct cyapa *cyapa;
+       struct device *dev = &client->dev;
+
+       adapter_func = cyapa_check_adapter_functionality(client);
+       if (adapter_func == CYAPA_ADAPTER_FUNC_NONE) {
+               dev_err(dev, "not a supported I2C/SMBus adapter\n");
+               return -EIO;
+       }
+
+       cyapa = kzalloc(sizeof(struct cyapa), GFP_KERNEL);
+       if (!cyapa) {
+               dev_err(dev, "allocate memory for cyapa failed\n");
+               return -ENOMEM;
+       }
+
+       cyapa->gen = CYAPA_GEN3;
+       cyapa->client = client;
+       i2c_set_clientdata(client, cyapa);
+       sprintf(cyapa->phys, "i2c-%d-%04x/input0", client->adapter->nr,
+               client->addr);
+
+       /* If plain I2C is not supported, fall back to SMBus. */
+       if (adapter_func == CYAPA_ADAPTER_FUNC_SMBUS)
+               cyapa->smbus = true;
+       cyapa->state = CYAPA_STATE_NO_DEVICE;
+       ret = cyapa_check_is_operational(cyapa);
+       if (ret) {
+               dev_err(dev, "device not operational, %d\n", ret);
+               goto err_mem_free;
+       }
+
+       ret = cyapa_create_input_dev(cyapa);
+       if (ret) {
+               dev_err(dev, "create input_dev instance failed, %d\n", ret);
+               goto err_mem_free;
+       }
+
+       ret = cyapa_set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE);
+       if (ret) {
+               dev_err(dev, "set active power failed, %d\n", ret);
+               goto err_unregister_device;
+       }
+
+       cyapa->irq = client->irq;
+       ret = request_threaded_irq(cyapa->irq,
+                                  NULL,
+                                  cyapa_irq,
+                                  IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                  "cyapa",
+                                  cyapa);
+       if (ret) {
+               dev_err(dev, "IRQ request failed: %d\n", ret);
+               goto err_unregister_device;
+       }
+
+       return 0;
+
+err_unregister_device:
+       input_unregister_device(cyapa->input);
+err_mem_free:
+       kfree(cyapa);
+
+       return ret;
+}
+
+static int cyapa_remove(struct i2c_client *client)
+{
+       struct cyapa *cyapa = i2c_get_clientdata(client);
+
+       free_irq(cyapa->irq, cyapa);
+       input_unregister_device(cyapa->input);
+       cyapa_set_power_mode(cyapa, PWR_MODE_OFF);
+       kfree(cyapa);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cyapa_suspend(struct device *dev)
+{
+       int ret;
+       u8 power_mode;
+       struct cyapa *cyapa = dev_get_drvdata(dev);
+
+       disable_irq(cyapa->irq);
+
+       /*
+        * Set trackpad device to idle mode if wakeup is allowed,
+        * otherwise turn off.
+        */
+       power_mode = device_may_wakeup(dev) ? PWR_MODE_IDLE
+                                           : PWR_MODE_OFF;
+       ret = cyapa_set_power_mode(cyapa, power_mode);
+       if (ret < 0)
+               dev_err(dev, "set power mode failed, %d\n", ret);
+
+       if (device_may_wakeup(dev))
+               cyapa->irq_wake = (enable_irq_wake(cyapa->irq) == 0);
+       return 0;
+}
+
+static int cyapa_resume(struct device *dev)
+{
+       int ret;
+       struct cyapa *cyapa = dev_get_drvdata(dev);
+
+       if (device_may_wakeup(dev) && cyapa->irq_wake)
+               disable_irq_wake(cyapa->irq);
+
+       ret = cyapa_set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE);
+       if (ret)
+               dev_warn(dev, "resume active power failed, %d\n", ret);
+
+       enable_irq(cyapa->irq);
+       return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(cyapa_pm_ops, cyapa_suspend, cyapa_resume);
+
+static const struct i2c_device_id cyapa_id_table[] = {
+       { "cyapa", 0 },
+       { },
+};
+MODULE_DEVICE_TABLE(i2c, cyapa_id_table);
+
+static struct i2c_driver cyapa_driver = {
+       .driver = {
+               .name = "cyapa",
+               .owner = THIS_MODULE,
+               .pm = &cyapa_pm_ops,
+       },
+
+       .probe = cyapa_probe,
+       .remove = cyapa_remove,
+       .id_table = cyapa_id_table,
+};
+
+module_i2c_driver(cyapa_driver);
+
+MODULE_DESCRIPTION("Cypress APA I2C Trackpad Driver");
+MODULE_AUTHOR("Dudley Du <dudl@cypress.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
new file mode 100644 (file)
index 0000000..1673dc6
--- /dev/null
@@ -0,0 +1,725 @@
+/*
+ * Cypress Trackpad PS/2 mouse driver
+ *
+ * Copyright (c) 2012 Cypress Semiconductor Corporation.
+ *
+ * Author:
+ *   Dudley Du <dudl@cypress.com>
+ *
+ * Additional contributors include:
+ *   Kamal Mostafa <kamal@canonical.com>
+ *   Kyle Fazzari <git@status.e4ward.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/serio.h>
+#include <linux/libps2.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include "cypress_ps2.h"
+
+#undef CYTP_DEBUG_VERBOSE  /* define this and DEBUG for more verbose dump */
+
+static void cypress_set_packet_size(struct psmouse *psmouse, unsigned int n)
+{
+       struct cytp_data *cytp = psmouse->private;
+       cytp->pkt_size = n;
+}
+
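+/*
+ * Sample rates and resolution codes the firmware may report; used by
+ * cypress_verify_cmd_state() to sanity-check the 0xE9 status response.
+ */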
+static const unsigned char cytp_rate[] = {10, 20, 40, 60, 100, 200};
+static const unsigned char cytp_resolution[] = {0x00, 0x01, 0x02, 0x03};
+
+static int cypress_ps2_sendbyte(struct psmouse *psmouse, int value)
+{
+       struct ps2dev *ps2dev = &psmouse->ps2dev;
+
+       if (ps2_sendbyte(ps2dev, value & 0xff, CYTP_CMD_TIMEOUT) < 0) {
+               psmouse_dbg(psmouse,
+                               "sending command 0x%02x failed, resp 0x%02x\n",
+                               value & 0xff, ps2dev->nak);
+               if (ps2dev->nak == CYTP_PS2_RETRY)
+                       return CYTP_PS2_RETRY;
+               else
+                       return CYTP_PS2_ERROR;
+       }
+
+#ifdef CYTP_DEBUG_VERBOSE
+       psmouse_dbg(psmouse, "sending command 0x%02x succeeded, resp 0xfa\n",
+                       value & 0xff);
+#endif
+
+       return 0;
+}
+
+static int cypress_ps2_ext_cmd(struct psmouse *psmouse, unsigned short cmd,
+                              unsigned char data)
+{
+       struct ps2dev *ps2dev = &psmouse->ps2dev;
+       int tries = CYTP_PS2_CMD_TRIES;
+       int rc;
+
+       ps2_begin_command(ps2dev);
+
+       do {
+               /*
+                * Send extension command byte (0xE8 or 0xF3).
+                * If sending the command fails, send recovery command
+                * to make the device return to the ready state.
+                */
+               rc = cypress_ps2_sendbyte(psmouse, cmd & 0xff);
+               if (rc == CYTP_PS2_RETRY) {
+                       rc = cypress_ps2_sendbyte(psmouse, 0x00);
+                       if (rc == CYTP_PS2_RETRY)
+                               rc = cypress_ps2_sendbyte(psmouse, 0x0a);
+               }
+               if (rc == CYTP_PS2_ERROR)
+                       continue;
+
+               rc = cypress_ps2_sendbyte(psmouse, data);
+               if (rc == CYTP_PS2_RETRY)
+                       rc = cypress_ps2_sendbyte(psmouse, data);
+               if (rc == CYTP_PS2_ERROR)
+                       continue;
+               else
+                       break;
+       } while (--tries > 0);
+
+       ps2_end_command(ps2dev);
+
+       return rc;
+}
+
+static int cypress_ps2_read_cmd_status(struct psmouse *psmouse,
+                                      unsigned char cmd,
+                                      unsigned char *param)
+{
+       int rc;
+       struct ps2dev *ps2dev = &psmouse->ps2dev;
+       enum psmouse_state old_state;
+       int pktsize;
+
+       ps2_begin_command(&psmouse->ps2dev);
+
+       old_state = psmouse->state;
+       psmouse->state = PSMOUSE_CMD_MODE;
+       psmouse->pktcnt = 0;
+
+       pktsize = (cmd == CYTP_CMD_READ_TP_METRICS) ? 8 : 3;
+       memset(param, 0, pktsize);
+
+       rc = cypress_ps2_sendbyte(psmouse, 0xe9);
+       if (rc < 0)
+               goto out;
+
+       wait_event_timeout(ps2dev->wait,
+                       (psmouse->pktcnt >= pktsize),
+                       msecs_to_jiffies(CYTP_CMD_TIMEOUT));
+
+       memcpy(param, psmouse->packet, pktsize);
+
+       psmouse_dbg(psmouse, "Command 0x%02x response data (0x): %*ph\n",
+                       cmd, pktsize, param);
+
+out:
+       psmouse->state = old_state;
+       psmouse->pktcnt = 0;
+
+       ps2_end_command(&psmouse->ps2dev);
+
+       return rc;
+}
+
+static bool cypress_verify_cmd_state(struct psmouse *psmouse,
+                                    unsigned char cmd, unsigned char *param)
+{
+       bool rate_match = false;
+       bool resolution_match = false;
+       int i;
+
+       /* callers will do further checking. */
+       if (cmd == CYTP_CMD_READ_CYPRESS_ID ||
+           cmd == CYTP_CMD_STANDARD_MODE ||
+           cmd == CYTP_CMD_READ_TP_METRICS)
+               return true;
+
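+       /*
+        * A sane response has every DFLT_RESP_BITS_VALID bit clear (the
+        * SMBus bit among them) and the mode bit indicating stream mode;
+        * the reported resolution and rate must also match a known value.
+        */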
+       if ((~param[0] & DFLT_RESP_BITS_VALID) == DFLT_RESP_BITS_VALID &&
+           (param[0] & DFLT_RESP_BIT_MODE) == DFLT_RESP_STREAM_MODE) {
+               for (i = 0; i < sizeof(cytp_resolution); i++)
+                       if (cytp_resolution[i] == param[1])
+                               resolution_match = true;
+
+               for (i = 0; i < sizeof(cytp_rate); i++)
+                       if (cytp_rate[i] == param[2])
+                               rate_match = true;
+
+               if (resolution_match && rate_match)
+                       return true;
+       }
+
+       psmouse_dbg(psmouse, "verify cmd state failed.\n");
+       return false;
+}
+
+static int cypress_send_ext_cmd(struct psmouse *psmouse, unsigned char cmd,
+                               unsigned char *param)
+{
+       int tries = CYTP_PS2_CMD_TRIES;
+       int rc;
+
+       psmouse_dbg(psmouse, "send extension cmd 0x%02x, [%d %d %d %d]\n",
+                cmd, DECODE_CMD_AA(cmd), DECODE_CMD_BB(cmd),
+                DECODE_CMD_CC(cmd), DECODE_CMD_DD(cmd));
+
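+       /*
+        * Each 2-bit field of the command is transmitted as the argument
+        * of a separate SETRES sub-command (DD first, AA last); the result
+        * is then read back via the 0xE9 status request.
+        */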
+       do {
+               cypress_ps2_ext_cmd(psmouse,
+                                   PSMOUSE_CMD_SETRES, DECODE_CMD_DD(cmd));
+               cypress_ps2_ext_cmd(psmouse,
+                                   PSMOUSE_CMD_SETRES, DECODE_CMD_CC(cmd));
+               cypress_ps2_ext_cmd(psmouse,
+                                   PSMOUSE_CMD_SETRES, DECODE_CMD_BB(cmd));
+               cypress_ps2_ext_cmd(psmouse,
+                                   PSMOUSE_CMD_SETRES, DECODE_CMD_AA(cmd));
+
+               rc = cypress_ps2_read_cmd_status(psmouse, cmd, param);
+               if (rc)
+                       continue;
+
+               if (cypress_verify_cmd_state(psmouse, cmd, param))
+                       return 0;
+
+       } while (--tries > 0);
+
+       return -EIO;
+}
+
+int cypress_detect(struct psmouse *psmouse, bool set_properties)
+{
+       unsigned char param[3];
+
+       if (cypress_send_ext_cmd(psmouse, CYTP_CMD_READ_CYPRESS_ID, param))
+               return -ENODEV;
+
+       /* Check for Cypress Trackpad signature bytes: 0x33 0xCC */
+       if (param[0] != 0x33 || param[1] != 0xCC)
+               return -ENODEV;
+
+       if (set_properties) {
+               psmouse->vendor = "Cypress";
+               psmouse->name = "Trackpad";
+       }
+
+       return 0;
+}
+
+static int cypress_read_fw_version(struct psmouse *psmouse)
+{
+       struct cytp_data *cytp = psmouse->private;
+       unsigned char param[3];
+
+       if (cypress_send_ext_cmd(psmouse, CYTP_CMD_READ_CYPRESS_ID, param))
+               return -ENODEV;
+
+       /* Check for Cypress Trackpad signature bytes: 0x33 0xCC */
+       if (param[0] != 0x33 || param[1] != 0xCC)
+               return -ENODEV;
+
+       cytp->fw_version = param[2] & FW_VERSION_MASK;
+       cytp->tp_metrics_supported = (param[2] & TP_METRICS_MASK) ? 1 : 0;
+
+       psmouse_dbg(psmouse, "cytp->fw_version = %d\n", cytp->fw_version);
+       psmouse_dbg(psmouse, "cytp->tp_metrics_supported = %d\n",
+                cytp->tp_metrics_supported);
+
+       return 0;
+}
+
+static int cypress_read_tp_metrics(struct psmouse *psmouse)
+{
+       struct cytp_data *cytp = psmouse->private;
+       unsigned char param[8];
+
+       /* set default values for tp metrics. */
+       cytp->tp_width = CYTP_DEFAULT_WIDTH;
+       cytp->tp_high = CYTP_DEFAULT_HIGH;
+       cytp->tp_max_abs_x = CYTP_ABS_MAX_X;
+       cytp->tp_max_abs_y = CYTP_ABS_MAX_Y;
+       cytp->tp_min_pressure = CYTP_MIN_PRESSURE;
+       cytp->tp_max_pressure = CYTP_MAX_PRESSURE;
+       cytp->tp_res_x = cytp->tp_max_abs_x / cytp->tp_width;
+       cytp->tp_res_y = cytp->tp_max_abs_y / cytp->tp_high;
+
+       memset(param, 0, sizeof(param));
+       if (cypress_send_ext_cmd(psmouse, CYTP_CMD_READ_TP_METRICS, param) == 0) {
+               /* Update trackpad parameters. */
+               cytp->tp_max_abs_x = (param[1] << 8) | param[0];
+               cytp->tp_max_abs_y = (param[3] << 8) | param[2];
+               cytp->tp_min_pressure = param[4];
+               cytp->tp_max_pressure = param[5];
+       }
+
+       if (!cytp->tp_max_pressure ||
+           cytp->tp_max_pressure < cytp->tp_min_pressure ||
+           !cytp->tp_width || !cytp->tp_high ||
+           !cytp->tp_max_abs_x ||
+           cytp->tp_max_abs_x < cytp->tp_width ||
+           !cytp->tp_max_abs_y ||
+           cytp->tp_max_abs_y < cytp->tp_high)
+               return -EINVAL;
+
+       cytp->tp_res_x = cytp->tp_max_abs_x / cytp->tp_width;
+       cytp->tp_res_y = cytp->tp_max_abs_y / cytp->tp_high;
+
+#ifdef CYTP_DEBUG_VERBOSE
+       psmouse_dbg(psmouse, "Dump trackpad hardware configuration as below:\n");
+       psmouse_dbg(psmouse, "cytp->tp_width = %d\n", cytp->tp_width);
+       psmouse_dbg(psmouse, "cytp->tp_high = %d\n", cytp->tp_high);
+       psmouse_dbg(psmouse, "cytp->tp_max_abs_x = %d\n", cytp->tp_max_abs_x);
+       psmouse_dbg(psmouse, "cytp->tp_max_abs_y = %d\n", cytp->tp_max_abs_y);
+       psmouse_dbg(psmouse, "cytp->tp_min_pressure = %d\n", cytp->tp_min_pressure);
+       psmouse_dbg(psmouse, "cytp->tp_max_pressure = %d\n", cytp->tp_max_pressure);
+       psmouse_dbg(psmouse, "cytp->tp_res_x = %d\n", cytp->tp_res_x);
+       psmouse_dbg(psmouse, "cytp->tp_res_y = %d\n", cytp->tp_res_y);
+
+       psmouse_dbg(psmouse, "tp_type_APA = %d\n",
+                       (param[6] & TP_METRICS_BIT_APA) ? 1 : 0);
+       psmouse_dbg(psmouse, "tp_type_MTG = %d\n",
+                       (param[6] & TP_METRICS_BIT_MTG) ? 1 : 0);
+       psmouse_dbg(psmouse, "tp_palm = %d\n",
+                       (param[6] & TP_METRICS_BIT_PALM) ? 1 : 0);
+       psmouse_dbg(psmouse, "tp_stubborn = %d\n",
+                       (param[6] & TP_METRICS_BIT_STUBBORN) ? 1 : 0);
+       psmouse_dbg(psmouse, "tp_1f_jitter = %d\n",
+                       (param[6] & TP_METRICS_BIT_1F_JITTER) >> 2);
+       psmouse_dbg(psmouse, "tp_2f_jitter = %d\n",
+                       (param[6] & TP_METRICS_BIT_2F_JITTER) >> 4);
+       psmouse_dbg(psmouse, "tp_1f_spike = %d\n",
+                       param[7] & TP_METRICS_BIT_1F_SPIKE);
+       psmouse_dbg(psmouse, "tp_2f_spike = %d\n",
+                       (param[7] & TP_METRICS_BIT_2F_SPIKE) >> 2);
+       psmouse_dbg(psmouse, "tp_abs_packet_format_set = %d\n",
+                       (param[7] & TP_METRICS_BIT_ABS_PKT_FORMAT_SET) >> 4);
+#endif
+
+       return 0;
+}
+
+static int cypress_query_hardware(struct psmouse *psmouse)
+{
+       struct cytp_data *cytp = psmouse->private;
+       int ret;
+
+       ret = cypress_read_fw_version(psmouse);
+       if (ret)
+               return ret;
+
+       if (cytp->tp_metrics_supported) {
+               ret = cypress_read_tp_metrics(psmouse);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int cypress_set_absolute_mode(struct psmouse *psmouse)
+{
+       struct cytp_data *cytp = psmouse->private;
+       unsigned char param[3];
+
+       if (cypress_send_ext_cmd(psmouse, CYTP_CMD_ABS_WITH_PRESSURE_MODE, param) < 0)
+               return -1;
+
+       cytp->mode = (cytp->mode & ~CYTP_BIT_ABS_REL_MASK)
+                       | CYTP_BIT_ABS_PRESSURE;
+       cypress_set_packet_size(psmouse, 5);
+
+       return 0;
+}
+
+/*
+ * Reset trackpad device.
+ * This is also the default mode when the trackpad is powered on.
+ */
+static void cypress_reset(struct psmouse *psmouse)
+{
+       struct cytp_data *cytp = psmouse->private;
+
+       cytp->mode = 0;
+
+       psmouse_reset(psmouse);
+}
+
+static int cypress_set_input_params(struct input_dev *input,
+                                   struct cytp_data *cytp)
+{
+       int ret;
+
+       if (!cytp->tp_res_x || !cytp->tp_res_y)
+               return -EINVAL;
+
+       __set_bit(EV_ABS, input->evbit);
+       input_set_abs_params(input, ABS_X, 0, cytp->tp_max_abs_x, 0, 0);
+       input_set_abs_params(input, ABS_Y, 0, cytp->tp_max_abs_y, 0, 0);
+       input_set_abs_params(input, ABS_PRESSURE,
+                            cytp->tp_min_pressure, cytp->tp_max_pressure, 0, 0);
+       input_set_abs_params(input, ABS_TOOL_WIDTH, 0, 255, 0, 0);
+
+       /* finger position */
+       input_set_abs_params(input, ABS_MT_POSITION_X, 0, cytp->tp_max_abs_x, 0, 0);
+       input_set_abs_params(input, ABS_MT_POSITION_Y, 0, cytp->tp_max_abs_y, 0, 0);
+       input_set_abs_params(input, ABS_MT_PRESSURE, 0, 255, 0, 0);
+
+       ret = input_mt_init_slots(input, CYTP_MAX_MT_SLOTS,
+                       INPUT_MT_DROP_UNUSED|INPUT_MT_TRACK);
+       if (ret < 0)
+               return ret;
+
+       __set_bit(INPUT_PROP_SEMI_MT, input->propbit);
+
+       input_abs_set_res(input, ABS_X, cytp->tp_res_x);
+       input_abs_set_res(input, ABS_Y, cytp->tp_res_y);
+
+       input_abs_set_res(input, ABS_MT_POSITION_X, cytp->tp_res_x);
+       input_abs_set_res(input, ABS_MT_POSITION_Y, cytp->tp_res_y);
+
+       __set_bit(BTN_TOUCH, input->keybit);
+       __set_bit(BTN_TOOL_FINGER, input->keybit);
+       __set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
+       __set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
+       __set_bit(BTN_TOOL_QUADTAP, input->keybit);
+       __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
+
+       __clear_bit(EV_REL, input->evbit);
+       __clear_bit(REL_X, input->relbit);
+       __clear_bit(REL_Y, input->relbit);
+
+       __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+       __set_bit(EV_KEY, input->evbit);
+       __set_bit(BTN_LEFT, input->keybit);
+       __set_bit(BTN_RIGHT, input->keybit);
+       __set_bit(BTN_MIDDLE, input->keybit);
+
+       input_set_drvdata(input, cytp);
+
+       return 0;
+}
+
+static int cypress_get_finger_count(unsigned char header_byte)
+{
+       unsigned char bits6_7;
+       int finger_count;
+
+       bits6_7 = header_byte >> 6;
+       finger_count = bits6_7 & 0x03;
+
+       if (finger_count == 1)
+               return 1;
+
+       if (header_byte & ABS_HSCROLL_BIT) {
+               /* HSCROLL gets added on to 0 finger count. */
+               switch (finger_count) {
+               case 0:
+                       return 4;
+               case 2:
+                       return 5;
+               default:
+                       /* Invalid contact (e.g. palm). Ignore it. */
+                       return -1;
+               }
+       }
+
+       return finger_count;
+}
+
+
+static int cypress_parse_packet(struct psmouse *psmouse,
+                               struct cytp_data *cytp, struct cytp_report_data *report_data)
+{
+       unsigned char *packet = psmouse->packet;
+       unsigned char header_byte = packet[0];
+       int contact_cnt;
+
+       memset(report_data, 0, sizeof(struct cytp_report_data));
+
+       contact_cnt = cypress_get_finger_count(header_byte);
+
+       if (contact_cnt < 0) /* e.g. palm detect */
+               return -EINVAL;
+
+       report_data->contact_cnt = contact_cnt;
+
+       report_data->tap = (header_byte & ABS_MULTIFINGER_TAP) ? 1 : 0;
+
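+       /*
+        * Absolute packet layout: byte 1 carries the high bits of the first
+        * contact's X (bits 6:4) and Y (bits 2:0), bytes 2/3 the low bytes
+        * and byte 4 the pressure; a second contact, when present, uses
+        * byte 5 for its high bits (7:4 for X, 3:0 for Y) and bytes 6/7 for
+        * the low bytes.
+        */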
+       if (report_data->contact_cnt == 1) {
+               report_data->contacts[0].x =
+                       ((packet[1] & 0x70) << 4) | packet[2];
+               report_data->contacts[0].y =
+                       ((packet[1] & 0x07) << 8) | packet[3];
+               if (cytp->mode & CYTP_BIT_ABS_PRESSURE)
+                       report_data->contacts[0].z = packet[4];
+
+       } else if (report_data->contact_cnt >= 2) {
+               report_data->contacts[0].x =
+                       ((packet[1] & 0x70) << 4) | packet[2];
+               report_data->contacts[0].y =
+                       ((packet[1] & 0x07) << 8) | packet[3];
+               if (cytp->mode & CYTP_BIT_ABS_PRESSURE)
+                       report_data->contacts[0].z = packet[4];
+
+               report_data->contacts[1].x =
+                       ((packet[5] & 0xf0) << 4) | packet[6];
+               report_data->contacts[1].y =
+                       ((packet[5] & 0x0f) << 8) | packet[7];
+               if (cytp->mode & CYTP_BIT_ABS_PRESSURE)
+                       report_data->contacts[1].z = report_data->contacts[0].z;
+       }
+
+       report_data->left = (header_byte & BTN_LEFT_BIT) ? 1 : 0;
+       report_data->right = (header_byte & BTN_RIGHT_BIT) ? 1 : 0;
+
+       /*
+        * This is only true if one of the mouse buttons were tapped.  Make
+        * sure it doesn't turn into a click. The regular tap-to-click
+        * functionality will handle that on its own. If we don't do this,
+        * disabling tap-to-click won't affect the mouse button zones.
+        */
+       if (report_data->tap)
+               report_data->left = 0;
+
+#ifdef CYTP_DEBUG_VERBOSE
+       {
+               int i;
+               int n = report_data->contact_cnt;
+               psmouse_dbg(psmouse, "Dump parsed report data as below:\n");
+               psmouse_dbg(psmouse, "contact_cnt = %d\n",
+                       report_data->contact_cnt);
+               if (n > CYTP_MAX_MT_SLOTS)
+                   n = CYTP_MAX_MT_SLOTS;
+               for (i = 0; i < n; i++)
+                       psmouse_dbg(psmouse, "contacts[%d] = {%d, %d, %d}\n", i,
+                                       report_data->contacts[i].x,
+                                       report_data->contacts[i].y,
+                                       report_data->contacts[i].z);
+               psmouse_dbg(psmouse, "left = %d\n", report_data->left);
+               psmouse_dbg(psmouse, "right = %d\n", report_data->right);
+               psmouse_dbg(psmouse, "middle = %d\n", report_data->middle);
+       }
+#endif
+
+       return 0;
+}
+
+static void cypress_process_packet(struct psmouse *psmouse, bool zero_pkt)
+{
+       int i;
+       struct input_dev *input = psmouse->dev;
+       struct cytp_data *cytp = psmouse->private;
+       struct cytp_report_data report_data;
+       struct cytp_contact *contact;
+       struct input_mt_pos pos[CYTP_MAX_MT_SLOTS];
+       int slots[CYTP_MAX_MT_SLOTS];
+       int n;
+
+       if (cypress_parse_packet(psmouse, cytp, &report_data))
+               return;
+
+       n = report_data.contact_cnt;
+
+       if (n > CYTP_MAX_MT_SLOTS)
+               n = CYTP_MAX_MT_SLOTS;
+
+       for (i = 0; i < n; i++) {
+               contact = &report_data.contacts[i];
+               pos[i].x = contact->x;
+               pos[i].y = contact->y;
+       }
+
+       input_mt_assign_slots(input, slots, pos, n);
+
+       for (i = 0; i < n; i++) {
+               contact = &report_data.contacts[i];
+               input_mt_slot(input, slots[i]);
+               input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+               input_report_abs(input, ABS_MT_POSITION_X, contact->x);
+               input_report_abs(input, ABS_MT_POSITION_Y, contact->y);
+               input_report_abs(input, ABS_MT_PRESSURE, contact->z);
+       }
+
+       input_mt_sync_frame(input);
+
+       input_mt_report_finger_count(input, report_data.contact_cnt);
+
+       input_report_key(input, BTN_LEFT, report_data.left);
+       input_report_key(input, BTN_RIGHT, report_data.right);
+       input_report_key(input, BTN_MIDDLE, report_data.middle);
+
+       input_sync(input);
+}
+
+static psmouse_ret_t cypress_validate_byte(struct psmouse *psmouse)
+{
+       int contact_cnt;
+       int index = psmouse->pktcnt - 1;
+       unsigned char *packet = psmouse->packet;
+       struct cytp_data *cytp = psmouse->private;
+
+       if (index < 0 || index > cytp->pkt_size)
+               return PSMOUSE_BAD_DATA;
+
+       if (index == 0 && (packet[0] & 0xfc) == 0) {
+               /* Process the packet to report that all fingers have left. */
+               cypress_process_packet(psmouse, 1);
+               return PSMOUSE_FULL_PACKET;
+       }
+
+       /*
+        * Perform validation (and adjust packet size) based only on the
+        * first byte; allow all further bytes through.
+        */
+       if (index != 0)
+               return PSMOUSE_GOOD_DATA;
+
+       /*
+        * If absolute/relative mode bit has not been set yet, just pass
+        * the byte through.
+        */
+       if ((cytp->mode & CYTP_BIT_ABS_REL_MASK) == 0)
+               return PSMOUSE_GOOD_DATA;
+
+       if ((packet[0] & 0x08) == 0x08)
+               return PSMOUSE_BAD_DATA;
+
+       contact_cnt = cypress_get_finger_count(packet[0]);
+
+       if (contact_cnt < 0)
+               return PSMOUSE_BAD_DATA;
+
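+       /*
+        * Two-finger reports carry an extra coordinate set and pressure
+        * mode adds one pressure byte, hence the four possible packet sizes.
+        */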
+       if (cytp->mode & CYTP_BIT_ABS_NO_PRESSURE)
+               cypress_set_packet_size(psmouse, contact_cnt == 2 ? 7 : 4);
+       else
+               cypress_set_packet_size(psmouse, contact_cnt == 2 ? 8 : 5);
+
+       return PSMOUSE_GOOD_DATA;
+}
+
+static psmouse_ret_t cypress_protocol_handler(struct psmouse *psmouse)
+{
+       struct cytp_data *cytp = psmouse->private;
+
+       if (psmouse->pktcnt >= cytp->pkt_size) {
+               cypress_process_packet(psmouse, 0);
+               return PSMOUSE_FULL_PACKET;
+       }
+
+       return cypress_validate_byte(psmouse);
+}
+
+static void cypress_set_rate(struct psmouse *psmouse, unsigned int rate)
+{
+       struct cytp_data *cytp = psmouse->private;
+
+       if (rate >= 80) {
+               psmouse->rate = 80;
+               cytp->mode |= CYTP_BIT_HIGH_RATE;
+       } else {
+               psmouse->rate = 40;
+               cytp->mode &= ~CYTP_BIT_HIGH_RATE;
+       }
+
+       ps2_command(&psmouse->ps2dev, (unsigned char *)&psmouse->rate,
+                   PSMOUSE_CMD_SETRATE);
+}
+
+static void cypress_disconnect(struct psmouse *psmouse)
+{
+       cypress_reset(psmouse);
+       kfree(psmouse->private);
+       psmouse->private = NULL;
+}
+
+static int cypress_reconnect(struct psmouse *psmouse)
+{
+       int tries = CYTP_PS2_CMD_TRIES;
+       int rc;
+
+       do {
+               cypress_reset(psmouse);
+               rc = cypress_detect(psmouse, false);
+       } while (rc && (--tries > 0));
+
+       if (rc) {
+               psmouse_err(psmouse, "Reconnect: unable to detect trackpad.\n");
+               return -1;
+       }
+
+       if (cypress_set_absolute_mode(psmouse)) {
+               psmouse_err(psmouse, "Reconnect: Unable to initialize Cypress absolute mode.\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+int cypress_init(struct psmouse *psmouse)
+{
+       struct cytp_data *cytp;
+
+       cytp = kzalloc(sizeof(struct cytp_data), GFP_KERNEL);
+       if (!cytp)
+               return -ENOMEM;
+
+       psmouse->private = cytp;
+
+       cypress_reset(psmouse);
+
+       psmouse->pktsize = 8;
+
+       if (cypress_query_hardware(psmouse)) {
+               psmouse_err(psmouse, "Unable to query Trackpad hardware.\n");
+               goto err_exit;
+       }
+
+       if (cypress_set_absolute_mode(psmouse)) {
+               psmouse_err(psmouse, "init: Unable to initialize Cypress absolute mode.\n");
+               goto err_exit;
+       }
+
+       if (cypress_set_input_params(psmouse->dev, cytp) < 0) {
+               psmouse_err(psmouse, "init: Unable to set input params.\n");
+               goto err_exit;
+       }
+
+       psmouse->model = 1;
+       psmouse->protocol_handler = cypress_protocol_handler;
+       psmouse->set_rate = cypress_set_rate;
+       psmouse->disconnect = cypress_disconnect;
+       psmouse->reconnect = cypress_reconnect;
+       psmouse->cleanup = cypress_reset;
+       psmouse->resync_time = 0;
+
+       return 0;
+
+err_exit:
+       /*
+        * Reset the Cypress trackpad to its default state, then let the
+        * psmouse driver communicate with it as a standard PS/2 mouse.
+        */
+       cypress_reset(psmouse);
+
+       psmouse->private = NULL;
+       kfree(cytp);
+
+       return -1;
+}
+
+bool cypress_supported(void)
+{
+       return true;
+}
diff --git a/drivers/input/mouse/cypress_ps2.h b/drivers/input/mouse/cypress_ps2.h
new file mode 100644 (file)
index 0000000..4720f21
--- /dev/null
@@ -0,0 +1,191 @@
+#ifndef _CYPRESS_PS2_H
+#define _CYPRESS_PS2_H
+
+#include "psmouse.h"
+
+#define CMD_BITS_MASK 0x03
+#define COMPOSIT(x, s) (((x) & CMD_BITS_MASK) << (s))
+
+#define ENCODE_CMD(aa, bb, cc, dd) \
+       (COMPOSIT((aa), 6) | COMPOSIT((bb), 4) | COMPOSIT((cc), 2) | COMPOSIT((dd), 0))
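+/*
+ * Example: CYTP_CMD_ABS_WITH_PRESSURE_MODE = ENCODE_CMD(0, 1, 0, 1) packs
+ * the four 2-bit fields into 0x11; cypress_send_ext_cmd() later replays
+ * them to the trackpad as four consecutive SETRES arguments.
+ */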
+#define CYTP_CMD_ABS_NO_PRESSURE_MODE       ENCODE_CMD(0, 1, 0, 0)
+#define CYTP_CMD_ABS_WITH_PRESSURE_MODE     ENCODE_CMD(0, 1, 0, 1)
+#define CYTP_CMD_SMBUS_MODE                 ENCODE_CMD(0, 1, 1, 0)
+#define CYTP_CMD_STANDARD_MODE              ENCODE_CMD(0, 2, 0, 0)  /* not implemented yet. */
+#define CYTP_CMD_CYPRESS_REL_MODE           ENCODE_CMD(1, 1, 1, 1)  /* not implemented yet. */
+#define CYTP_CMD_READ_CYPRESS_ID            ENCODE_CMD(0, 0, 0, 0)
+#define CYTP_CMD_READ_TP_METRICS            ENCODE_CMD(0, 0, 0, 1)
+#define CYTP_CMD_SET_HSCROLL_WIDTH(w)       ENCODE_CMD(1, 1, 0, (w))
+#define     CYTP_CMD_SET_HSCROLL_MASK       ENCODE_CMD(1, 1, 0, 0)
+#define CYTP_CMD_SET_VSCROLL_WIDTH(w)       ENCODE_CMD(1, 2, 0, (w))
+#define     CYTP_CMD_SET_VSCROLL_MASK       ENCODE_CMD(1, 2, 0, 0)
+#define CYTP_CMD_SET_PALM_GEOMETRY(e)       ENCODE_CMD(1, 2, 1, (e))
+#define     CYTP_CMD_PALM_GEOMETRY_MASK     ENCODE_CMD(1, 2, 1, 0)
+#define CYTP_CMD_SET_PALM_SENSITIVITY(s)    ENCODE_CMD(1, 2, 2, (s))
+#define     CYTP_CMD_PALM_SENSITIVITY_MASK  ENCODE_CMD(1, 2, 2, 0)
+#define CYTP_CMD_SET_MOUSE_SENSITIVITY(s)   ENCODE_CMD(1, 3, ((s) >> 2), (s))
+#define     CYTP_CMD_MOUSE_SENSITIVITY_MASK ENCODE_CMD(1, 3, 0, 0)
+#define CYTP_CMD_REQUEST_BASELINE_STATUS    ENCODE_CMD(2, 0, 0, 1)
+#define CYTP_CMD_REQUEST_RECALIBRATION      ENCODE_CMD(2, 0, 0, 3)
+
+#define DECODE_CMD_AA(x) (((x) >> 6) & CMD_BITS_MASK)
+#define DECODE_CMD_BB(x) (((x) >> 4) & CMD_BITS_MASK)
+#define DECODE_CMD_CC(x) (((x) >> 2) & CMD_BITS_MASK)
+#define DECODE_CMD_DD(x) ((x) & CMD_BITS_MASK)
+
+/* Cypress trackpad working mode. */
+#define CYTP_BIT_ABS_PRESSURE    (1 << 3)
+#define CYTP_BIT_ABS_NO_PRESSURE (1 << 2)
+#define CYTP_BIT_CYPRESS_REL     (1 << 1)
+#define CYTP_BIT_STANDARD_REL    (1 << 0)
+#define CYTP_BIT_REL_MASK (CYTP_BIT_CYPRESS_REL | CYTP_BIT_STANDARD_REL)
+#define CYTP_BIT_ABS_MASK (CYTP_BIT_ABS_PRESSURE | CYTP_BIT_ABS_NO_PRESSURE)
+#define CYTP_BIT_ABS_REL_MASK (CYTP_BIT_ABS_MASK | CYTP_BIT_REL_MASK)
+
+#define CYTP_BIT_HIGH_RATE       (1 << 4)
+/*
+ * When the report mode bit is set, the firmware works in Remote Mode;
+ * when it is cleared, the firmware works in Stream Mode.
+ */
+#define CYTP_BIT_REPORT_MODE     (1 << 5)
+
+/* scrolling width values for set HSCROLL and VSCROLL width command. */
+#define SCROLL_WIDTH_NARROW 1
+#define SCROLL_WIDTH_NORMAL 2
+#define SCROLL_WIDTH_WIDE   3
+
+#define PALM_GEOMETRY_ENABLE  1
+#define PALM_GEOMETRY_DISABLE 0
+
+#define TP_METRICS_MASK  0x80
+#define FW_VERSION_MASK  0x7f
+#define FW_VER_HIGH_MASK 0x70
+#define FW_VER_LOW_MASK  0x0f
+
+/* Times to retry a ps2_command and millisecond delay between tries. */
+#define CYTP_PS2_CMD_TRIES 3
+#define CYTP_PS2_CMD_DELAY 500
+
+/* time out for PS/2 command only in milliseconds. */
+#define CYTP_CMD_TIMEOUT  200
+#define CYTP_DATA_TIMEOUT 30
+
+#define CYTP_EXT_CMD   0xe8
+#define CYTP_PS2_RETRY 0xfe
+#define CYTP_PS2_ERROR 0xfc
+
+#define CYTP_RESP_RETRY 0x01
+#define CYTP_RESP_ERROR 0xfe
+
+
+#define CYTP_105001_WIDTH  97   /* Dell XPS 13 */
+#define CYTP_105001_HIGH   59
+#define CYTP_DEFAULT_WIDTH (CYTP_105001_WIDTH)
+#define CYTP_DEFAULT_HIGH  (CYTP_105001_HIGH)
+
+#define CYTP_ABS_MAX_X     1600
+#define CYTP_ABS_MAX_Y     900
+#define CYTP_MAX_PRESSURE  255
+#define CYTP_MIN_PRESSURE  0
+
+/* header byte bits of relative package. */
+#define BTN_LEFT_BIT   0x01
+#define BTN_RIGHT_BIT  0x02
+#define BTN_MIDDLE_BIT 0x04
+#define REL_X_SIGN_BIT 0x10
+#define REL_Y_SIGN_BIT 0x20
+
+/* header byte bits of absolute package. */
+#define ABS_VSCROLL_BIT 0x10
+#define ABS_HSCROLL_BIT 0x20
+#define ABS_MULTIFINGER_TAP 0x04
+#define ABS_EDGE_MOTION_MASK 0x80
+
+#define DFLT_RESP_BITS_VALID     0x88  /* SMBus bit should not be set. */
+#define DFLT_RESP_SMBUS_BIT      0x80
+#define   DFLT_SMBUS_MODE        0x80
+#define   DFLT_PS2_MODE          0x00
+#define DFLT_RESP_BIT_MODE       0x40
+#define   DFLT_RESP_REMOTE_MODE  0x40
+#define   DFLT_RESP_STREAM_MODE  0x00
+#define DFLT_RESP_BIT_REPORTING  0x20
+#define DFLT_RESP_BIT_SCALING    0x10
+
+#define TP_METRICS_BIT_PALM               0x80
+#define TP_METRICS_BIT_STUBBORN           0x40
+#define TP_METRICS_BIT_2F_JITTER          0x30
+#define TP_METRICS_BIT_1F_JITTER          0x0c
+#define TP_METRICS_BIT_APA                0x02
+#define TP_METRICS_BIT_MTG                0x01
+#define TP_METRICS_BIT_ABS_PKT_FORMAT_SET 0xf0
+#define TP_METRICS_BIT_2F_SPIKE           0x0c
+#define TP_METRICS_BIT_1F_SPIKE           0x03
+
+/* bits of first byte response of E9h-Status Request command. */
+#define RESP_BTN_RIGHT_BIT  0x01
+#define RESP_BTN_MIDDLE_BIT 0x02
+#define RESP_BTN_LEFT_BIT   0x04
+#define RESP_SCALING_BIT    0x10
+#define RESP_ENABLE_BIT     0x20
+#define RESP_REMOTE_BIT     0x40
+#define RESP_SMBUS_BIT      0x80
+
+#define CYTP_MAX_MT_SLOTS 2
+
+struct cytp_contact {
+       int x;
+       int y;
+       int z;  /* also named as touch pressure. */
+};
+
+/* The structure of Cypress Trackpad event data. */
+struct cytp_report_data {
+       int contact_cnt;
+       struct cytp_contact contacts[CYTP_MAX_MT_SLOTS];
+       unsigned int left:1;
+       unsigned int right:1;
+       unsigned int middle:1;
+       unsigned int tap:1;  /* multi-finger tap detected. */
+};
+
+/* The structure of Cypress Trackpad device private data. */
+struct cytp_data {
+       int fw_version;
+
+       int pkt_size;
+       int mode;
+
+       int tp_min_pressure;
+       int tp_max_pressure;
+       int tp_width;  /* X direction physical size in mm. */
+       int tp_high;  /* Y direction physical size in mm. */
+       int tp_max_abs_x;  /* Max X absolute units that can be reported. */
+       int tp_max_abs_y;  /* Max Y absolute units that can be reported. */
+
+       int tp_res_x;  /* X resolution in units/mm. */
+       int tp_res_y;  /* Y resolution in units/mm. */
+
+       int tp_metrics_supported;
+};
+
+
+#ifdef CONFIG_MOUSE_PS2_CYPRESS
+int cypress_detect(struct psmouse *psmouse, bool set_properties);
+int cypress_init(struct psmouse *psmouse);
+bool cypress_supported(void);
+#else
+static inline int cypress_detect(struct psmouse *psmouse, bool set_properties)
+{
+       return -ENOSYS;
+}
+static inline int cypress_init(struct psmouse *psmouse)
+{
+       return -ENOSYS;
+}
+static inline bool cypress_supported(void)
+{
+       return false;
+}
+#endif /* CONFIG_MOUSE_PS2_CYPRESS */
+
+#endif  /* _CYPRESS_PS2_H */
index 22fe2547e16991cc45987d33f7c5bfb4436c5265..cff065f6261cf31a3188226f14fe28e9291f6770 100644 (file)
@@ -34,6 +34,7 @@
 #include "touchkit_ps2.h"
 #include "elantech.h"
 #include "sentelic.h"
+#include "cypress_ps2.h"
 
 #define DRIVER_DESC    "PS/2 mouse driver"
 
@@ -758,6 +759,28 @@ static int psmouse_extensions(struct psmouse *psmouse,
                synaptics_reset(psmouse);
        }
 
+/*
+ * Try Cypress Trackpad.
+ * It must be tried before the Finger Sensing Pad because the Finger
+ * Sensing Pad probe upsets some Cypress Trackpad models.
+ */
+       if (max_proto > PSMOUSE_IMEX &&
+                       cypress_detect(psmouse, set_properties) == 0) {
+               if (cypress_supported()) {
+                       if (cypress_init(psmouse) == 0)
+                               return PSMOUSE_CYPRESS;
+
+                       /*
+                        * The Finger Sensing Pad probe upsets some Cypress
+                        * Trackpad models, so skip it once a Cypress
+                        * Trackpad has been detected.
+                        */
+                       return PSMOUSE_PS2;
+               }
+
+               max_proto = PSMOUSE_IMEX;
+       }
+
 /*
  * Try ALPS TouchPad
  */
@@ -896,6 +919,15 @@ static const struct psmouse_protocol psmouse_protocols[] = {
                .alias          = "thinkps",
                .detect         = thinking_detect,
        },
+#ifdef CONFIG_MOUSE_PS2_CYPRESS
+       {
+               .type           = PSMOUSE_CYPRESS,
+               .name           = "CyPS/2",
+               .alias          = "cypress",
+               .detect         = cypress_detect,
+               .init           = cypress_init,
+       },
+#endif
        {
                .type           = PSMOUSE_GENPS,
                .name           = "GenPS/2",
index fe1df231ba4c8dfc07488fd6d4fe1031344e0f76..2f0b39d59a9bd51aa9dd3e803f9502a0515c5f61 100644 (file)
@@ -95,6 +95,7 @@ enum psmouse_type {
        PSMOUSE_ELANTECH,
        PSMOUSE_FSP,
        PSMOUSE_SYNAPTICS_RELATIVE,
+       PSMOUSE_CYPRESS,
        PSMOUSE_AUTO            /* This one should always be last */
 };
 
index 12d12ca3fee030e0ab9ced706616c1160a1c584b..2f78538e09d0f9e733be6f862a4de8e9d2c0301c 100644 (file)
@@ -722,11 +722,13 @@ static void synaptics_report_mt_data(struct psmouse *psmouse,
        default:
                /*
                 * If the finger slot contained in SGM is valid, and either
-                * hasn't changed, or is new, then report SGM in MTB slot 0.
+                * hasn't changed, or is new, or the old SGM has now moved to
+                * AGM, then report SGM in MTB slot 0.
                 * Otherwise, empty MTB slot 0.
                 */
                if (mt_state->sgm != -1 &&
-                   (mt_state->sgm == old->sgm || old->sgm == -1))
+                   (mt_state->sgm == old->sgm ||
+                    old->sgm == -1 || mt_state->agm == old->sgm))
                        synaptics_report_slot(dev, 0, sgm);
                else
                        synaptics_report_slot(dev, 0, NULL);
@@ -735,9 +737,31 @@ static void synaptics_report_mt_data(struct psmouse *psmouse,
                 * If the finger slot contained in AGM is valid, and either
                 * hasn't changed, or is new, then report AGM in MTB slot 1.
                 * Otherwise, empty MTB slot 1.
+                *
+                * However, when the AGM is new, make sure that it is either
+                * the same as the old SGM, or that there was no SGM at all.
+                *
+                * Otherwise, if the SGM was just 1, and the new AGM is 2, then
+                * the new AGM will keep the old SGM's tracking ID, which can
+                * cause apparent drumroll.  This can happen in the following
+                * valid finger sequence:
+                *
+                *  Action                 SGM  AGM (MTB slot:Contact)
+                *  1. Touch contact 0    (0:0)
+                *  2. Touch contact 1    (0:0, 1:1)
+                *  3. Lift  contact 0    (1:1)
+                *  4. Touch contacts 2,3 (0:2, 1:3)
+                *
+                * In step 4, contact 3 (reported in the AGM) must not be given
+                * the same tracking ID as contact 1 had in step 3.  To avoid
+                * this, the first AGM packet containing contact 3 is dropped
+                * and slot 1 is invalidated (tracking ID = -1).
                 */
                if (mt_state->agm != -1 &&
-                   (mt_state->agm == old->agm || old->agm == -1))
+                   (mt_state->agm == old->agm ||
+                    (old->agm == -1 &&
+                     (old->sgm == -1 || mt_state->agm == old->sgm))))
                        synaptics_report_slot(dev, 1, agm);
                else
                        synaptics_report_slot(dev, 1, NULL);
@@ -1247,11 +1271,11 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
        input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
 
        if (SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) {
-               input_mt_init_slots(dev, 2, 0);
                set_abs_position_params(dev, priv, ABS_MT_POSITION_X,
                                        ABS_MT_POSITION_Y);
                /* Image sensors can report per-contact pressure */
                input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
+               input_mt_init_slots(dev, 2, INPUT_MT_POINTER);
 
                /* Image sensors can signal 4 and 5 finger clicks */
                __set_bit(BTN_TOOL_QUADTAP, dev->keybit);
index 4a4e182c33e7a0c540a52a566b4eeabe038a5e4a..560c243bfcaf41181e7cfbfc88e0a1cb266fce3d 100644 (file)
@@ -236,6 +236,7 @@ config SERIO_PS2MULT
 
 config SERIO_ARC_PS2
        tristate "ARC PS/2 support"
+       depends on GENERIC_HARDIRQS
        help
          Say Y here if you have an ARC FPGA platform with a PS/2
          controller in it.
index f92d34f45a1cc6ad13af95f42440021f3bafe2bf..aaf23aeae2ea428b4a26ea0cd99f20eeac32c964 100644 (file)
@@ -553,10 +553,10 @@ static int wacom_set_device_mode(struct usb_interface *intf, int report_id, int
        if (!rep_data)
                return error;
 
-       rep_data[0] = report_id;
-       rep_data[1] = mode;
-
        do {
+               rep_data[0] = report_id;
+               rep_data[1] = mode;
+
                error = wacom_set_report(intf, WAC_HID_FEATURE_REPORT,
                                         report_id, rep_data, length, 1);
                if (error >= 0)
index 264138f3217eb9fe2cc32cb23c84ee0b20d5d329..41b6fbf601122b26500c397d24e34f9789286f81 100644 (file)
@@ -359,6 +359,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
                case 0x802: /* Intuos4 General Pen */
                case 0x804: /* Intuos4 Marker Pen */
                case 0x40802: /* Intuos4 Classic Pen */
+               case 0x18803: /* DTH2242 Grip Pen */
                case 0x022:
                        wacom->tool[idx] = BTN_TOOL_PEN;
                        break;
@@ -538,6 +539,13 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
                                input_report_key(input, wacom->tool[1], 0);
                                input_report_abs(input, ABS_MISC, 0);
                        }
+               } else if (features->type == DTK) {
+                       input_report_key(input, BTN_0, (data[6] & 0x01));
+                       input_report_key(input, BTN_1, (data[6] & 0x02));
+                       input_report_key(input, BTN_2, (data[6] & 0x04));
+                       input_report_key(input, BTN_3, (data[6] & 0x08));
+                       input_report_key(input, BTN_4, (data[6] & 0x10));
+                       input_report_key(input, BTN_5, (data[6] & 0x20));
                } else if (features->type == WACOM_24HD) {
                        input_report_key(input, BTN_0, (data[6] & 0x01));
                        input_report_key(input, BTN_1, (data[6] & 0x02));
@@ -785,25 +793,6 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
        return 1;
 }
 
-static int find_slot_from_contactid(struct wacom_wac *wacom, int contactid)
-{
-       int touch_max = wacom->features.touch_max;
-       int i;
-
-       if (!wacom->slots)
-               return -1;
-
-       for (i = 0; i < touch_max; ++i) {
-               if (wacom->slots[i] == contactid)
-                       return i;
-       }
-       for (i = 0; i < touch_max; ++i) {
-               if (wacom->slots[i] == -1)
-                       return i;
-       }
-       return -1;
-}
-
 static int int_dist(int x1, int y1, int x2, int y2)
 {
        int x = x2 - x1;
@@ -833,8 +822,7 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
        for (i = 0; i < contacts_to_send; i++) {
                int offset = (WACOM_BYTES_PER_24HDT_PACKET * i) + 1;
                bool touch = data[offset] & 0x1 && !wacom->shared->stylus_in_proximity;
-               int id = data[offset + 1];
-               int slot = find_slot_from_contactid(wacom, id);
+               int slot = input_mt_get_slot_by_key(input, data[offset + 1]);
 
                if (slot < 0)
                        continue;
@@ -856,9 +844,7 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
                        input_report_abs(input, ABS_MT_WIDTH_MINOR, min(w, h));
                        input_report_abs(input, ABS_MT_ORIENTATION, w > h);
                }
-               wacom->slots[slot] = touch ? id : -1;
        }
-
        input_mt_report_pointer_emulation(input, true);
 
        wacom->num_contacts_left -= contacts_to_send;
@@ -895,7 +881,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
                int offset = (WACOM_BYTES_PER_MT_PACKET + x_offset) * i + 3;
                bool touch = data[offset] & 0x1;
                int id = le16_to_cpup((__le16 *)&data[offset + 1]);
-               int slot = find_slot_from_contactid(wacom, id);
+               int slot = input_mt_get_slot_by_key(input, id);
 
                if (slot < 0)
                        continue;
@@ -908,9 +894,7 @@ static int wacom_mt_touch(struct wacom_wac *wacom)
                        input_report_abs(input, ABS_MT_POSITION_X, x);
                        input_report_abs(input, ABS_MT_POSITION_Y, y);
                }
-               wacom->slots[slot] = touch ? id : -1;
        }
-
        input_mt_report_pointer_emulation(input, true);
 
        wacom->num_contacts_left -= contacts_to_send;
@@ -942,12 +926,11 @@ static int wacom_tpc_mt_touch(struct wacom_wac *wacom)
                        contact_with_no_pen_down_count++;
                }
        }
+       input_mt_report_pointer_emulation(input, true);
 
        /* keep touch state for pen event */
        wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);
 
-       input_mt_report_pointer_emulation(input, true);
-
        return 1;
 }
 
@@ -1104,12 +1087,15 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
 static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
 {
        struct input_dev *input = wacom->input;
-       int slot_id = data[0] - 2;  /* data[0] is between 2 and 17 */
        bool touch = data[1] & 0x80;
+       int slot = input_mt_get_slot_by_key(input, data[0]);
+
+       if (slot < 0)
+               return;
 
        touch = touch && !wacom->shared->stylus_in_proximity;
 
-       input_mt_slot(input, slot_id);
+       input_mt_slot(input, slot);
        input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
 
        if (touch) {
@@ -1162,7 +1148,6 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom)
                        wacom_bpt3_button_msg(wacom, data + offset);
 
        }
-
        input_mt_report_pointer_emulation(input, true);
 
        input_sync(input);
@@ -1319,6 +1304,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
        case WACOM_21UX2:
        case WACOM_22HD:
        case WACOM_24HD:
+       case DTK:
                sync = wacom_intuos_irq(wacom_wac);
                break;
 
@@ -1444,39 +1430,64 @@ static unsigned int wacom_calculate_touch_res(unsigned int logical_max,
        return (logical_max * 100) / physical_max;
 }
 
-int wacom_setup_input_capabilities(struct input_dev *input_dev,
-                                  struct wacom_wac *wacom_wac)
+static void wacom_abs_set_axis(struct input_dev *input_dev,
+                              struct wacom_wac *wacom_wac)
 {
        struct wacom_features *features = &wacom_wac->features;
-       int i;
-
-       input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-
-       __set_bit(BTN_TOUCH, input_dev->keybit);
-
-       input_set_abs_params(input_dev, ABS_X, 0, features->x_max,
-                            features->x_fuzz, 0);
-       input_set_abs_params(input_dev, ABS_Y, 0, features->y_max,
-                            features->y_fuzz, 0);
 
        if (features->device_type == BTN_TOOL_PEN) {
-               input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max,
-                            features->pressure_fuzz, 0);
+               input_set_abs_params(input_dev, ABS_X, 0, features->x_max,
+                                    features->x_fuzz, 0);
+               input_set_abs_params(input_dev, ABS_Y, 0, features->y_max,
+                                    features->y_fuzz, 0);
+               input_set_abs_params(input_dev, ABS_PRESSURE, 0,
+                       features->pressure_max, features->pressure_fuzz, 0);
 
                /* penabled devices have fixed resolution for each model */
                input_abs_set_res(input_dev, ABS_X, features->x_resolution);
                input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
        } else {
-               input_abs_set_res(input_dev, ABS_X,
-                       wacom_calculate_touch_res(features->x_max,
-                                               features->x_phy));
-               input_abs_set_res(input_dev, ABS_Y,
-                       wacom_calculate_touch_res(features->y_max,
-                                               features->y_phy));
+               if (features->touch_max <= 2) {
+                       input_set_abs_params(input_dev, ABS_X, 0,
+                               features->x_max, features->x_fuzz, 0);
+                       input_set_abs_params(input_dev, ABS_Y, 0,
+                               features->y_max, features->y_fuzz, 0);
+                       input_abs_set_res(input_dev, ABS_X,
+                               wacom_calculate_touch_res(features->x_max,
+                                                       features->x_phy));
+                       input_abs_set_res(input_dev, ABS_Y,
+                               wacom_calculate_touch_res(features->y_max,
+                                                       features->y_phy));
+               }
+
+               if (features->touch_max > 1) {
+                       input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0,
+                               features->x_max, features->x_fuzz, 0);
+                       input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0,
+                               features->y_max, features->y_fuzz, 0);
+                       input_abs_set_res(input_dev, ABS_MT_POSITION_X,
+                               wacom_calculate_touch_res(features->x_max,
+                                                       features->x_phy));
+                       input_abs_set_res(input_dev, ABS_MT_POSITION_Y,
+                               wacom_calculate_touch_res(features->y_max,
+                                                       features->y_phy));
+               }
        }
+}
 
+int wacom_setup_input_capabilities(struct input_dev *input_dev,
+                                  struct wacom_wac *wacom_wac)
+{
+       struct wacom_features *features = &wacom_wac->features;
+       int i;
+
+       input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+
+       __set_bit(BTN_TOUCH, input_dev->keybit);
        __set_bit(ABS_MISC, input_dev->absbit);
 
+       wacom_abs_set_axis(input_dev, wacom_wac);
+
        switch (wacom_wac->features.type) {
        case WACOM_MO:
                input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
@@ -1513,12 +1524,17 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
                __set_bit(BTN_Y, input_dev->keybit);
                __set_bit(BTN_Z, input_dev->keybit);
 
-               for (i = 0; i < 10; i++)
+               for (i = 6; i < 10; i++)
                        __set_bit(BTN_0 + i, input_dev->keybit);
 
                __set_bit(KEY_PROG1, input_dev->keybit);
                __set_bit(KEY_PROG2, input_dev->keybit);
                __set_bit(KEY_PROG3, input_dev->keybit);
+               /* fall through */
+
+       case DTK:
+               for (i = 0; i < 6; i++)
+                       __set_bit(BTN_0 + i, input_dev->keybit);
 
                input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
                input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
@@ -1614,24 +1630,11 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
                } else if (features->device_type == BTN_TOOL_FINGER) {
                        __clear_bit(ABS_MISC, input_dev->absbit);
 
-                       __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
-                       __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
-                       __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
-                       __set_bit(BTN_TOOL_QUADTAP, input_dev->keybit);
-
-                       input_mt_init_slots(input_dev, features->touch_max, 0);
-
                        input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
                                             0, features->x_max, 0, 0);
                        input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR,
                                             0, features->y_max, 0, 0);
-
-                       input_set_abs_params(input_dev, ABS_MT_POSITION_X,
-                                            0, features->x_max,
-                                            features->x_fuzz, 0);
-                       input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
-                                            0, features->y_max,
-                                            features->y_fuzz, 0);
+                       input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
                }
                break;
 
@@ -1662,27 +1665,14 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
 
        case MTSCREEN:
        case MTTPC:
-               if (features->device_type == BTN_TOOL_FINGER) {
-                       wacom_wac->slots = kmalloc(features->touch_max *
-                                                       sizeof(int),
-                                                  GFP_KERNEL);
-                       if (!wacom_wac->slots)
-                               return -ENOMEM;
-
-                       for (i = 0; i < features->touch_max; i++)
-                               wacom_wac->slots[i] = -1;
-               }
-               /* fall through */
-
        case TABLETPC2FG:
                if (features->device_type == BTN_TOOL_FINGER) {
-                       input_mt_init_slots(input_dev, features->touch_max, 0);
-                       input_set_abs_params(input_dev, ABS_MT_TOOL_TYPE,
-                                       0, MT_TOOL_MAX, 0, 0);
-                       input_set_abs_params(input_dev, ABS_MT_POSITION_X,
-                                       0, features->x_max, 0, 0);
-                       input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
-                                       0, features->y_max, 0, 0);
+                       unsigned int flags = INPUT_MT_DIRECT;
+
+                       if (wacom_wac->features.type == TABLETPC2FG)
+                               flags = 0;
+
+                       input_mt_init_slots(input_dev, features->touch_max, flags);
                }
                /* fall through */
 
@@ -1725,35 +1715,26 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
                __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
 
                if (features->device_type == BTN_TOOL_FINGER) {
+                       unsigned int flags = INPUT_MT_POINTER;
+
                        __set_bit(BTN_LEFT, input_dev->keybit);
                        __set_bit(BTN_FORWARD, input_dev->keybit);
                        __set_bit(BTN_BACK, input_dev->keybit);
                        __set_bit(BTN_RIGHT, input_dev->keybit);
 
-                       __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
-                       __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
-                       input_mt_init_slots(input_dev, features->touch_max, 0);
-
                        if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
-                               __set_bit(BTN_TOOL_TRIPLETAP,
-                                         input_dev->keybit);
-                               __set_bit(BTN_TOOL_QUADTAP,
-                                         input_dev->keybit);
-
                                input_set_abs_params(input_dev,
                                                     ABS_MT_TOUCH_MAJOR,
                                                     0, features->x_max, 0, 0);
                                input_set_abs_params(input_dev,
                                                     ABS_MT_TOUCH_MINOR,
                                                     0, features->y_max, 0, 0);
+                       } else {
+                               __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
+                               __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
+                               flags = 0;
                        }
-
-                       input_set_abs_params(input_dev, ABS_MT_POSITION_X,
-                                            0, features->x_max,
-                                            features->x_fuzz, 0);
-                       input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
-                                            0, features->y_max,
-                                            features->y_fuzz, 0);
+                       input_mt_init_slots(input_dev, features->touch_max, flags);
                } else if (features->device_type == BTN_TOOL_PEN) {
                        __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
                        __set_bit(BTN_TOOL_PEN, input_dev->keybit);
@@ -1978,6 +1959,13 @@ static const struct wacom_features wacom_features_0xCE =
 static const struct wacom_features wacom_features_0xF0 =
        { "Wacom DTU1631",        WACOM_PKGLEN_GRAPHIRE,  34623, 19553,  511,
          0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x59 = /* Pen */
+       { "Wacom DTH2242",        WACOM_PKGLEN_INTUOS,    95840, 54260, 2047,
+         63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+         .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5D };
+static const struct wacom_features wacom_features_0x5D = /* Touch */
+       { "Wacom DTH2242",       .type = WACOM_24HDT,
+         .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x59, .touch_max = 10 };
 static const struct wacom_features wacom_features_0xCC =
        { "Wacom Cintiq 21UX2",   WACOM_PKGLEN_INTUOS,    87200, 65600, 2047,
          63, WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
@@ -2152,6 +2140,8 @@ const struct usb_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0x43) },
        { USB_DEVICE_WACOM(0x44) },
        { USB_DEVICE_WACOM(0x45) },
+       { USB_DEVICE_WACOM(0x59) },
+       { USB_DEVICE_WACOM(0x5D) },
        { USB_DEVICE_WACOM(0xB0) },
        { USB_DEVICE_WACOM(0xB1) },
        { USB_DEVICE_WACOM(0xB2) },
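Most of the Wacom churn above swaps hand-rolled BTN_TOOL_* bookkeeping for flags passed to input_mt_init_slots(): INPUT_MT_DIRECT for touchscreens, INPUT_MT_POINTER for touchpad-style devices, and 0 where the driver still does its own finger-count emulation. As a rough sketch of that pattern (a hypothetical driver, not the Wacom code):

    #include <linux/input.h>
    #include <linux/input/mt.h>

    /*
     * Hypothetical touchscreen setup, for illustration only.  INPUT_MT_DIRECT
     * marks the device as a direct-touch surface; INPUT_MT_POINTER would mark
     * it as an indirect pointer and let the MT core provide the BTN_TOOL_FINGER/
     * DOUBLETAP/TRIPLETAP emulation the hunks above stop open-coding.
     */
    static int example_setup_touchscreen(struct input_dev *dev, int max_contacts,
                                         int x_max, int y_max)
    {
            input_set_abs_params(dev, ABS_MT_POSITION_X, 0, x_max, 0, 0);
            input_set_abs_params(dev, ABS_MT_POSITION_Y, 0, y_max, 0, 0);

            /* Allocates per-slot state and sets the matching INPUT_PROP_* bit. */
            return input_mt_init_slots(dev, max_contacts, INPUT_MT_DIRECT);
    }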
index 9396d7769f86a86930b56f3f2e1fbbae024b82cf..5f9a7721e16cf36a1a92021a03ddb45648292b88 100644 (file)
@@ -78,6 +78,7 @@ enum {
        INTUOS5L,
        WACOM_21UX2,
        WACOM_22HD,
+       DTK,
        WACOM_24HD,
        CINTIQ,
        WACOM_BEE,
@@ -135,7 +136,6 @@ struct wacom_wac {
        int pid;
        int battery_capacity;
        int num_contacts_left;
-       int *slots;
 };
 
 #endif
index 515cfe7905430fb621362231ac7dd468f42a3016..f9a5fd89bc022bd50c19c571441490056d0e9d83 100644 (file)
@@ -359,7 +359,7 @@ config TOUCHSCREEN_MCS5000
 
 config TOUCHSCREEN_MMS114
        tristate "MELFAS MMS114 touchscreen"
-       depends on I2C
+       depends on I2C && GENERIC_HARDIRQS
        help
          Say Y here if you have the MELFAS MMS114 touchscreen controller
          chip in your system.
index 638e20310f12e1c5a7007a097011827b99dd7fe1..861b7f77605ba9f2c107fc7080457c5a6d8f4916 100644 (file)
@@ -193,7 +193,6 @@ static struct spi_driver cyttsp_spi_driver = {
 
 module_spi_driver(cyttsp_spi_driver);
 
-MODULE_ALIAS("spi:cyttsp");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard Product (TTSP) SPI driver");
 MODULE_AUTHOR("Cypress");
index 98841d8aa635a50540323929e462e49d358fdbc1..4a29ddf6bf1e9a9bfed9a1afcc8cba810e66800c 100644 (file)
@@ -429,12 +429,12 @@ static int mms114_probe(struct i2c_client *client,
                return -ENODEV;
        }
 
-       data = kzalloc(sizeof(struct mms114_data), GFP_KERNEL);
-       input_dev = input_allocate_device();
+       data = devm_kzalloc(&client->dev, sizeof(struct mms114_data),
+                           GFP_KERNEL);
+       input_dev = devm_input_allocate_device(&client->dev);
        if (!data || !input_dev) {
                dev_err(&client->dev, "Failed to allocate memory\n");
-               error = -ENOMEM;
-               goto err_free_mem;
+               return -ENOMEM;
        }
 
        data->client = client;
@@ -466,57 +466,36 @@ static int mms114_probe(struct i2c_client *client,
        input_set_drvdata(input_dev, data);
        i2c_set_clientdata(client, data);
 
-       data->core_reg = regulator_get(&client->dev, "avdd");
+       data->core_reg = devm_regulator_get(&client->dev, "avdd");
        if (IS_ERR(data->core_reg)) {
                error = PTR_ERR(data->core_reg);
                dev_err(&client->dev,
                        "Unable to get the Core regulator (%d)\n", error);
-               goto err_free_mem;
+               return error;
        }
 
-       data->io_reg = regulator_get(&client->dev, "vdd");
+       data->io_reg = devm_regulator_get(&client->dev, "vdd");
        if (IS_ERR(data->io_reg)) {
                error = PTR_ERR(data->io_reg);
                dev_err(&client->dev,
                        "Unable to get the IO regulator (%d)\n", error);
-               goto err_core_reg;
+               return error;
        }
 
-       error = request_threaded_irq(client->irq, NULL, mms114_interrupt,
-                       IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "mms114", data);
+       error = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+                       mms114_interrupt, IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                       dev_name(&client->dev), data);
        if (error) {
                dev_err(&client->dev, "Failed to register interrupt\n");
-               goto err_io_reg;
+               return error;
        }
        disable_irq(client->irq);
 
        error = input_register_device(data->input_dev);
-       if (error)
-               goto err_free_irq;
-
-       return 0;
-
-err_free_irq:
-       free_irq(client->irq, data);
-err_io_reg:
-       regulator_put(data->io_reg);
-err_core_reg:
-       regulator_put(data->core_reg);
-err_free_mem:
-       input_free_device(input_dev);
-       kfree(data);
-       return error;
-}
-
-static int mms114_remove(struct i2c_client *client)
-{
-       struct mms114_data *data = i2c_get_clientdata(client);
-
-       free_irq(client->irq, data);
-       regulator_put(data->io_reg);
-       regulator_put(data->core_reg);
-       input_unregister_device(data->input_dev);
-       kfree(data);
+       if (error) {
+               dev_err(&client->dev, "Failed to register input device\n");
+               return error;
+       }
 
        return 0;
 }
@@ -590,7 +569,6 @@ static struct i2c_driver mms114_driver = {
                .of_match_table = of_match_ptr(mms114_dt_match),
        },
        .probe          = mms114_probe,
-       .remove         = mms114_remove,
        .id_table       = mms114_id,
 };
 
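The mms114 conversion above is a textbook device-managed (devm_*) cleanup: the private data, the input device, both regulators and the threaded IRQ are tied to the lifetime of the struct device, which is what lets the error-unwind labels and the whole mms114_remove() callback go away. Roughly, a probe routine built this way looks as follows (hypothetical "foo" driver, not the MELFAS code):

    #include <linux/device.h>
    #include <linux/i2c.h>
    #include <linux/input.h>
    #include <linux/interrupt.h>
    #include <linux/slab.h>

    struct foo_data {
            struct i2c_client *client;
            struct input_dev *input_dev;
    };

    static irqreturn_t foo_irq(int irq, void *dev_id)
    {
            return IRQ_HANDLED;             /* a real handler reads the chip here */
    }

    static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
    {
            struct foo_data *data;
            struct input_dev *input_dev;
            int error;

            /* Both allocations are freed automatically on probe failure or unbind. */
            data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
            input_dev = devm_input_allocate_device(&client->dev);
            if (!data || !input_dev)
                    return -ENOMEM;

            data->client = client;
            data->input_dev = input_dev;

            /* Released by the driver core, so no free_irq() on any error path. */
            error = devm_request_threaded_irq(&client->dev, client->irq, NULL,
                                              foo_irq, IRQF_ONESHOT,
                                              dev_name(&client->dev), data);
            if (error)
                    return error;

            return input_register_device(input_dev);
    }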
index 84d884b4ec3ef5b3d757bbff5826ec5bfb438a45..59e81b00f244c1852eca936493538e52d4eb3085 100644 (file)
@@ -120,6 +120,7 @@ static void stmpe_work(struct work_struct *work)
        __stmpe_reset_fifo(ts->stmpe);
 
        input_report_abs(ts->idev, ABS_PRESSURE, 0);
+       input_report_key(ts->idev, BTN_TOUCH, 0);
        input_sync(ts->idev);
 }
 
@@ -153,6 +154,7 @@ static irqreturn_t stmpe_ts_handler(int irq, void *data)
        input_report_abs(ts->idev, ABS_X, x);
        input_report_abs(ts->idev, ABS_Y, y);
        input_report_abs(ts->idev, ABS_PRESSURE, z);
+       input_report_key(ts->idev, BTN_TOUCH, 1);
        input_sync(ts->idev);
 
        /* flush the FIFO after we have read out our values. */
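The two stmpe hunks pair the existing ABS_PRESSURE reports with BTN_TOUCH, which is what generic userspace (evdev X/Wayland drivers, tslib) keys on to tell touch-down from release. The per-sample reporting pattern, sketched for a hypothetical single-touch controller:

    #include <linux/input.h>

    /* Illustrative only: emit one input frame per hardware sample. */
    static void example_report_sample(struct input_dev *idev,
                                      int x, int y, int pressure, bool down)
    {
            input_report_abs(idev, ABS_X, x);
            input_report_abs(idev, ABS_Y, y);
            input_report_abs(idev, ABS_PRESSURE, pressure);
            input_report_key(idev, BTN_TOUCH, down);        /* 1 = touching */
            input_sync(idev);                               /* close the frame */
    }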
index 9c0cdc7ea4493d7e9f973c7260baad69d19f74a9..7213e8b07e79bfa48d8b0e5d1f89d5684fee711c 100644 (file)
@@ -753,3 +753,4 @@ module_spi_driver(tsc2005_driver);
 MODULE_AUTHOR("Lauri Leukkunen <lauri.leukkunen@nokia.com>");
 MODULE_DESCRIPTION("TSC2005 Touchscreen Driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:tsc2005");
index f88fab56178cd5852b5844ea106335a613dc9014..6be2eb6a153a5588180da011fd91611d2ad597ad 100644 (file)
@@ -247,7 +247,7 @@ static int wm831x_ts_probe(struct platform_device *pdev)
 
        wm831x_ts = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ts),
                                 GFP_KERNEL);
-       input_dev = input_allocate_device();
+       input_dev = devm_input_allocate_device(&pdev->dev);
        if (!wm831x_ts || !input_dev) {
                error = -ENOMEM;
                goto err_alloc;
@@ -376,7 +376,6 @@ err_pd_irq:
 err_data_irq:
        free_irq(wm831x_ts->data_irq, wm831x_ts);
 err_alloc:
-       input_free_device(input_dev);
 
        return error;
 }
@@ -387,7 +386,6 @@ static int wm831x_ts_remove(struct platform_device *pdev)
 
        free_irq(wm831x_ts->pd_irq, wm831x_ts);
        free_irq(wm831x_ts->data_irq, wm831x_ts);
-       input_unregister_device(wm831x_ts->input_dev);
 
        return 0;
 }
index c1c74e030a58df6ea1020cec05c0f01acb14fb76..d33eaaf783ad571378d8125e66c243054eb6065c 100644 (file)
@@ -4017,10 +4017,10 @@ static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
 
                        index -= count - 1;
 
+                       cfg->remapped         = 1;
                        irte_info             = &cfg->irq_2_iommu;
                        irte_info->sub_handle = devid;
                        irte_info->irte_index = index;
-                       irte_info->iommu      = (void *)cfg;
 
                        goto out;
                }
@@ -4127,9 +4127,9 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
        index = attr->ioapic_pin;
 
        /* Setup IRQ remapping info */
+       cfg->remapped         = 1;
        irte_info->sub_handle = devid;
        irte_info->irte_index = index;
-       irte_info->iommu      = (void *)cfg;
 
        /* Setup IRTE for IOMMU */
        irte.val                = 0;
@@ -4288,9 +4288,9 @@ static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
        devid           = get_device_id(&pdev->dev);
        irte_info       = &cfg->irq_2_iommu;
 
+       cfg->remapped         = 1;
        irte_info->sub_handle = devid;
        irte_info->irte_index = index + offset;
-       irte_info->iommu      = (void *)cfg;
 
        return 0;
 }
@@ -4314,9 +4314,9 @@ static int setup_hpet_msi(unsigned int irq, unsigned int id)
        if (index < 0)
                return index;
 
+       cfg->remapped         = 1;
        irte_info->sub_handle = devid;
        irte_info->irte_index = index;
-       irte_info->iommu      = (void *)cfg;
 
        return 0;
 }
index 81837b0710a9ba0240b1eba3c1b3f8b51d997e2f..faf10ba1ed9ad1dcfcd00dabc26c725cf7e9ad00 100644 (file)
@@ -974,6 +974,38 @@ static void __init free_iommu_all(void)
        }
 }
 
+/*
+ * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations)
+ * Workaround:
+ *     BIOS should disable L2B miscellaneous clock gating by setting
+ *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
+ */
+static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
+{
+       u32 value;
+
+       if ((boot_cpu_data.x86 != 0x15) ||
+           (boot_cpu_data.x86_model < 0x10) ||
+           (boot_cpu_data.x86_model > 0x1f))
+               return;
+
+       pci_write_config_dword(iommu->dev, 0xf0, 0x90);
+       pci_read_config_dword(iommu->dev, 0xf4, &value);
+
+       if (value & BIT(2))
+               return;
+
+       /* Select NB indirect register 0x90 and enable writing */
+       pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
+
+       pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
+       pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n",
+               dev_name(&iommu->dev->dev));
+
+       /* Clear the enable writing bit */
+       pci_write_config_dword(iommu->dev, 0xf0, 0x90);
+}
+
 /*
  * This function glues the initialization function for one IOMMU
  * together and also allocates the command buffer and programs the
@@ -1172,6 +1204,8 @@ static int iommu_init_pci(struct amd_iommu *iommu)
                        iommu->stored_l2[i] = iommu_read_l2(iommu, i);
        }
 
+       amd_iommu_erratum_746_workaround(iommu);
+
        return pci_enable_device(iommu->dev);
 }
 
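The erratum-746 workaround above goes through the indirect register window the comment describes: config-space offset 0xf0 selects an internal L2 register, offset 0xf4 carries its value, and bit 8 of the selector arms a write. Stripped of the erratum-specific model checks, the access pattern is roughly the following (a sketch, not a helper the kernel actually exports; register 0x90 is the only one the fix documents):

    #include <linux/bitops.h>
    #include <linux/pci.h>

    static u32 example_l2_read(struct pci_dev *pdev, u32 reg)
    {
            u32 val;

            pci_write_config_dword(pdev, 0xf0, reg);        /* select register */
            pci_read_config_dword(pdev, 0xf4, &val);        /* read its value */
            return val;
    }

    static void example_l2_write(struct pci_dev *pdev, u32 reg, u32 val)
    {
            pci_write_config_dword(pdev, 0xf0, reg | BIT(8));       /* arm writes */
            pci_write_config_dword(pdev, 0xf4, val);
            pci_write_config_dword(pdev, 0xf0, reg);                /* write-protect again */
    }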
index 86e2f4a62b9a95fed887dda18b6bd5bdd2cdacda..174bb654453de7e4ece57f1e0807a48bd85122e5 100644 (file)
@@ -41,6 +41,8 @@
 #include <asm/irq_remapping.h>
 #include <asm/iommu_table.h>
 
+#include "irq_remapping.h"
+
 /* No locks are needed as DMA remapping hardware unit
  * list is constructed at boot time and hotplug of
  * these units are not supported by the architecture.
index b9d091157884570f636492be2e49e2e346697664..43d5c8b8e7ad05681e758d23727fa680595c62f3 100644 (file)
@@ -46,6 +46,8 @@
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 
+#include "irq_remapping.h"
+
 #define ROOT_SIZE              VTD_PAGE_SIZE
 #define CONTEXT_SIZE           VTD_PAGE_SIZE
 
@@ -4234,6 +4236,21 @@ static struct iommu_ops intel_iommu_ops = {
        .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
 };
 
+static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
+{
+       /* G4x/GM45 integrated gfx dmar support is totally busted. */
+       printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
+       dmar_map_gfx = 0;
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
+
 static void quirk_iommu_rwbf(struct pci_dev *dev)
 {
        /*
@@ -4242,12 +4259,6 @@ static void quirk_iommu_rwbf(struct pci_dev *dev)
         */
        printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
        rwbf_quirk = 1;
-
-       /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */
-       if (dev->revision == 0x07) {
-               printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n");
-               dmar_map_gfx = 0;
-       }
 }
 
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
index af8904de1d44e63ca35be4a58320889b2bab7a8b..f3b8f23b5d8f1d89cd5dea093faa9441061ca00c 100644 (file)
@@ -68,6 +68,7 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 {
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
+       struct irq_cfg *cfg = irq_get_chip_data(irq);
        u16 index, start_index;
        unsigned int mask = 0;
        unsigned long flags;
@@ -115,6 +116,7 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
        for (i = index; i < index + count; i++)
                table->base[i].present = 1;
 
+       cfg->remapped = 1;
        irq_iommu->iommu = iommu;
        irq_iommu->irte_index =  index;
        irq_iommu->sub_handle = 0;
@@ -155,6 +157,7 @@ static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 {
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
+       struct irq_cfg *cfg = irq_get_chip_data(irq);
        unsigned long flags;
 
        if (!irq_iommu)
@@ -162,6 +165,7 @@ static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subha
 
        raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
 
+       cfg->remapped = 1;
        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
@@ -425,11 +429,22 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
 
        /* Enable interrupt-remapping */
        iommu->gcmd |= DMA_GCMD_IRE;
+       iommu->gcmd &= ~DMA_GCMD_CFI;  /* Block compatibility-format MSIs */
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
 
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);
 
+       /*
+        * With CFI clear in the Global Command register, we should be
+        * protected from dangerous (i.e. compatibility) interrupts
+        * regardless of x2apic status.  Check just to be sure.
+        */
+       if (sts & DMA_GSTS_CFIS)
+               WARN(1, KERN_WARNING
+                       "Compatibility-format IRQs enabled despite intr remapping;\n"
+                       "you are vulnerable to IRQ injection.\n");
+
        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
@@ -526,20 +541,24 @@ static int __init intel_irq_remapping_supported(void)
 static int __init intel_enable_irq_remapping(void)
 {
        struct dmar_drhd_unit *drhd;
+       bool x2apic_present;
        int setup = 0;
        int eim = 0;
 
+       x2apic_present = x2apic_supported();
+
        if (parse_ioapics_under_ir() != 1) {
                printk(KERN_INFO "Not enable interrupt remapping\n");
-               return -1;
+               goto error;
        }
 
-       if (x2apic_supported()) {
+       if (x2apic_present) {
                eim = !dmar_x2apic_optout();
-               WARN(!eim, KERN_WARNING
-                          "Your BIOS is broken and requested that x2apic be disabled\n"
-                          "This will leave your machine vulnerable to irq-injection attacks\n"
-                          "Use 'intremap=no_x2apic_optout' to override BIOS request\n");
+               if (!eim)
+                       printk(KERN_WARNING
+                               "Your BIOS is broken and requested that x2apic be disabled.\n"
+                               "This will slightly decrease performance.\n"
+                               "Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
        }
 
        for_each_drhd_unit(drhd) {
@@ -578,7 +597,7 @@ static int __init intel_enable_irq_remapping(void)
                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
                               " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
-                       return -1;
+                       goto error;
                }
        }
 
@@ -594,7 +613,7 @@ static int __init intel_enable_irq_remapping(void)
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued, "
                               " invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
-                       return -1;
+                       goto error;
                }
        }
 
@@ -617,6 +636,14 @@ static int __init intel_enable_irq_remapping(void)
                goto error;
 
        irq_remapping_enabled = 1;
+
+       /*
+        * VT-d has a different layout for IO-APIC entries when
+        * interrupt remapping is enabled. So it needs a special routine
+        * to print IO-APIC entries for debugging purposes too.
+        */
+       x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries;
+
        pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");
 
        return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
@@ -625,6 +652,11 @@ error:
        /*
         * handle error condition gracefully here!
         */
+
+       if (x2apic_present)
+               WARN(1, KERN_WARNING
+                       "Failed to enable irq remapping.  You are vulnerable to irq-injection attacks.\n");
+
        return -1;
 }
 
index faf85d6e33fe0bb3dde0c229570fac156d9e9798..d56f8c17c5fe4dcc2e4db23085d4914b977c0fd0 100644 (file)
@@ -1,11 +1,18 @@
+#include <linux/seq_file.h>
+#include <linux/cpumask.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/cpumask.h>
 #include <linux/errno.h>
 #include <linux/msi.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
 
 #include <asm/hw_irq.h>
 #include <asm/irq_remapping.h>
+#include <asm/processor.h>
+#include <asm/x86_init.h>
+#include <asm/apic.h>
 
 #include "irq_remapping.h"
 
@@ -17,6 +24,152 @@ int no_x2apic_optout;
 
 static struct irq_remap_ops *remap_ops;
 
+static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec);
+static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
+                                 int index, int sub_handle);
+static int set_remapped_irq_affinity(struct irq_data *data,
+                                    const struct cpumask *mask,
+                                    bool force);
+
+static bool irq_remapped(struct irq_cfg *cfg)
+{
+       return (cfg->remapped == 1);
+}
+
+static void irq_remapping_disable_io_apic(void)
+{
+       /*
+        * With interrupt-remapping, for now we will use virtual wire A
+        * mode, as virtual wire B is little complex (need to configure
+        * mode, as virtual wire B is a little more complex (it needs both
+        * the IOAPIC RTE and the interrupt-remapping table entry configured).
+        * now.
+        */
+       if (cpu_has_apic || apic_from_smp_config())
+               disconnect_bsp_APIC(0);
+}
+
+static int do_setup_msi_irqs(struct pci_dev *dev, int nvec)
+{
+       int node, ret, sub_handle, index = 0;
+       unsigned int irq;
+       struct msi_desc *msidesc;
+
+       nvec = __roundup_pow_of_two(nvec);
+
+       WARN_ON(!list_is_singular(&dev->msi_list));
+       msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
+       WARN_ON(msidesc->irq);
+       WARN_ON(msidesc->msi_attrib.multiple);
+
+       node = dev_to_node(&dev->dev);
+       irq = __create_irqs(get_nr_irqs_gsi(), nvec, node);
+       if (irq == 0)
+               return -ENOSPC;
+
+       msidesc->msi_attrib.multiple = ilog2(nvec);
+       for (sub_handle = 0; sub_handle < nvec; sub_handle++) {
+               if (!sub_handle) {
+                       index = msi_alloc_remapped_irq(dev, irq, nvec);
+                       if (index < 0) {
+                               ret = index;
+                               goto error;
+                       }
+               } else {
+                       ret = msi_setup_remapped_irq(dev, irq + sub_handle,
+                                                    index, sub_handle);
+                       if (ret < 0)
+                               goto error;
+               }
+               ret = setup_msi_irq(dev, msidesc, irq, sub_handle);
+               if (ret < 0)
+                       goto error;
+       }
+       return 0;
+
+error:
+       destroy_irqs(irq, nvec);
+
+       /*
+        * Restore the altered MSI descriptor fields and keep the just-destroyed
+        * IRQs from being torn down again in default_teardown_msi_irqs()
+        */
+       msidesc->irq = 0;
+       msidesc->msi_attrib.multiple = 0;
+
+       return ret;
+}
+
+static int do_setup_msix_irqs(struct pci_dev *dev, int nvec)
+{
+       int node, ret, sub_handle, index = 0;
+       struct msi_desc *msidesc;
+       unsigned int irq;
+
+       node            = dev_to_node(&dev->dev);
+       irq             = get_nr_irqs_gsi();
+       sub_handle      = 0;
+
+       list_for_each_entry(msidesc, &dev->msi_list, list) {
+
+               irq = create_irq_nr(irq, node);
+               if (irq == 0)
+                       return -1;
+
+               if (sub_handle == 0)
+                       ret = index = msi_alloc_remapped_irq(dev, irq, nvec);
+               else
+                       ret = msi_setup_remapped_irq(dev, irq, index, sub_handle);
+
+               if (ret < 0)
+                       goto error;
+
+               ret = setup_msi_irq(dev, msidesc, irq, 0);
+               if (ret < 0)
+                       goto error;
+
+               sub_handle += 1;
+               irq        += 1;
+       }
+
+       return 0;
+
+error:
+       destroy_irq(irq);
+       return ret;
+}
+
+static int irq_remapping_setup_msi_irqs(struct pci_dev *dev,
+                                       int nvec, int type)
+{
+       if (type == PCI_CAP_ID_MSI)
+               return do_setup_msi_irqs(dev, nvec);
+       else
+               return do_setup_msix_irqs(dev, nvec);
+}
+
+void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
+{
+       /*
+        * Intr-remapping uses pin number as the virtual vector
+        * in the RTE. Actual vector is programmed in
+        * intr-remapping table entry. Hence for the io-apic
+        * EOI we use the pin number.
+        */
+       io_apic_eoi(apic, pin);
+}
+
+static void __init irq_remapping_modify_x86_ops(void)
+{
+       x86_io_apic_ops.disable         = irq_remapping_disable_io_apic;
+       x86_io_apic_ops.set_affinity    = set_remapped_irq_affinity;
+       x86_io_apic_ops.setup_entry     = setup_ioapic_remapped_entry;
+       x86_io_apic_ops.eoi_ioapic_pin  = eoi_ioapic_pin_remapped;
+       x86_msi.setup_msi_irqs          = irq_remapping_setup_msi_irqs;
+       x86_msi.setup_hpet_msi          = setup_hpet_msi_remapped;
+       x86_msi.compose_msi_msg         = compose_remapped_msi_msg;
+}
+
 static __init int setup_nointremap(char *str)
 {
        disable_irq_remap = 1;
@@ -79,15 +232,24 @@ int __init irq_remapping_prepare(void)
 
 int __init irq_remapping_enable(void)
 {
+       int ret;
+
        if (!remap_ops || !remap_ops->enable)
                return -ENODEV;
 
-       return remap_ops->enable();
+       ret = remap_ops->enable();
+
+       if (irq_remapping_enabled)
+               irq_remapping_modify_x86_ops();
+
+       return ret;
 }
 
 void irq_remapping_disable(void)
 {
-       if (!remap_ops || !remap_ops->disable)
+       if (!irq_remapping_enabled ||
+           !remap_ops ||
+           !remap_ops->disable)
                return;
 
        remap_ops->disable();
@@ -95,7 +257,9 @@ void irq_remapping_disable(void)
 
 int irq_remapping_reenable(int mode)
 {
-       if (!remap_ops || !remap_ops->reenable)
+       if (!irq_remapping_enabled ||
+           !remap_ops ||
+           !remap_ops->reenable)
                return 0;
 
        return remap_ops->reenable(mode);
@@ -103,6 +267,9 @@ int irq_remapping_reenable(int mode)
 
 int __init irq_remap_enable_fault_handling(void)
 {
+       if (!irq_remapping_enabled)
+               return 0;
+
        if (!remap_ops || !remap_ops->enable_faulting)
                return -ENODEV;
 
@@ -133,23 +300,28 @@ int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask,
 
 void free_remapped_irq(int irq)
 {
+       struct irq_cfg *cfg = irq_get_chip_data(irq);
+
        if (!remap_ops || !remap_ops->free_irq)
                return;
 
-       remap_ops->free_irq(irq);
+       if (irq_remapped(cfg))
+               remap_ops->free_irq(irq);
 }
 
 void compose_remapped_msi_msg(struct pci_dev *pdev,
                              unsigned int irq, unsigned int dest,
                              struct msi_msg *msg, u8 hpet_id)
 {
-       if (!remap_ops || !remap_ops->compose_msi_msg)
-               return;
+       struct irq_cfg *cfg = irq_get_chip_data(irq);
 
-       remap_ops->compose_msi_msg(pdev, irq, dest, msg, hpet_id);
+       if (!irq_remapped(cfg))
+               native_compose_msi_msg(pdev, irq, dest, msg, hpet_id);
+       else if (remap_ops && remap_ops->compose_msi_msg)
+               remap_ops->compose_msi_msg(pdev, irq, dest, msg, hpet_id);
 }
 
-int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
+static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
 {
        if (!remap_ops || !remap_ops->msi_alloc_irq)
                return -ENODEV;
@@ -157,8 +329,8 @@ int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
        return remap_ops->msi_alloc_irq(pdev, irq, nvec);
 }
 
-int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
-                          int index, int sub_handle)
+static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
+                                 int index, int sub_handle)
 {
        if (!remap_ops || !remap_ops->msi_setup_irq)
                return -ENODEV;
@@ -173,3 +345,42 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
 
        return remap_ops->setup_hpet_msi(irq, id);
 }
+
+void panic_if_irq_remap(const char *msg)
+{
+       if (irq_remapping_enabled)
+               panic(msg);
+}
+
+static void ir_ack_apic_edge(struct irq_data *data)
+{
+       ack_APIC_irq();
+}
+
+static void ir_ack_apic_level(struct irq_data *data)
+{
+       ack_APIC_irq();
+       eoi_ioapic_irq(data->irq, data->chip_data);
+}
+
+static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
+{
+       seq_printf(p, " IR-%s", data->chip->name);
+}
+
+void irq_remap_modify_chip_defaults(struct irq_chip *chip)
+{
+       chip->irq_print_chip = ir_print_prefix;
+       chip->irq_ack = ir_ack_apic_edge;
+       chip->irq_eoi = ir_ack_apic_level;
+       chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
+}
+
+bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
+{
+       if (!irq_remapped(cfg))
+               return false;
+       irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
+       irq_remap_modify_chip_defaults(chip);
+       return true;
+}
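do_setup_msi_irqs() rounds the requested vector count up to a power of two and stores ilog2() of it in msi_attrib.multiple because that is how classic (non-MSI-X) MSI works: the capability's Multiple Message fields encode the granted count as log2 of 1, 2, 4, 8, 16 or 32. A small standalone illustration of the arithmetic:

    #include <stdio.h>

    /* Userspace stand-ins for the kernel's __roundup_pow_of_two() and ilog2(). */
    static unsigned int roundup_pow2(unsigned int n)
    {
            unsigned int p = 1;

            while (p < n)
                    p <<= 1;
            return p;
    }

    static unsigned int log2_u(unsigned int n)
    {
            unsigned int l = 0;

            while (n >>= 1)
                    l++;
            return l;
    }

    int main(void)
    {
            unsigned int requested = 5;
            unsigned int granted = roundup_pow2(requested);

            /* Prints: requested 5 -> allocate 8 vectors, multiple field 3 */
            printf("requested %u -> allocate %u vectors, multiple field %u\n",
                   requested, granted, log2_u(granted));
            return 0;
    }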
index 95363acb583fe3613a7d82bbbf7255ea1455c6eb..ecb63767040553b16fa8c469f48a10799c6e8e18 100644 (file)
@@ -34,6 +34,7 @@ struct msi_msg;
 extern int disable_irq_remap;
 extern int disable_sourceid_checking;
 extern int no_x2apic_optout;
+extern int irq_remapping_enabled;
 
 struct irq_remap_ops {
        /* Check whether Interrupt Remapping is supported */
index 68452b768da23dc31c5409b113446802476886c2..03a0a01a405451c8a85e0e9b83647c105a710f53 100644 (file)
@@ -248,6 +248,8 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag,
                CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l,
                CAPIMSG_CONTROL(data));
        l -= 12;
+       if (l <= 0)
+               return;
        dbgline = kmalloc(3 * l, GFP_ATOMIC);
        if (!dbgline)
                return;
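The added length check matters because l -= 12 goes negative for messages shorter than the fixed header, and a negative int handed to kmalloc() is converted to an enormous size_t, so nothing sensible can be allocated or dumped. A userspace illustration of that conversion:

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
            int l = 8;      /* payload shorter than the 12-byte header */

            l -= 12;        /* now -4, so 3 * l is -12 */
            printf("3 * l seen as a size_t: %zu\n", (size_t)(3 * l));
            return 0;
    }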
index 5f21f629b7aebeac0163083a2a83a50f4e53c1a8..deda591f70b9fab4be605442fb1f6afe9bd9f7be 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/mISDNif.h>
 #include <linux/kthread.h>
+#include <linux/sched.h>
 #include "core.h"
 
 static u_int   *debug;
@@ -202,6 +203,9 @@ static int
 mISDNStackd(void *data)
 {
        struct mISDNstack *st = data;
+#ifdef MISDN_MSG_STATS
+       cputime_t utime, stime;
+#endif
        int err = 0;
 
        sigfillset(&current->blocked);
@@ -303,9 +307,10 @@ mISDNStackd(void *data)
               "msg %d sleep %d stopped\n",
               dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt,
               st->stopped_cnt);
+       task_cputime(st->thread, &utime, &stime);
        printk(KERN_DEBUG
               "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n",
-              dev_name(&st->dev->dev), st->thread->utime, st->thread->stime);
+              dev_name(&st->dev->dev), utime, stime);
        printk(KERN_DEBUG
               "mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n",
               dev_name(&st->dev->dev), st->thread->nvcsw, st->thread->nivcsw);
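The mISDN hunk stops reading st->thread->utime and stime directly and snapshots them through task_cputime() instead; with the newer cputime accounting modes those fields are not guaranteed to be up to date on the task struct, so the accessor is the supported way to read a thread's CPU times. The accessor in isolation, as a sketch:

    #include <linux/printk.h>
    #include <linux/sched.h>

    /* Illustrative helper: snapshot and print a task's user/system time. */
    static void example_print_cputime(struct task_struct *tsk)
    {
            cputime_t utime, stime;

            task_cputime(tsk, &utime, &stime);
            pr_debug("utime=%lu stime=%lu\n",
                     (unsigned long)utime, (unsigned long)stime);
    }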
index 3d8984edeff79b4dda6910670d95d34802411f6e..9e58dbd8d8cba839da9613e5f4e4060795cd0dc1 100644 (file)
@@ -340,24 +340,22 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
 }
 
 /*
- * validate_rebuild_devices
+ * validate_raid_redundancy
  * @rs
  *
- * Determine if the devices specified for rebuild can result in a valid
- * usable array that is capable of rebuilding the given devices.
+ * Determine if there are enough devices in the array that haven't
+ * failed (or are being rebuilt) to form a usable array.
  *
  * Returns: 0 on success, -EINVAL on failure.
  */
-static int validate_rebuild_devices(struct raid_set *rs)
+static int validate_raid_redundancy(struct raid_set *rs)
 {
        unsigned i, rebuild_cnt = 0;
        unsigned rebuilds_per_group, copies, d;
 
-       if (!(rs->print_flags & DMPF_REBUILD))
-               return 0;
-
        for (i = 0; i < rs->md.raid_disks; i++)
-               if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
+               if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
+                   !rs->dev[i].rdev.sb_page)
                        rebuild_cnt++;
 
        switch (rs->raid_type->level) {
@@ -393,27 +391,24 @@ static int validate_rebuild_devices(struct raid_set *rs)
                 *          A    A    B    B    C
                 *          C    D    D    E    E
                 */
-               rebuilds_per_group = 0;
                for (i = 0; i < rs->md.raid_disks * copies; i++) {
+                       if (!(i % copies))
+                               rebuilds_per_group = 0;
                        d = i % rs->md.raid_disks;
-                       if (!test_bit(In_sync, &rs->dev[d].rdev.flags) &&
+                       if ((!rs->dev[d].rdev.sb_page ||
+                            !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
                            (++rebuilds_per_group >= copies))
                                goto too_many;
-                       if (!((i + 1) % copies))
-                               rebuilds_per_group = 0;
                }
                break;
        default:
-               DMERR("The rebuild parameter is not supported for %s",
-                     rs->raid_type->name);
-               rs->ti->error = "Rebuild not supported for this RAID type";
-               return -EINVAL;
+               if (rebuild_cnt)
+                       return -EINVAL;
        }
 
        return 0;
 
 too_many:
-       rs->ti->error = "Too many rebuild devices specified";
        return -EINVAL;
 }
 
@@ -664,9 +659,6 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
        }
        rs->md.dev_sectors = sectors_per_dev;
 
-       if (validate_rebuild_devices(rs))
-               return -EINVAL;
-
        /* Assume there are no metadata devices until the drives are parsed */
        rs->md.persistent = 0;
        rs->md.external = 1;
@@ -995,28 +987,10 @@ static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
 static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 {
        int ret;
-       unsigned redundancy = 0;
        struct raid_dev *dev;
        struct md_rdev *rdev, *tmp, *freshest;
        struct mddev *mddev = &rs->md;
 
-       switch (rs->raid_type->level) {
-       case 1:
-               redundancy = rs->md.raid_disks - 1;
-               break;
-       case 4:
-       case 5:
-       case 6:
-               redundancy = rs->raid_type->parity_devs;
-               break;
-       case 10:
-               redundancy = raid10_md_layout_to_copies(mddev->layout) - 1;
-               break;
-       default:
-               ti->error = "Unknown RAID type";
-               return -EINVAL;
-       }
-
        freshest = NULL;
        rdev_for_each_safe(rdev, tmp, mddev) {
                /*
@@ -1045,44 +1019,43 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
                        break;
                default:
                        dev = container_of(rdev, struct raid_dev, rdev);
-                       if (redundancy--) {
-                               if (dev->meta_dev)
-                                       dm_put_device(ti, dev->meta_dev);
-
-                               dev->meta_dev = NULL;
-                               rdev->meta_bdev = NULL;
+                       if (dev->meta_dev)
+                               dm_put_device(ti, dev->meta_dev);
 
-                               if (rdev->sb_page)
-                                       put_page(rdev->sb_page);
+                       dev->meta_dev = NULL;
+                       rdev->meta_bdev = NULL;
 
-                               rdev->sb_page = NULL;
+                       if (rdev->sb_page)
+                               put_page(rdev->sb_page);
 
-                               rdev->sb_loaded = 0;
+                       rdev->sb_page = NULL;
 
-                               /*
-                                * We might be able to salvage the data device
-                                * even though the meta device has failed.  For
-                                * now, we behave as though '- -' had been
-                                * set for this device in the table.
-                                */
-                               if (dev->data_dev)
-                                       dm_put_device(ti, dev->data_dev);
+                       rdev->sb_loaded = 0;
 
-                               dev->data_dev = NULL;
-                               rdev->bdev = NULL;
+                       /*
+                        * We might be able to salvage the data device
+                        * even though the meta device has failed.  For
+                        * now, we behave as though '- -' had been
+                        * set for this device in the table.
+                        */
+                       if (dev->data_dev)
+                               dm_put_device(ti, dev->data_dev);
 
-                               list_del(&rdev->same_set);
+                       dev->data_dev = NULL;
+                       rdev->bdev = NULL;
 
-                               continue;
-                       }
-                       ti->error = "Failed to load superblock";
-                       return ret;
+                       list_del(&rdev->same_set);
                }
        }
 
        if (!freshest)
                return 0;
 
+       if (validate_raid_redundancy(rs)) {
+               rs->ti->error = "Insufficient redundancy to activate array";
+               return -EINVAL;
+       }
+
        /*
         * Validation of the freshest device provides the source of
         * validation for the remaining devices.
@@ -1432,7 +1405,7 @@ static void raid_resume(struct dm_target *ti)
 
 static struct target_type raid_target = {
        .name = "raid",
-       .version = {1, 4, 0},
+       .version = {1, 4, 1},
        .module = THIS_MODULE,
        .ctr = raid_ctr,
        .dtr = raid_dtr,
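The rewritten raid10 branch of validate_raid_redundancy() walks raid_disks * copies positions, resetting its counter at each group boundary, so it effectively asks: does every group of copies consecutive mirror positions keep at least one member that is in-sync and has a superblock? The same check reduced to standalone C, with device health collapsed into one boolean:

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirror of the raid10 "near" layout check above: fail only if some
     * group of <copies> consecutive mirror positions lost all its members. */
    static bool raid10_near_usable(const bool *healthy, int raid_disks, int copies)
    {
            int i, d, rebuilds_per_group = 0;

            for (i = 0; i < raid_disks * copies; i++) {
                    if (!(i % copies))
                            rebuilds_per_group = 0;
                    d = i % raid_disks;
                    if (!healthy[d] && ++rebuilds_per_group >= copies)
                            return false;
            }
            return true;
    }

    int main(void)
    {
            /* 5 devices, 2 copies: groups are {0,1} {2,3} {4,0} {1,2} {3,4} */
            bool healthy[5] = { true, false, true, true, false };

            printf("array usable: %s\n",
                   raid10_near_usable(healthy, 5, 2) ? "yes" : "no");
            return 0;
    }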
index 675ae5274016da37484b2c05bfefccad8e158543..5409607d487533593d29c2207d857896121364a3 100644 (file)
@@ -2746,19 +2746,9 @@ static int thin_iterate_devices(struct dm_target *ti,
        return 0;
 }
 
-/*
- * A thin device always inherits its queue limits from its pool.
- */
-static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
-{
-       struct thin_c *tc = ti->private;
-
-       *limits = bdev_get_queue(tc->pool_dev->bdev)->limits;
-}
-
 static struct target_type thin_target = {
        .name = "thin",
-       .version = {1, 6, 0},
+       .version = {1, 7, 0},
        .module = THIS_MODULE,
        .ctr = thin_ctr,
        .dtr = thin_dtr,
@@ -2767,7 +2757,6 @@ static struct target_type thin_target = {
        .postsuspend = thin_postsuspend,
        .status = thin_status,
        .iterate_devices = thin_iterate_devices,
-       .io_hints = thin_io_hints,
 };
 
 /*----------------------------------------------------------------*/
index c72e4d5a96178c6a542442e2e66bd3da92473b50..314a0e2faf79d6c097e7c1201bf25af8600baaf6 100644 (file)
@@ -1188,6 +1188,7 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
 {
        struct dm_target *ti;
        sector_t len;
+       unsigned num_requests;
 
        do {
                ti = dm_table_find_target(ci->map, ci->sector);
@@ -1200,7 +1201,8 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
                 * reconfiguration might also have changed that since the
                 * check was performed.
                 */
-               if (!get_num_requests || !get_num_requests(ti))
+               num_requests = get_num_requests ? get_num_requests(ti) : 0;
+               if (!num_requests)
                        return -EOPNOTSUPP;
 
                if (is_split_required && !is_split_required(ti))
@@ -1208,7 +1210,7 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
                else
                        len = min(ci->sector_count, max_io_len(ci->sector, ti));
 
-               __issue_target_requests(ci, ti, ti->num_discard_requests, len);
+               __issue_target_requests(ci, ti, num_requests, len);
 
                ci->sector += len;
        } while (ci->sector_count -= len);
index 49d95040096a42589627d7e3446df5f470c2f919..0223ad255cb4c7e1f3218aa5a0f5debb076963fd 100644 (file)
@@ -1820,7 +1820,7 @@ static int dvb_frontend_ioctl(struct file *file,
        struct dvb_frontend *fe = dvbdev->priv;
        struct dtv_frontend_properties *c = &fe->dtv_property_cache;
        struct dvb_frontend_private *fepriv = fe->frontend_priv;
-       int err = -ENOTTY;
+       int err = -EOPNOTSUPP;
 
        dev_dbg(fe->dvb->device, "%s: (%d)\n", __func__, _IOC_NR(cmd));
        if (fepriv->exit != DVB_FE_NO_EXIT)
@@ -1938,7 +1938,7 @@ static int dvb_frontend_ioctl_properties(struct file *file,
                }
 
        } else
-               err = -ENOTTY;
+               err = -EOPNOTSUPP;
 
 out:
        kfree(tvp);
@@ -2071,7 +2071,7 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
        struct dvb_frontend *fe = dvbdev->priv;
        struct dvb_frontend_private *fepriv = fe->frontend_priv;
        struct dtv_frontend_properties *c = &fe->dtv_property_cache;
-       int err = -ENOTTY;
+       int err = -EOPNOTSUPP;
 
        switch (cmd) {
        case FE_GET_INFO: {
index 8a8d42fe26332e0765f6ebad93b435b954d42da0..d4e7567b367c04d25b9a21ba4c50d0db314596aa 100644 (file)
@@ -556,7 +556,7 @@ static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
        mutex_lock(&info->lock);
 
        format = __find_format(info, fh, fmt->which, info->res_type);
-       if (!format)
+       if (format)
                fmt->format = *format;
        else
                ret = -EINVAL;
index 1cf8293c0fb048054177b82e0f0ce44b704de81c..4a980e029ca70163b30288663a7f273943002e18 100644 (file)
@@ -23,8 +23,8 @@
 #include <linux/slab.h>
 #include <linux/videodev2.h>
 #include <linux/of.h>
+#include <linux/platform_data/imx-iram.h>
 
-#include <mach/iram.h>
 #include <media/v4l2-ctrls.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ioctl.h>
index e0d73a642186d34ae63d841cabe3f42c57604acd..8dac17511e618c84d27a9db8bbfbd7587f5c1d2a 100644 (file)
@@ -35,9 +35,6 @@
 #include <linux/vmalloc.h>
 #include <media/v4l2-dev.h>
 #include <media/v4l2-ioctl.h>
-#include <plat/iommu.h>
-#include <plat/iovmm.h>
-#include <plat/omap-pm.h>
 
 #include "ispvideo.h"
 #include "isp.h"
index 4ab99f3a7b0950310d89470d7be47854f4f6f219..b4a68ecf0ca78731ea98b0178c45dac926378e3d 100644 (file)
@@ -593,7 +593,7 @@ static int __fimc_md_create_flite_source_links(struct fimc_md *fmd)
 {
        struct media_entity *source, *sink;
        unsigned int flags = MEDIA_LNK_FL_ENABLED;
-       int i, ret;
+       int i, ret = 0;
 
        for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
                struct fimc_lite *fimc = fmd->fimc_lite[i];
index 379f574337119e4aecc60896e3ae0fc6fc336e39..681bc6ba149db1bdf380b483c32b9babfb335a58 100644 (file)
@@ -412,62 +412,48 @@ leave_handle_frame:
 }
 
 /* Error handling for interrupt */
-static void s5p_mfc_handle_error(struct s5p_mfc_ctx *ctx,
-                                unsigned int reason, unsigned int err)
+static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
+               struct s5p_mfc_ctx *ctx, unsigned int reason, unsigned int err)
 {
-       struct s5p_mfc_dev *dev;
        unsigned long flags;
 
-       /* If no context is available then all necessary
-        * processing has been done. */
-       if (ctx == NULL)
-               return;
-
-       dev = ctx->dev;
        mfc_err("Interrupt Error: %08x\n", err);
-       s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
-       wake_up_dev(dev, reason, err);
 
-       /* Error recovery is dependent on the state of context */
-       switch (ctx->state) {
-       case MFCINST_INIT:
-               /* This error had to happen while acquireing instance */
-       case MFCINST_GOT_INST:
-               /* This error had to happen while parsing the header */
-       case MFCINST_HEAD_PARSED:
-               /* This error had to happen while setting dst buffers */
-       case MFCINST_RETURN_INST:
-               /* This error had to happen while releasing instance */
-               clear_work_bit(ctx);
-               wake_up_ctx(ctx, reason, err);
-               if (test_and_clear_bit(0, &dev->hw_lock) == 0)
-                       BUG();
-               s5p_mfc_clock_off();
-               ctx->state = MFCINST_ERROR;
-               break;
-       case MFCINST_FINISHING:
-       case MFCINST_FINISHED:
-       case MFCINST_RUNNING:
-               /* It is higly probable that an error occured
-                * while decoding a frame */
-               clear_work_bit(ctx);
-               ctx->state = MFCINST_ERROR;
-               /* Mark all dst buffers as having an error */
-               spin_lock_irqsave(&dev->irqlock, flags);
-               s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->dst_queue,
-                               &ctx->vq_dst);
-               /* Mark all src buffers as having an error */
-               s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue, &ctx->src_queue,
-                               &ctx->vq_src);
-               spin_unlock_irqrestore(&dev->irqlock, flags);
-               if (test_and_clear_bit(0, &dev->hw_lock) == 0)
-                       BUG();
-               s5p_mfc_clock_off();
-               break;
-       default:
-               mfc_err("Encountered an error interrupt which had not been handled\n");
-               break;
+       if (ctx != NULL) {
+               /* Error recovery is dependent on the state of context */
+               switch (ctx->state) {
+               case MFCINST_RES_CHANGE_INIT:
+               case MFCINST_RES_CHANGE_FLUSH:
+               case MFCINST_RES_CHANGE_END:
+               case MFCINST_FINISHING:
+               case MFCINST_FINISHED:
+               case MFCINST_RUNNING:
+                       /* It is highly probable that an error occurred
+                        * while decoding a frame */
+                       clear_work_bit(ctx);
+                       ctx->state = MFCINST_ERROR;
+                       /* Mark all dst buffers as having an error */
+                       spin_lock_irqsave(&dev->irqlock, flags);
+                       s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue,
+                                               &ctx->dst_queue, &ctx->vq_dst);
+                       /* Mark all src buffers as having an error */
+                       s5p_mfc_hw_call(dev->mfc_ops, cleanup_queue,
+                                               &ctx->src_queue, &ctx->vq_src);
+                       spin_unlock_irqrestore(&dev->irqlock, flags);
+                       wake_up_ctx(ctx, reason, err);
+                       break;
+               default:
+                       clear_work_bit(ctx);
+                       ctx->state = MFCINST_ERROR;
+                       wake_up_ctx(ctx, reason, err);
+                       break;
+               }
        }
+       if (test_and_clear_bit(0, &dev->hw_lock) == 0)
+               BUG();
+       s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
+       s5p_mfc_clock_off();
+       wake_up_dev(dev, reason, err);
        return;
 }
 
@@ -632,7 +618,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
                                dev->warn_start)
                        s5p_mfc_handle_frame(ctx, reason, err);
                else
-                       s5p_mfc_handle_error(ctx, reason, err);
+                       s5p_mfc_handle_error(dev, ctx, reason, err);
                clear_bit(0, &dev->enter_suspend);
                break;
 
index e10e525f33e51f0a14b5c3f5b7ef0006ae3524cb..296941a9ae25decc88dd4d0da8b2240319eb2b34 100644 (file)
@@ -374,6 +374,7 @@ static int usb_keene_probe(struct usb_interface *intf,
        radio->vdev.ioctl_ops = &usb_keene_ioctl_ops;
        radio->vdev.lock = &radio->lock;
        radio->vdev.release = video_device_release_empty;
+       radio->vdev.vfl_dir = VFL_DIR_TX;
 
        radio->usbdev = interface_to_usbdev(intf);
        radio->intf = intf;
index a082e400ed0f6ed8022fd3562231e5cc969c10d2..1507c9d508d7c59e98ea0f58cbc017726b3329c5 100644 (file)
@@ -250,6 +250,7 @@ static struct video_device radio_si4713_vdev_template = {
        .name                   = "radio-si4713",
        .release                = video_device_release,
        .ioctl_ops              = &radio_si4713_ioctl_ops,
+       .vfl_dir                = VFL_DIR_TX,
 };
 
 /* Platform driver interface */
index c48be195bbad59a917141489887a8b4a1356a970..cabbe3adf435b6a9f3ef2176109489524565513f 100644 (file)
@@ -1971,6 +1971,7 @@ static struct video_device wl1273_viddev_template = {
        .ioctl_ops              = &wl1273_ioctl_ops,
        .name                   = WL1273_FM_DRIVER_NAME,
        .release                = wl1273_vdev_release,
+       .vfl_dir                = VFL_DIR_TX,
 };
 
 static int wl1273_fm_radio_remove(struct platform_device *pdev)
index 048de45360360251095639d1422f9931853d3fba..0a8ee8fab9242476c14c153b0cc00b51387a7b26 100644 (file)
@@ -518,6 +518,16 @@ static struct video_device fm_viddev_template = {
        .ioctl_ops = &fm_drv_ioctl_ops,
        .name = FM_DRV_NAME,
        .release = video_device_release,
+       /*
+        * To ensure both the tuner and modulator ioctls are accessible we
+        * set the vfl_dir to M2M to indicate this.
+        *
+        * It is not really a mem2mem device of course, but it can both receive
+        * and transmit using the same radio device. It's the only radio driver
+        * that does this and it should really be split into two radio devices,
+        * but that would affect applications using this driver.
+        */
+       .vfl_dir = VFL_DIR_M2M,
 };
 
 int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
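The common thread in the radio hunks is struct video_device's vfl_dir field: the V4L2 core uses it to decide which ioctls a node exposes, so a transmit-only FM device has to say VFL_DIR_TX or its modulator ioctls get filtered out, while the wl128x combined receiver/transmitter above deliberately picks VFL_DIR_M2M to keep both tuner and modulator ioctls reachable. A transmit-only template as a sketch (hypothetical driver; the ioctl ops are assumed to be defined elsewhere):

    #include <media/v4l2-dev.h>
    #include <media/v4l2-ioctl.h>

    /* Hypothetical ops table; a real driver fills this in. */
    static const struct v4l2_ioctl_ops example_tx_ioctl_ops;

    static struct video_device example_tx_template = {
            .name           = "example-fm-tx",
            .ioctl_ops      = &example_tx_ioctl_ops,
            .release        = video_device_release_empty,
            .vfl_dir        = VFL_DIR_TX,   /* modulator ioctls allowed, tuner ones dropped */
    };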
index 40ad6687ee5dee1e53cad6489a386438f5d2a483..3773a8a745df9323719d4aaeb6dd949fa0e61cad 100644 (file)
@@ -381,6 +381,7 @@ static const struct sd_desc sd_desc = {
 /* -- module initialisation -- */
 static const struct usb_device_id device_table[] = {
        {USB_DEVICE(0x045e, 0x02ae)},
+       {USB_DEVICE(0x045e, 0x02bf)},
        {}
 };
 
index 70511d5f953857492aca04655cdf079836a998dc..1220340e76028654385ec448c7a98a310d3d89f6 100644 (file)
@@ -496,7 +496,7 @@ static void reg_w(struct gspca_dev *gspca_dev,
        }
 }
 
-static void i2c_w(struct gspca_dev *gspca_dev, const __u8 *buffer)
+static void i2c_w(struct gspca_dev *gspca_dev, const u8 *buf)
 {
        int retry = 60;
 
@@ -504,16 +504,19 @@ static void i2c_w(struct gspca_dev *gspca_dev, const __u8 *buffer)
                return;
 
        /* is i2c ready */
-       reg_w(gspca_dev, 0x08, buffer, 8);
+       reg_w(gspca_dev, 0x08, buf, 8);
        while (retry--) {
                if (gspca_dev->usb_err < 0)
                        return;
-               msleep(10);
+               msleep(1);
                reg_r(gspca_dev, 0x08);
                if (gspca_dev->usb_buf[0] & 0x04) {
                        if (gspca_dev->usb_buf[0] & 0x08) {
                                dev_err(gspca_dev->v4l2_dev.dev,
-                                       "i2c write error\n");
+                                       "i2c error writing %02x %02x %02x %02x"
+                                       " %02x %02x %02x %02x\n",
+                                       buf[0], buf[1], buf[2], buf[3],
+                                       buf[4], buf[5], buf[6], buf[7]);
                                gspca_dev->usb_err = -EIO;
                        }
                        return;
@@ -530,7 +533,7 @@ static void i2c_w_vector(struct gspca_dev *gspca_dev,
        for (;;) {
                if (gspca_dev->usb_err < 0)
                        return;
-               reg_w(gspca_dev, 0x08, *buffer, 8);
+               i2c_w(gspca_dev, *buffer);
                len -= 8;
                if (len <= 0)
                        break;
index 5a86047b846f484c2d1c22d88023878d2249b646..36307a9028a932b6414523e0a189c613479daa90 100644 (file)
@@ -1550,6 +1550,7 @@ static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
                        0,
                        gspca_dev->usb_buf, 8,
                        500);
+       msleep(2);
        if (ret < 0) {
                pr_err("i2c_w1 err %d\n", ret);
                gspca_dev->usb_err = ret;
index 2bb7613ddebbb0590d702416ac9f7c5872ffbbb5..d5baab17a5ef77b6bbeb85f8cf3e2c1b0127b5b7 100644 (file)
@@ -1431,8 +1431,10 @@ int uvc_ctrl_set(struct uvc_video_chain *chain,
        int ret;
 
        ctrl = uvc_find_control(chain, xctrl->id, &mapping);
-       if (ctrl == NULL || (ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR) == 0)
+       if (ctrl == NULL)
                return -EINVAL;
+       if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR))
+               return -EACCES;
 
        /* Clamp out of range values. */
        switch (mapping->v4l2_type) {
index f2ee8c6b0d8dbb8ceb5d7ebad4bcfd85d878b6dd..68d59b52749288e00dcb816f42952516f160f6db 100644 (file)
@@ -657,8 +657,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
                        ret = uvc_ctrl_get(chain, ctrl);
                        if (ret < 0) {
                                uvc_ctrl_rollback(handle);
-                               ctrls->error_idx = ret == -ENOENT
-                                                ? ctrls->count : i;
+                               ctrls->error_idx = i;
                                return ret;
                        }
                }
@@ -686,8 +685,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg)
                        ret = uvc_ctrl_set(chain, ctrl);
                        if (ret < 0) {
                                uvc_ctrl_rollback(handle);
-                               ctrls->error_idx = (ret == -ENOENT &&
-                                                   cmd == VIDIOC_S_EXT_CTRLS)
+                               ctrls->error_idx = cmd == VIDIOC_S_EXT_CTRLS
                                                 ? ctrls->count : i;
                                return ret;
                        }
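The error_idx changes above follow the V4L2 extended-controls specification: for a failed get or try, the index of the offending control is reported, while for VIDIOC_S_EXT_CTRLS a failure during validation (before anything is written to the hardware) is reported as error_idx == count. A minimal userspace sketch of how an application is expected to read the field (file-descriptor setup omitted, so treat it as illustrative only):

        #include <errno.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <linux/videodev2.h>

        static void set_controls(int fd, struct v4l2_ext_controls *ctrls)
        {
                if (ioctl(fd, VIDIOC_S_EXT_CTRLS, ctrls) == 0)
                        return;

                if (ctrls->error_idx == ctrls->count)
                        fprintf(stderr, "rejected during validation: %s\n",
                                strerror(errno));
                else
                        fprintf(stderr, "control %u failed: %s\n",
                                ctrls->error_idx, strerror(errno));
        }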
index 9f81be23a81f06a1d527ac792071e1b7a6154979..e02c4797b1c65fe46ef1352fa72f84e05317fe6c 100644 (file)
@@ -921,8 +921,10 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
                 * In videobuf we use our internal V4l2_planes struct for
                 * single-planar buffers as well, for simplicity.
                 */
-               if (V4L2_TYPE_IS_OUTPUT(b->type))
+               if (V4L2_TYPE_IS_OUTPUT(b->type)) {
                        v4l2_planes[0].bytesused = b->bytesused;
+                       v4l2_planes[0].data_offset = 0;
+               }
 
                if (b->memory == V4L2_MEMORY_USERPTR) {
                        v4l2_planes[0].m.userptr = b->m.userptr;
index 1c0abd4dfc43eeef01bb096bb33e5a1bdc08ef5e..ff553babf455025c2a5ce303297a15f404d2d5e3 100644 (file)
@@ -237,6 +237,7 @@ config MFD_TPS65910
        depends on I2C=y && GPIOLIB
        select MFD_CORE
        select REGMAP_I2C
+       select REGMAP_IRQ
        select IRQ_DOMAIN
        help
          if you say yes here you get support for the TPS65910 series of
@@ -292,6 +293,7 @@ config TWL4030_CORE
        bool "Texas Instruments TWL4030/TWL5030/TWL6030/TPS659x0 Support"
        depends on I2C=y && GENERIC_HARDIRQS
        select IRQ_DOMAIN
+       select REGMAP_I2C
        help
          Say yes here if you have TWL4030 / TWL6030 family chip on your board.
          This core driver provides register access and IRQ handling
index e1650badd106da6e3992b219e77fdc35e20ad1c8..8b5d685ab98089bdbc0afa2b29d33064193d8f85 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/mfd/core.h>
 #include <linux/mfd/abx500.h>
 #include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500/ab8500-bm.h>
 #include <linux/mfd/dbx500-prcmu.h>
 #include <linux/regulator/ab8500.h>
 #include <linux/of.h>
@@ -749,6 +750,12 @@ static struct resource ab8500_charger_resources[] = {
                .end = AB8500_INT_CH_WD_EXP,
                .flags = IORESOURCE_IRQ,
        },
+       {
+               .name = "VBUS_CH_DROP_END",
+               .start = AB8500_INT_VBUS_CH_DROP_END,
+               .end = AB8500_INT_VBUS_CH_DROP_END,
+               .flags = IORESOURCE_IRQ,
+       },
 };
 
 static struct resource ab8500_btemp_resources[] = {
@@ -1011,40 +1018,32 @@ static struct mfd_cell ab8500_bm_devs[] = {
                .of_compatible = "stericsson,ab8500-charger",
                .num_resources = ARRAY_SIZE(ab8500_charger_resources),
                .resources = ab8500_charger_resources,
-#ifndef CONFIG_OF
                .platform_data = &ab8500_bm_data,
                .pdata_size = sizeof(ab8500_bm_data),
-#endif
        },
        {
                .name = "ab8500-btemp",
                .of_compatible = "stericsson,ab8500-btemp",
                .num_resources = ARRAY_SIZE(ab8500_btemp_resources),
                .resources = ab8500_btemp_resources,
-#ifndef CONFIG_OF
                .platform_data = &ab8500_bm_data,
                .pdata_size = sizeof(ab8500_bm_data),
-#endif
        },
        {
                .name = "ab8500-fg",
                .of_compatible = "stericsson,ab8500-fg",
                .num_resources = ARRAY_SIZE(ab8500_fg_resources),
                .resources = ab8500_fg_resources,
-#ifndef CONFIG_OF
                .platform_data = &ab8500_bm_data,
                .pdata_size = sizeof(ab8500_bm_data),
-#endif
        },
        {
                .name = "ab8500-chargalg",
                .of_compatible = "stericsson,ab8500-chargalg",
                .num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
                .resources = ab8500_chargalg_resources,
-#ifndef CONFIG_OF
                .platform_data = &ab8500_bm_data,
                .pdata_size = sizeof(ab8500_bm_data),
-#endif
        },
 };
 
index bc8a3edb6bbf2b2fdc9c3f15a425eafb0bcb23af..222c03a5ddc0b3d3e9a37af453d4a1745f1b35e3 100644 (file)
@@ -239,7 +239,12 @@ static int arizona_runtime_resume(struct device *dev)
                return ret;
        }
 
-       regcache_sync(arizona->regmap);
+       ret = regcache_sync(arizona->regmap);
+       if (ret != 0) {
+               dev_err(arizona->dev, "Failed to restore register cache\n");
+               regulator_disable(arizona->dcvdd);
+               return ret;
+       }
 
        return 0;
 }
index 74713bf5371fa42361dbb2342407c0dfe9c039b6..2bec5f0db3ee5e80733a88043d96b3219405d2f0 100644 (file)
@@ -176,14 +176,7 @@ int arizona_irq_init(struct arizona *arizona)
                aod = &wm5102_aod;
                irq = &wm5102_irq;
 
-               switch (arizona->rev) {
-               case 0:
-               case 1:
-                       ctrlif_error = false;
-                       break;
-               default:
-                       break;
-               }
+               ctrlif_error = false;
                break;
 #endif
 #ifdef CONFIG_MFD_WM5110
@@ -191,14 +184,7 @@ int arizona_irq_init(struct arizona *arizona)
                aod = &wm5110_aod;
                irq = &wm5110_irq;
 
-               switch (arizona->rev) {
-               case 0:
-               case 1:
-                       ctrlif_error = false;
-                       break;
-               default:
-                       break;
-               }
+               ctrlif_error = false;
                break;
 #endif
        default:
index ac74a4d1daead1848ef00e9ac096013d69e67a21..885e567803583a42473ffb532beead311ad68cf2 100644 (file)
 #include <linux/of_device.h>
 #endif
 
+/* I2C safe register check */
+static inline bool i2c_safe_reg(unsigned char reg)
+{
+       switch (reg) {
+       case DA9052_STATUS_A_REG:
+       case DA9052_STATUS_B_REG:
+       case DA9052_STATUS_C_REG:
+       case DA9052_STATUS_D_REG:
+       case DA9052_ADC_RES_L_REG:
+       case DA9052_ADC_RES_H_REG:
+       case DA9052_VDD_RES_REG:
+       case DA9052_ICHG_AV_REG:
+       case DA9052_TBAT_RES_REG:
+       case DA9052_ADCIN4_RES_REG:
+       case DA9052_ADCIN5_RES_REG:
+       case DA9052_ADCIN6_RES_REG:
+       case DA9052_TJUNC_RES_REG:
+       case DA9052_TSI_X_MSB_REG:
+       case DA9052_TSI_Y_MSB_REG:
+       case DA9052_TSI_LSB_REG:
+       case DA9052_TSI_Z_MSB_REG:
+               return true;
+       default:
+               return false;
+       }
+}
+
+/*
+ * There is an issue with DA9052 and DA9053_AA/BA/BB PMIC where the PMIC
+ * gets locked up or fails to respond following a system reset.
+ * This fix is to follow any read or write with a dummy read to a safe
+ * register.
+ */
+int da9052_i2c_fix(struct da9052 *da9052, unsigned char reg)
+{
+       int val;
+
+       switch (da9052->chip_id) {
+       case DA9052:
+       case DA9053_AA:
+       case DA9053_BA:
+       case DA9053_BB:
+               /* A dummy read to a safe register address. */
+               if (!i2c_safe_reg(reg))
+                       return regmap_read(da9052->regmap,
+                                          DA9052_PARK_REGISTER,
+                                          &val);
+               break;
+       default:
+               /*
+                * For other chips parking of I2C register
+                * to a safe place is not required.
+                */
+               break;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(da9052_i2c_fix);
+
 static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
 {
        int reg_val, ret;
@@ -83,6 +143,7 @@ static int da9052_i2c_probe(struct i2c_client *client,
 
        da9052->dev = &client->dev;
        da9052->chip_irq = client->irq;
+       da9052->fix_io = da9052_i2c_fix;
 
        i2c_set_clientdata(client, da9052);
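The new fix_io hook is what the da9052 core is expected to invoke after a register access on the affected chips, so every read or write is followed by a dummy read of DA9052_PARK_REGISTER unless the access already targeted one of the safe registers listed above. A rough sketch of the intended call pattern, assuming the real core plumbs fix_io into its existing register helpers (the wrapper name here is hypothetical):

        static int da9052_reg_read_parked(struct da9052 *da9052,
                                          unsigned char reg, unsigned int *val)
        {
                int ret = regmap_read(da9052->regmap, reg, val);

                /* park the bus on a safe register afterwards, if needed */
                if (ret == 0 && da9052->fix_io)
                        ret = da9052->fix_io(da9052, reg);
                return ret;
        }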
 
index dc8826d8d69da0d1ee8c29911dc6c95ade7f1da2..268f45d4239427bdb172f65e8c1768d4cd2bd435 100644 (file)
@@ -2524,7 +2524,7 @@ static bool read_mailbox_0(void)
 
                for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) {
                        if (ev & prcmu_irq_bit[n])
-                               generic_handle_irq(IRQ_PRCMU_BASE + n);
+                               generic_handle_irq(irq_find_mapping(db8500_irq_domain, n));
                }
                r = true;
                break;
@@ -2737,13 +2737,14 @@ static int db8500_irq_map(struct irq_domain *d, unsigned int virq,
 }
 
 static struct irq_domain_ops db8500_irq_ops = {
-        .map    = db8500_irq_map,
-        .xlate  = irq_domain_xlate_twocell,
+       .map    = db8500_irq_map,
+       .xlate  = irq_domain_xlate_twocell,
 };
 
 static int db8500_irq_init(struct device_node *np)
 {
-       int irq_base = -1;
+       int irq_base = 0;
+       int i;
 
        /* In the device tree case, just take some IRQs */
        if (!np)
@@ -2758,6 +2759,10 @@ static int db8500_irq_init(struct device_node *np)
                return -ENOSYS;
        }
 
+       /* All wakeups will be used, so create mappings for all */
+       for (i = 0; i < NUM_PRCMU_WAKEUPS; i++)
+               irq_create_mapping(db8500_irq_domain, i);
+
        return 0;
 }
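With a dynamically allocated linear domain there is no longer a fixed relationship between a PRCMU wakeup bit and a Linux irq number, so the handler has to translate through irq_find_mapping() instead of adding IRQ_PRCMU_BASE. Pre-creating all NUM_PRCMU_WAKEUPS mappings at init time, as above, guarantees the lookup cannot come back empty in interrupt context. A compressed sketch of the two halves together, assuming db8500_irq_domain was registered as a linear domain:

        static void example_init_wakeup_irqs(void)
        {
                int i;

                /* pre-create hwirq -> virq mappings for every wakeup source */
                for (i = 0; i < NUM_PRCMU_WAKEUPS; i++)
                        irq_create_mapping(db8500_irq_domain, i);
        }

        static void example_handle_wakeup(int n)
        {
                /* translate the hardware wakeup bit to its dynamic virq */
                generic_handle_irq(irq_find_mapping(db8500_irq_domain, n));
        }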
 
index f6878f8db57d105b663d399bc2830bbb0ce0b4bf..4d73963cd8f0188d2f9e09c9bbdcde0cf5c35007 100644 (file)
@@ -93,15 +93,6 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
        if (max77686 == NULL)
                return -ENOMEM;
 
-       max77686->regmap = regmap_init_i2c(i2c, &max77686_regmap_config);
-       if (IS_ERR(max77686->regmap)) {
-               ret = PTR_ERR(max77686->regmap);
-               dev_err(max77686->dev, "Failed to allocate register map: %d\n",
-                               ret);
-               kfree(max77686);
-               return ret;
-       }
-
        i2c_set_clientdata(i2c, max77686);
        max77686->dev = &i2c->dev;
        max77686->i2c = i2c;
@@ -111,6 +102,15 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
        max77686->irq_gpio = pdata->irq_gpio;
        max77686->irq = i2c->irq;
 
+       max77686->regmap = regmap_init_i2c(i2c, &max77686_regmap_config);
+       if (IS_ERR(max77686->regmap)) {
+               ret = PTR_ERR(max77686->regmap);
+               dev_err(max77686->dev, "Failed to allocate register map: %d\n",
+                               ret);
+               kfree(max77686);
+               return ret;
+       }
+
        if (regmap_read(max77686->regmap,
                         MAX77686_REG_DEVICE_ID, &data) < 0) {
                dev_err(max77686->dev,
index cc5155e20494726c2ae6954e64128f61973ebafd..9e60fed5ff82a81dbf774c7d8af7547f12861516 100644 (file)
@@ -114,35 +114,37 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
        u8 reg_data;
        int ret = 0;
 
+       if (!pdata) {
+               dev_err(&i2c->dev, "No platform data found.\n");
+               return -EINVAL;
+       }
+
        max77693 = devm_kzalloc(&i2c->dev,
                        sizeof(struct max77693_dev), GFP_KERNEL);
        if (max77693 == NULL)
                return -ENOMEM;
 
-       max77693->regmap = devm_regmap_init_i2c(i2c, &max77693_regmap_config);
-       if (IS_ERR(max77693->regmap)) {
-               ret = PTR_ERR(max77693->regmap);
-               dev_err(max77693->dev,"failed to allocate register map: %d\n",
-                               ret);
-               goto err_regmap;
-       }
-
        i2c_set_clientdata(i2c, max77693);
        max77693->dev = &i2c->dev;
        max77693->i2c = i2c;
        max77693->irq = i2c->irq;
        max77693->type = id->driver_data;
 
-       if (!pdata)
-               goto err_regmap;
+       max77693->regmap = devm_regmap_init_i2c(i2c, &max77693_regmap_config);
+       if (IS_ERR(max77693->regmap)) {
+               ret = PTR_ERR(max77693->regmap);
+               dev_err(max77693->dev, "failed to allocate register map: %d\n",
+                               ret);
+               return ret;
+       }
 
        max77693->wakeup = pdata->wakeup;
 
-       if (max77693_read_reg(max77693->regmap,
-                               MAX77693_PMIC_REG_PMIC_ID2, &reg_data) < 0) {
+       ret = max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_PMIC_ID2,
+                               &reg_data);
+       if (ret < 0) {
                dev_err(max77693->dev, "device not found on this channel\n");
-               ret = -ENODEV;
-               goto err_regmap;
+               return ret;
        } else
                dev_info(max77693->dev, "device ID: 0x%x\n", reg_data);
 
@@ -163,7 +165,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
                ret = PTR_ERR(max77693->regmap_muic);
                dev_err(max77693->dev,
                        "failed to allocate register map: %d\n", ret);
-               goto err_regmap;
+               goto err_regmap_muic;
        }
 
        ret = max77693_irq_init(max77693);
@@ -184,9 +186,9 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
 err_mfd:
        max77693_irq_exit(max77693);
 err_irq:
+err_regmap_muic:
        i2c_unregister_device(max77693->muic);
        i2c_unregister_device(max77693->haptic);
-err_regmap:
        return ret;
 }
 
index 64803f13bcecc4f6e8f221ecacf814e419d07104..d11567307fbed6c930f955abf198de1157dfb237 100644 (file)
@@ -208,6 +208,8 @@ static int pcf50633_probe(struct i2c_client *client,
        if (!pcf)
                return -ENOMEM;
 
+       i2c_set_clientdata(client, pcf);
+       pcf->dev = &client->dev;
        pcf->pdata = pdata;
 
        mutex_init(&pcf->lock);
@@ -219,9 +221,6 @@ static int pcf50633_probe(struct i2c_client *client,
                return ret;
        }
 
-       i2c_set_clientdata(client, pcf);
-       pcf->dev = &client->dev;
-
        version = pcf50633_reg_read(pcf, 0);
        variant = pcf50633_reg_read(pcf, 1);
        if (version < 0 || variant < 0) {
index 89f046ca9e410292b9b1a51423544ab1a40aebf2..3d3b4addf81a9b73b480ae3874e2675e1678b52b 100644 (file)
@@ -112,6 +112,21 @@ static int rtl8411_card_power_off(struct rtsx_pcr *pcr, int card)
                        BPP_LDO_POWB, BPP_LDO_SUSPEND);
 }
 
+static int rtl8411_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+       u8 mask, val;
+
+       mask = (BPP_REG_TUNED18 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_MASK;
+       if (voltage == OUTPUT_3V3)
+               val = (BPP_ASIC_3V3 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_3V3;
+       else if (voltage == OUTPUT_1V8)
+               val = (BPP_ASIC_1V8 << BPP_TUNED18_SHIFT_8411) | BPP_PAD_1V8;
+       else
+               return -EINVAL;
+
+       return rtsx_pci_write_register(pcr, LDO_CTL, mask, val);
+}
+
 static unsigned int rtl8411_cd_deglitch(struct rtsx_pcr *pcr)
 {
        unsigned int card_exist;
@@ -163,6 +178,18 @@ static unsigned int rtl8411_cd_deglitch(struct rtsx_pcr *pcr)
        return card_exist;
 }
 
+static int rtl8411_conv_clk_and_div_n(int input, int dir)
+{
+       int output;
+
+       if (dir == CLK_TO_DIV_N)
+               output = input * 4 / 5 - 2;
+       else
+               output = (input + 2) * 5 / 4;
+
+       return output;
+}
+
 static const struct pcr_ops rtl8411_pcr_ops = {
        .extra_init_hw = rtl8411_extra_init_hw,
        .optimize_phy = NULL,
@@ -172,7 +199,9 @@ static const struct pcr_ops rtl8411_pcr_ops = {
        .disable_auto_blink = rtl8411_disable_auto_blink,
        .card_power_on = rtl8411_card_power_on,
        .card_power_off = rtl8411_card_power_off,
+       .switch_output_voltage = rtl8411_switch_output_voltage,
        .cd_deglitch = rtl8411_cd_deglitch,
+       .conv_clk_and_div_n = rtl8411_conv_clk_and_div_n,
 };
 
 /* SD Pull Control Enable:
index 283a4f148084ab29f34d2444d8f264e72345e846..98fe0f39463ed1596f1e0881f5d7352349da400d 100644 (file)
@@ -144,6 +144,25 @@ static int rts5209_card_power_off(struct rtsx_pcr *pcr, int card)
        return rtsx_pci_send_cmd(pcr, 100);
 }
 
+static int rts5209_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+       int err;
+
+       if (voltage == OUTPUT_3V3) {
+               err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
+               if (err < 0)
+                       return err;
+       } else if (voltage == OUTPUT_1V8) {
+               err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
+               if (err < 0)
+                       return err;
+       } else {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static const struct pcr_ops rts5209_pcr_ops = {
        .extra_init_hw = rts5209_extra_init_hw,
        .optimize_phy = rts5209_optimize_phy,
@@ -153,7 +172,9 @@ static const struct pcr_ops rts5209_pcr_ops = {
        .disable_auto_blink = rts5209_disable_auto_blink,
        .card_power_on = rts5209_card_power_on,
        .card_power_off = rts5209_card_power_off,
+       .switch_output_voltage = rts5209_switch_output_voltage,
        .cd_deglitch = NULL,
+       .conv_clk_and_div_n = NULL,
 };
 
 /* SD Pull Control Enable:
index b9dbab266fdadc9f3f60e219f6dc5e25b9322036..29d889cbb9c5183262897bc5e29818c536bb227d 100644 (file)
@@ -114,6 +114,25 @@ static int rts5229_card_power_off(struct rtsx_pcr *pcr, int card)
        return rtsx_pci_send_cmd(pcr, 100);
 }
 
+static int rts5229_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+       int err;
+
+       if (voltage == OUTPUT_3V3) {
+               err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
+               if (err < 0)
+                       return err;
+       } else if (voltage == OUTPUT_1V8) {
+               err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
+               if (err < 0)
+                       return err;
+       } else {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static const struct pcr_ops rts5229_pcr_ops = {
        .extra_init_hw = rts5229_extra_init_hw,
        .optimize_phy = rts5229_optimize_phy,
@@ -123,7 +142,9 @@ static const struct pcr_ops rts5229_pcr_ops = {
        .disable_auto_blink = rts5229_disable_auto_blink,
        .card_power_on = rts5229_card_power_on,
        .card_power_off = rts5229_card_power_off,
+       .switch_output_voltage = rts5229_switch_output_voltage,
        .cd_deglitch = NULL,
+       .conv_clk_and_div_n = NULL,
 };
 
 /* SD Pull Control Enable:
index 7a7b0bda4618926fffea21dc23a3f386e1303777..9fc57009e22813fe257481b39352533f2fbd5d45 100644 (file)
@@ -630,7 +630,10 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
        if (clk == pcr->cur_clock)
                return 0;
 
-       N = (u8)(clk - 2);
+       if (pcr->ops->conv_clk_and_div_n)
+               N = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
+       else
+               N = (u8)(clk - 2);
        if ((clk <= 2) || (N > max_N))
                return -EINVAL;
 
@@ -641,7 +644,14 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
        /* Make sure that the SSC clock div_n is equal or greater than min_N */
        div = CLK_DIV_1;
        while ((N < min_N) && (div < max_div)) {
-               N = (N + 2) * 2 - 2;
+               if (pcr->ops->conv_clk_and_div_n) {
+                       int dbl_clk = pcr->ops->conv_clk_and_div_n(N,
+                                       DIV_N_TO_CLK) * 2;
+                       N = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
+                                       CLK_TO_DIV_N);
+               } else {
+                       N = (N + 2) * 2 - 2;
+               }
                div++;
        }
        dev_dbg(&(pcr->pci->dev), "N = %d, div = %d\n", N, div);
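The rtl8411 relates the SSC divider value N to the card clock differently from the older chips (which use N = clk - 2), which is why the conversion is routed through the optional conv_clk_and_div_n() hook. A worked example with the rtl8411 formulas and integer division, using hypothetical clock values in MHz:

        clk = 150  ->  N   = 150 * 4 / 5 - 2 = 118
        N   = 118  ->  clk = (118 + 2) * 5 / 4 = 150

One pass of the doubling loop above then converts N = 118 back to clk = 150, doubles it to 300, and re-converts to N = 300 * 4 / 5 - 2 = 238.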
@@ -703,6 +713,15 @@ int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
 }
 EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
 
+int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+       if (pcr->ops->switch_output_voltage)
+               return pcr->ops->switch_output_voltage(pcr, voltage);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
+
 unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
 {
        unsigned int val;
@@ -767,10 +786,10 @@ static void rtsx_pci_card_detect(struct work_struct *work)
 
        spin_unlock_irqrestore(&pcr->lock, flags);
 
-       if (card_detect & SD_EXIST)
+       if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
                pcr->slots[RTSX_SD_CARD].card_event(
                                pcr->slots[RTSX_SD_CARD].p_dev);
-       if (card_detect & MS_EXIST)
+       if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
                pcr->slots[RTSX_MS_CARD].card_event(
                                pcr->slots[RTSX_MS_CARD].p_dev);
 }
index 49d361a618d06f5f26d17c49a8c9d48e20547caf..77ee26ef594176aceb8b5d8a2518b5eb13f64479 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
+#include <linux/of_irq.h>
 #include <linux/interrupt.h>
 #include <linux/pm_runtime.h>
 #include <linux/mutex.h>
@@ -60,6 +61,15 @@ static struct mfd_cell s2mps11_devs[] = {
        },
 };
 
+#ifdef CONFIG_OF
+static struct of_device_id sec_dt_match[] = {
+       {       .compatible = "samsung,s5m8767-pmic",
+               .data = (void *)S5M8767X,
+       },
+       {},
+};
+#endif
+
 int sec_reg_read(struct sec_pmic_dev *sec_pmic, u8 reg, void *dest)
 {
        return regmap_read(sec_pmic->regmap, reg, dest);
@@ -95,6 +105,57 @@ static struct regmap_config sec_regmap_config = {
        .val_bits = 8,
 };
 
+
+#ifdef CONFIG_OF
+/*
+ * Only the common platform data elements for s5m8767 are parsed here from the
+ * device tree. Other sub-modules of s5m8767 such as pmic, rtc, charger and
+ * others have to parse their own platform data elements from device tree.
+ *
+ * The s5m8767 platform data structure is instantiated here and the drivers for
+ * the sub-modules need not instantiate another instance while parsing their
+ * platform data.
+ */
+static struct sec_platform_data *sec_pmic_i2c_parse_dt_pdata(
+                                       struct device *dev)
+{
+       struct sec_platform_data *pd;
+
+       pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+       if (!pd) {
+               dev_err(dev, "could not allocate memory for pdata\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /*
+        * ToDo: the 'wakeup' member in the platform data is Linux-specific
+        * information. Hence, there is no binding for it yet and it is not
+        * parsed here.
+        */
+
+       return pd;
+}
+#else
+static struct sec_platform_data *sec_pmic_i2c_parse_dt_pdata(
+                                       struct device *dev)
+{
+       return 0;
+}
+#endif
+
+static inline int sec_i2c_get_driver_data(struct i2c_client *i2c,
+                                               const struct i2c_device_id *id)
+{
+#ifdef CONFIG_OF
+       if (i2c->dev.of_node) {
+               const struct of_device_id *match;
+               match = of_match_node(sec_dt_match, i2c->dev.of_node);
+               return (int)match->data;
+       }
+#endif
+       return (int)id->driver_data;
+}
+
 static int sec_pmic_probe(struct i2c_client *i2c,
                            const struct i2c_device_id *id)
 {
@@ -111,13 +172,22 @@ static int sec_pmic_probe(struct i2c_client *i2c,
        sec_pmic->dev = &i2c->dev;
        sec_pmic->i2c = i2c;
        sec_pmic->irq = i2c->irq;
-       sec_pmic->type = id->driver_data;
-
+       sec_pmic->type = sec_i2c_get_driver_data(i2c, id);
+
+       if (sec_pmic->dev->of_node) {
+               pdata = sec_pmic_i2c_parse_dt_pdata(sec_pmic->dev);
+               if (IS_ERR(pdata)) {
+                       ret = PTR_ERR(pdata);
+                       return ret;
+               }
+               pdata->device_type = sec_pmic->type;
+       }
        if (pdata) {
                sec_pmic->device_type = pdata->device_type;
                sec_pmic->ono = pdata->ono;
                sec_pmic->irq_base = pdata->irq_base;
                sec_pmic->wakeup = pdata->wakeup;
+               sec_pmic->pdata = pdata;
        }
 
        sec_pmic->regmap = devm_regmap_init_i2c(i2c, &sec_regmap_config);
@@ -192,6 +262,7 @@ static struct i2c_driver sec_pmic_driver = {
        .driver = {
                   .name = "sec_pmic",
                   .owner = THIS_MODULE,
+                  .of_match_table = of_match_ptr(sec_dt_match),
        },
        .probe = sec_pmic_probe,
        .remove = sec_pmic_remove,
index a06d66b929b1ea96743826435d18899464fb24b5..ecc092c7f7453e4cfb7146fc222546869239f8b0 100644 (file)
@@ -219,25 +219,18 @@ static void tc3589x_irq_unmap(struct irq_domain *d, unsigned int virq)
 }
 
 static struct irq_domain_ops tc3589x_irq_ops = {
-        .map    = tc3589x_irq_map,
+       .map    = tc3589x_irq_map,
        .unmap  = tc3589x_irq_unmap,
-        .xlate  = irq_domain_xlate_twocell,
+       .xlate  = irq_domain_xlate_twocell,
 };
 
 static int tc3589x_irq_init(struct tc3589x *tc3589x, struct device_node *np)
 {
        int base = tc3589x->irq_base;
 
-       if (base) {
-               tc3589x->domain = irq_domain_add_legacy(
-                       NULL, TC3589x_NR_INTERNAL_IRQS, base,
-                       0, &tc3589x_irq_ops, tc3589x);
-       }
-       else {
-               tc3589x->domain = irq_domain_add_linear(
-                       np, TC3589x_NR_INTERNAL_IRQS,
-                       &tc3589x_irq_ops, tc3589x);
-       }
+       tc3589x->domain = irq_domain_add_simple(
+               np, TC3589x_NR_INTERNAL_IRQS, base,
+               &tc3589x_irq_ops, tc3589x);
 
        if (!tc3589x->domain) {
                dev_err(tc3589x->dev, "Failed to create irqdomain\n");
index 4dae241e501734f66c9080283a0c43188729b0b5..dd362c1078e1438b3751ec69dc1c8b1ad3efa0bb 100644 (file)
@@ -159,7 +159,7 @@ out:
 static int twl4030_write_script(u8 address, struct twl4030_ins *script,
                                       int len)
 {
-       int err;
+       int err = -EINVAL;
 
        for (; len; len--, address++, script++) {
                if (len == 1) {
index fae15d880758cc197ed0bfe8e095daca74f01bd5..3c1723aa62250c28c61aab4b3f83442e28770dc8 100644 (file)
@@ -67,6 +67,7 @@ struct vexpress_config_bridge *vexpress_config_bridge_register(
 
        return bridge;
 }
+EXPORT_SYMBOL(vexpress_config_bridge_register);
 
 void vexpress_config_bridge_unregister(struct vexpress_config_bridge *bridge)
 {
@@ -83,6 +84,7 @@ void vexpress_config_bridge_unregister(struct vexpress_config_bridge *bridge)
        while (!list_empty(&__bridge.transactions))
                cpu_relax();
 }
+EXPORT_SYMBOL(vexpress_config_bridge_unregister);
 
 
 struct vexpress_config_func {
@@ -142,6 +144,7 @@ struct vexpress_config_func *__vexpress_config_func_get(struct device *dev,
 
        return func;
 }
+EXPORT_SYMBOL(__vexpress_config_func_get);
 
 void vexpress_config_func_put(struct vexpress_config_func *func)
 {
@@ -149,7 +152,7 @@ void vexpress_config_func_put(struct vexpress_config_func *func)
        of_node_put(func->bridge->node);
        kfree(func);
 }
-
+EXPORT_SYMBOL(vexpress_config_func_put);
 
 struct vexpress_config_trans {
        struct vexpress_config_func *func;
@@ -229,6 +232,7 @@ void vexpress_config_complete(struct vexpress_config_bridge *bridge,
 
        complete(&trans->completion);
 }
+EXPORT_SYMBOL(vexpress_config_complete);
 
 int vexpress_config_wait(struct vexpress_config_trans *trans)
 {
@@ -236,7 +240,7 @@ int vexpress_config_wait(struct vexpress_config_trans *trans)
 
        return trans->status;
 }
-
+EXPORT_SYMBOL(vexpress_config_wait);
 
 int vexpress_config_read(struct vexpress_config_func *func, int offset,
                u32 *data)
index e5d8f63b252aa887e0493d3ae44725792217daea..77048b18439e3e6467b79f7ba4b2b3ac06e36a6e 100644 (file)
@@ -313,19 +313,11 @@ static void vexpress_sysreg_config_complete(unsigned long data)
 }
 
 
-void __init vexpress_sysreg_early_init(void __iomem *base)
+void __init vexpress_sysreg_setup(struct device_node *node)
 {
-       struct device_node *node = of_find_compatible_node(NULL, NULL,
-                       "arm,vexpress-sysreg");
-
-       if (node)
-               base = of_iomap(node, 0);
-
-       if (WARN_ON(!base))
+       if (WARN_ON(!vexpress_sysreg_base))
                return;
 
-       vexpress_sysreg_base = base;
-
        if (readl(vexpress_sysreg_base + SYS_MISC) & SYS_MISC_MASTERSITE)
                vexpress_master_site = VEXPRESS_SITE_DB2;
        else
@@ -336,9 +328,23 @@ void __init vexpress_sysreg_early_init(void __iomem *base)
        WARN_ON(!vexpress_sysreg_config_bridge);
 }
 
+void __init vexpress_sysreg_early_init(void __iomem *base)
+{
+       vexpress_sysreg_base = base;
+       vexpress_sysreg_setup(NULL);
+}
+
 void __init vexpress_sysreg_of_early_init(void)
 {
-       vexpress_sysreg_early_init(NULL);
+       struct device_node *node = of_find_compatible_node(NULL, NULL,
+                       "arm,vexpress-sysreg");
+
+       if (node) {
+               vexpress_sysreg_base = of_iomap(node, 0);
+               vexpress_sysreg_setup(node);
+       } else {
+               pr_info("vexpress-sysreg: No Device Tree node found.");
+       }
 }
 
 
@@ -426,9 +432,11 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
                return -EBUSY;
        }
 
-       if (!vexpress_sysreg_base)
+       if (!vexpress_sysreg_base) {
                vexpress_sysreg_base = devm_ioremap(&pdev->dev, res->start,
                                resource_size(res));
+               vexpress_sysreg_setup(pdev->dev.of_node);
+       }
 
        if (!vexpress_sysreg_base) {
                dev_err(&pdev->dev, "Failed to obtain base address!\n");
index 088872ab63389e49d04dd3f38fb02664fdc929ba..f6fcb87b3504adc8e966d8225efcee066d2977e1 100644 (file)
@@ -96,6 +96,7 @@ const struct regmap_irq_chip wm5102_aod = {
        .mask_base = ARIZONA_AOD_IRQ_MASK_IRQ1,
        .ack_base = ARIZONA_AOD_IRQ1,
        .wake_base = ARIZONA_WAKE_CONTROL,
+       .wake_invert = 1,
        .num_regs = 1,
        .irqs = wm5102_aod_irqs,
        .num_irqs = ARRAY_SIZE(wm5102_aod_irqs),
@@ -1882,7 +1883,7 @@ static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
        }
 }
 
-#define WM5102_MAX_REGISTER 0x1a8fff
+#define WM5102_MAX_REGISTER 0x1a9800
 
 const struct regmap_config wm5102_spi_regmap = {
        .reg_bits = 32,
index adda6b10b90d87ff64e29503c20139022f97465c..c41599815299ddb043654f1008864329c1d21ca0 100644 (file)
@@ -255,6 +255,7 @@ const struct regmap_irq_chip wm5110_aod = {
        .mask_base = ARIZONA_AOD_IRQ_MASK_IRQ1,
        .ack_base = ARIZONA_AOD_IRQ1,
        .wake_base = ARIZONA_WAKE_CONTROL,
+       .wake_invert = 1,
        .num_regs = 1,
        .irqs = wm5110_aod_irqs,
        .num_irqs = ARRAY_SIZE(wm5110_aod_irqs),
index 158da5a81a661824a2dca0ee2a9fe93fb5cacfb6..3c09cbb70b1dc01e45f1d6f4fdb93d7e351ad880 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 
 #include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
 
 /* Serialize access to ssc_list and user count */
 static DEFINE_SPINLOCK(user_lock);
@@ -131,6 +132,13 @@ static int ssc_probe(struct platform_device *pdev)
        struct resource *regs;
        struct ssc_device *ssc;
        const struct atmel_ssc_platform_data *plat_dat;
+       struct pinctrl *pinctrl;
+
+       pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+       if (IS_ERR(pinctrl)) {
+               dev_err(&pdev->dev, "Failed to request pinctrl\n");
+               return PTR_ERR(pinctrl);
+       }
 
        ssc = devm_kzalloc(&pdev->dev, sizeof(struct ssc_device), GFP_KERNEL);
        if (!ssc) {
index 18794aea606282a8925a4ef93d9596948760c3f0..e40ffd9502d17eddaf7f41618d3f33d41b8012af 100644 (file)
@@ -187,13 +187,13 @@ int mei_amthif_read(struct mei_device *dev, struct file *file,
                wait_ret = wait_event_interruptible(dev->iamthif_cl.wait,
                        (cb = mei_amthif_find_read_list_entry(dev, file)));
 
+               /* Locking again the Mutex */
+               mutex_lock(&dev->device_lock);
+
                if (wait_ret)
                        return -ERESTARTSYS;
 
                dev_dbg(&dev->pdev->dev, "woke up from sleep\n");
-
-               /* Locking again the Mutex */
-               mutex_lock(&dev->device_lock);
        }
 
 
index 492c8cac69acfbd78d6ee33170b58617d8891bc3..44d273c5e19d7befc7161f566b42f7e3e544ad18 100644 (file)
@@ -517,7 +517,7 @@ static int __init gru_init(void)
 {
        int ret;
 
-       if (!is_uv_system())
+       if (!is_uv_system() || (is_uvx_hub() && !is_uv2_hub()))
                return 0;
 
 #if defined CONFIG_IA64
index 9ff942a346edb59ff05d3a3fec52f052ab8849bb..83269f1d16e380e7b7c4e9ff234b456973c19f72 100644 (file)
@@ -468,6 +468,11 @@ long st_kim_start(void *kim_data)
                if (pdata->chip_enable)
                        pdata->chip_enable(kim_gdata);
 
+               /* Configure BT nShutdown to HIGH state */
+               gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+               mdelay(5);      /* FIXME: a proper toggle */
+               gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
+               mdelay(100);
                /* re-initialize the completion */
                INIT_COMPLETION(kim_gdata->ldisc_installed);
                /* send notification to UIM */
@@ -509,7 +514,8 @@ long st_kim_start(void *kim_data)
  *     (b) upon failure to either install ldisc or download firmware.
  *     The function is responsible to (a) notify UIM about un-installation,
  *     (b) flush UART if the ldisc was installed.
- *     (c) invoke platform's chip disabling routine.
+ *     (c) reset BT_EN - pull down nshutdown at the end.
+ *     (d) invoke platform's chip disabling routine.
  */
 long st_kim_stop(void *kim_data)
 {
@@ -541,6 +547,13 @@ long st_kim_stop(void *kim_data)
                err = -ETIMEDOUT;
        }
 
+       /* By default configure BT nShutdown to LOW state */
+       gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+       mdelay(1);
+       gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
+       mdelay(1);
+       gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+
        /* platform specific disable */
        if (pdata->chip_disable)
                pdata->chip_disable(kim_gdata);
@@ -733,6 +746,20 @@ static int kim_probe(struct platform_device *pdev)
        /* refer to itself */
        kim_gdata->core_data->kim_data = kim_gdata;
 
+       /* Claim the chip enable nShutdown gpio from the system */
+       kim_gdata->nshutdown = pdata->nshutdown_gpio;
+       err = gpio_request(kim_gdata->nshutdown, "kim");
+       if (unlikely(err)) {
+               pr_err(" gpio %ld request failed ", kim_gdata->nshutdown);
+               return err;
+       }
+
+       /* Configure nShutdown GPIO as output=0 */
+       err = gpio_direction_output(kim_gdata->nshutdown, 0);
+       if (unlikely(err)) {
+               pr_err(" unable to configure gpio %ld", kim_gdata->nshutdown);
+               return err;
+       }
        /* get reference of pdev for request_firmware
         */
        kim_gdata->kim_pdev = pdev;
@@ -779,10 +806,18 @@ err_core_init:
 
 static int kim_remove(struct platform_device *pdev)
 {
+       /* free the GPIOs requested */
+       struct ti_st_plat_data  *pdata = pdev->dev.platform_data;
        struct kim_data_s       *kim_gdata;
 
        kim_gdata = dev_get_drvdata(&pdev->dev);
 
+       /* Free the Bluetooth/FM/GPIO
+        * nShutdown gpio from the system
+        */
+       gpio_free(pdata->nshutdown_gpio);
+       pr_info("nshutdown GPIO Freed");
+
        debugfs_remove_recursive(kim_debugfs_dir);
        sysfs_remove_group(&pdev->dev.kobj, &uim_attr_grp);
        pr_info("sysfs entries removed");
index de4c20b3936c0fb46d207f4a8896e97b4927be67..f8dd36102949a17bc9cdd7e1651053d5b2f01ed2 100644 (file)
@@ -50,8 +50,6 @@ struct mvsd_host {
        struct timer_list timer;
        struct mmc_host *mmc;
        struct device *dev;
-       struct resource *res;
-       int irq;
        struct clk *clk;
        int gpio_card_detect;
        int gpio_write_protect;
@@ -718,10 +716,6 @@ static int __init mvsd_probe(struct platform_device *pdev)
        if (!r || irq < 0 || !mvsd_data)
                return -ENXIO;
 
-       r = request_mem_region(r->start, SZ_1K, DRIVER_NAME);
-       if (!r)
-               return -EBUSY;
-
        mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
        if (!mmc) {
                ret = -ENOMEM;
@@ -731,8 +725,8 @@ static int __init mvsd_probe(struct platform_device *pdev)
        host = mmc_priv(mmc);
        host->mmc = mmc;
        host->dev = &pdev->dev;
-       host->res = r;
        host->base_clock = mvsd_data->clock / 2;
+       host->clk = ERR_PTR(-EINVAL);
 
        mmc->ops = &mvsd_ops;
 
@@ -752,7 +746,7 @@ static int __init mvsd_probe(struct platform_device *pdev)
 
        spin_lock_init(&host->lock);
 
-       host->base = ioremap(r->start, SZ_4K);
+       host->base = devm_request_and_ioremap(&pdev->dev, r);
        if (!host->base) {
                ret = -ENOMEM;
                goto out;
@@ -765,44 +759,45 @@ static int __init mvsd_probe(struct platform_device *pdev)
 
        mvsd_power_down(host);
 
-       ret = request_irq(irq, mvsd_irq, 0, DRIVER_NAME, host);
+       ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host);
        if (ret) {
                pr_err("%s: cannot assign irq %d\n", DRIVER_NAME, irq);
                goto out;
-       } else
-               host->irq = irq;
+       }
 
        /* Not all platforms can gate the clock, so it is not
           an error if the clock does not exists. */
-       host->clk = clk_get(&pdev->dev, NULL);
-       if (!IS_ERR(host->clk)) {
+       host->clk = devm_clk_get(&pdev->dev, NULL);
+       if (!IS_ERR(host->clk))
                clk_prepare_enable(host->clk);
-       }
 
        if (mvsd_data->gpio_card_detect) {
-               ret = gpio_request(mvsd_data->gpio_card_detect,
-                                  DRIVER_NAME " cd");
+               ret = devm_gpio_request_one(&pdev->dev,
+                                           mvsd_data->gpio_card_detect,
+                                           GPIOF_IN, DRIVER_NAME " cd");
                if (ret == 0) {
-                       gpio_direction_input(mvsd_data->gpio_card_detect);
                        irq = gpio_to_irq(mvsd_data->gpio_card_detect);
-                       ret = request_irq(irq, mvsd_card_detect_irq,
-                                         IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING,
-                                         DRIVER_NAME " cd", host);
+                       ret = devm_request_irq(&pdev->dev, irq,
+                                              mvsd_card_detect_irq,
+                                              IRQ_TYPE_EDGE_RISING |
+                                              IRQ_TYPE_EDGE_FALLING,
+                                              DRIVER_NAME " cd", host);
                        if (ret == 0)
                                host->gpio_card_detect =
                                        mvsd_data->gpio_card_detect;
                        else
-                               gpio_free(mvsd_data->gpio_card_detect);
+                               devm_gpio_free(&pdev->dev,
+                                              mvsd_data->gpio_card_detect);
                }
        }
        if (!host->gpio_card_detect)
                mmc->caps |= MMC_CAP_NEEDS_POLL;
 
        if (mvsd_data->gpio_write_protect) {
-               ret = gpio_request(mvsd_data->gpio_write_protect,
-                                  DRIVER_NAME " wp");
+               ret = devm_gpio_request_one(&pdev->dev,
+                                           mvsd_data->gpio_write_protect,
+                                           GPIOF_IN, DRIVER_NAME " wp");
                if (ret == 0) {
-                       gpio_direction_input(mvsd_data->gpio_write_protect);
                        host->gpio_write_protect =
                                mvsd_data->gpio_write_protect;
                }
@@ -824,26 +819,11 @@ static int __init mvsd_probe(struct platform_device *pdev)
        return 0;
 
 out:
-       if (host) {
-               if (host->irq)
-                       free_irq(host->irq, host);
-               if (host->gpio_card_detect) {
-                       free_irq(gpio_to_irq(host->gpio_card_detect), host);
-                       gpio_free(host->gpio_card_detect);
-               }
-               if (host->gpio_write_protect)
-                       gpio_free(host->gpio_write_protect);
-               if (host->base)
-                       iounmap(host->base);
-       }
-       if (r)
-               release_resource(r);
-       if (mmc)
-               if (!IS_ERR_OR_NULL(host->clk)) {
+       if (mmc) {
+               if (!IS_ERR(host->clk))
                        clk_disable_unprepare(host->clk);
-                       clk_put(host->clk);
-               }
                mmc_free_host(mmc);
+       }
 
        return ret;
 }
@@ -852,28 +832,16 @@ static int __exit mvsd_remove(struct platform_device *pdev)
 {
        struct mmc_host *mmc = platform_get_drvdata(pdev);
 
-       if (mmc) {
-               struct mvsd_host *host = mmc_priv(mmc);
+       struct mvsd_host *host = mmc_priv(mmc);
 
-               if (host->gpio_card_detect) {
-                       free_irq(gpio_to_irq(host->gpio_card_detect), host);
-                       gpio_free(host->gpio_card_detect);
-               }
-               mmc_remove_host(mmc);
-               free_irq(host->irq, host);
-               if (host->gpio_write_protect)
-                       gpio_free(host->gpio_write_protect);
-               del_timer_sync(&host->timer);
-               mvsd_power_down(host);
-               iounmap(host->base);
-               release_resource(host->res);
+       mmc_remove_host(mmc);
+       del_timer_sync(&host->timer);
+       mvsd_power_down(host);
+
+       if (!IS_ERR(host->clk))
+               clk_disable_unprepare(host->clk);
+       mmc_free_host(mmc);
 
-               if (!IS_ERR(host->clk)) {
-                       clk_disable_unprepare(host->clk);
-                       clk_put(host->clk);
-               }
-               mmc_free_host(mmc);
-       }
        platform_set_drvdata(pdev, NULL);
        return 0;
 }
index 571915dfb218382f0d29ea72136b05669b9271ea..f74b5adca64232dd4a8ab7d4a397281b8f02c7a0 100644 (file)
@@ -1060,26 +1060,6 @@ static int sd_wait_voltage_stable_2(struct realtek_pci_sdmmc *host)
        return 0;
 }
 
-static int sd_change_bank_voltage(struct realtek_pci_sdmmc *host, u8 voltage)
-{
-       struct rtsx_pcr *pcr = host->pcr;
-       int err;
-
-       if (voltage == SD_IO_3V3) {
-               err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
-               if (err < 0)
-                       return err;
-       } else if (voltage == SD_IO_1V8) {
-               err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
-               if (err < 0)
-                       return err;
-       } else {
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
 static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
 {
        struct realtek_pci_sdmmc *host = mmc_priv(mmc);
@@ -1098,11 +1078,11 @@ static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
        rtsx_pci_start_run(pcr);
 
        if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
-               voltage = SD_IO_3V3;
+               voltage = OUTPUT_3V3;
        else
-               voltage = SD_IO_1V8;
+               voltage = OUTPUT_1V8;
 
-       if (voltage == SD_IO_1V8) {
+       if (voltage == OUTPUT_1V8) {
                err = rtsx_pci_write_register(pcr,
                                SD30_DRIVE_SEL, 0x07, DRIVER_TYPE_B);
                if (err < 0)
@@ -1113,11 +1093,11 @@ static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
                        goto out;
        }
 
-       err = sd_change_bank_voltage(host, voltage);
+       err = rtsx_pci_switch_output_voltage(pcr, voltage);
        if (err < 0)
                goto out;
 
-       if (voltage == SD_IO_1V8) {
+       if (voltage == OUTPUT_1V8) {
                err = sd_wait_voltage_stable_2(host);
                if (err < 0)
                        goto out;
index 27f80cd8aef3770ee1d5c67c030088407c0967ba..46dcb54c32ec7994610e3c4a20fbaf85aa879b09 100644 (file)
@@ -272,6 +272,7 @@ config MTD_DOCG3
        tristate "M-Systems Disk-On-Chip G3"
        select BCH
        select BCH_CONST_PARAMS
+       select BITREVERSE
        ---help---
          This provides an MTD device driver for the M-Systems DiskOnChip
          G3 devices.
index 67cc73c18ddd6e0ca10d0438403d61436971b734..7901d72c92425dc41485947468231cf7776db372 100644 (file)
@@ -170,7 +170,7 @@ static int of_flash_probe(struct platform_device *dev)
        resource_size_t res_size;
        struct mtd_part_parser_data ppdata;
        bool map_indirect;
-       const char *mtd_name;
+       const char *mtd_name = NULL;
 
        match = of_match_device(of_flash_match, &dev->dev);
        if (!match)
index 86c9a79b89b3e863f9377eebb8c9b6ae18500dd9..595de4012e71971c6453a5499334c28e6185390b 100644 (file)
@@ -17,8 +17,8 @@
 #include "bcm47xxnflash.h"
 
 /* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 has
- * shown 164 retries as maxiumum. */
-#define NFLASH_READY_RETRIES           1000
+ * shown ~1000 retries as maximum. */
+#define NFLASH_READY_RETRIES           10000
 
 #define NFLASH_SECTOR_SIZE             512
 
index 3502606f64806a06ad426a112174c0117482e8dc..feae55c7b88046b3169820449b002d0ae562a5c4 100644 (file)
@@ -523,7 +523,7 @@ static struct nand_ecclayout hwecc4_2048 __initconst = {
 static const struct of_device_id davinci_nand_of_match[] = {
        {.compatible = "ti,davinci-nand", },
        {},
-}
+};
 MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
 
 static struct davinci_nand_pdata
index 8323ac991ad154d7304958059cab37af6f2d6541..3766682a02898135ce4274b5a0baeac0eee772c9 100644 (file)
@@ -2857,8 +2857,11 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
        int i;
        int val;
 
-       /* ONFI need to be probed in 8 bits mode */
-       WARN_ON(chip->options & NAND_BUSWIDTH_16);
+       /* ONFI needs to be probed in 8 bits mode, and 16 bits should be selected with NAND_BUSWIDTH_AUTO */
+       if (chip->options & NAND_BUSWIDTH_16) {
+               pr_err("Trying ONFI probe in 16 bits mode, aborting !\n");
+               return 0;
+       }
        /* Try ONFI for unknown chip or LP */
        chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
        if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
index 1877ed7ca0864ed4adfb614de862ed40dde730bd..1c9e09fbdff8346e99413e0313d03ed401f463e2 100644 (file)
@@ -1053,6 +1053,7 @@ static ssize_t bonding_store_primary(struct device *d,
                pr_info("%s: Setting primary slave to None.\n",
                        bond->dev->name);
                bond->primary_slave = NULL;
+               memset(bond->params.primary, 0, sizeof(bond->params.primary));
                bond_select_active_slave(bond);
                goto out;
        }
index 5233b8f58d773b6edb44f306f477674f05415084..2282b1ae97653d825fe6f37c2b22cf7ecd37a945 100644 (file)
@@ -488,8 +488,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
 
        priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
                        IFX_WRITE_LOW_16BIT(mask));
+
+       /* According to C_CAN documentation, the reserved bit
+        * in IFx_MASK2 register is fixed 1
+        */
        priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
-                       IFX_WRITE_HIGH_16BIT(mask));
+                       IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
 
        priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
                        IFX_WRITE_LOW_16BIT(id));
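Worked example for the IFx_MASK2 write above (hypothetical acceptance mask of 0): IFX_WRITE_HIGH_16BIT(0) is 0x0000, so without the fix the register would be written as 0x0000 and the reserved bit would be cleared; OR-ing in BIT(13) = 0x2000 keeps that bit set to 1, as the C_CAN documentation quoted in the comment requires, while leaving the mask bits requested by the stack unchanged.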
@@ -960,7 +964,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
                break;
        case LEC_ACK_ERROR:
                netdev_dbg(dev, "ack error\n");
-               cf->data[2] |= (CAN_ERR_PROT_LOC_ACK |
+               cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
                                CAN_ERR_PROT_LOC_ACK_DEL);
                break;
        case LEC_BIT1_ERROR:
@@ -973,7 +977,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
                break;
        case LEC_CRC_ERROR:
                netdev_dbg(dev, "CRC error\n");
-               cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+               cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
                                CAN_ERR_PROT_LOC_CRC_DEL);
                break;
        default:
index 7d1748575b1fffc45f38351ea3f071269545548a..5c314a961970b0041c7776da283979f00c4114f8 100644 (file)
@@ -560,7 +560,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
                stats->rx_errors++;
                break;
        case PCH_CRC_ERR:
-               cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+               cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
                               CAN_ERR_PROT_LOC_CRC_DEL;
                priv->can.can_stats.bus_error++;
                stats->rx_errors++;
index d84888f03d92e28995cd4087b511db1ca3c3a4f1..600ac7226e5ca1da007561bac5e0ffd3eebe5424 100644 (file)
@@ -339,8 +339,7 @@ static void peak_pciec_set_leds(struct peak_pciec_card *card, u8 led_mask, u8 s)
  */
 static void peak_pciec_start_led_work(struct peak_pciec_card *card)
 {
-       if (!delayed_work_pending(&card->led_work))
-               schedule_delayed_work(&card->led_work, HZ);
+       schedule_delayed_work(&card->led_work, HZ);
 }
 
 /*
index f898c6363729b2a41be58c824f3f3bb145a95897..300581b24ff32860447e3646079024646e77a592 100644 (file)
@@ -746,12 +746,12 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
                }
                if (err_status & HECC_CANES_CRCE) {
                        hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
-                       cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+                       cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ |
                                        CAN_ERR_PROT_LOC_CRC_DEL;
                }
                if (err_status & HECC_CANES_ACKE) {
                        hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
-                       cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
+                       cf->data[3] |= CAN_ERR_PROT_LOC_ACK |
                                        CAN_ERR_PROT_LOC_ACK_DEL;
                }
        }
index 66df936380859b55ab0b8b15595319bfeb98c98a..ffd8de28a76ad2c69a96daab3c59dbe29127faa1 100644 (file)
@@ -432,7 +432,7 @@ static int tc574_config(struct pcmcia_device *link)
        netdev_info(dev, "%s at io %#3lx, irq %d, hw_addr %pM\n",
                    cardname, dev->base_addr, dev->irq, dev->dev_addr);
        netdev_info(dev, " %dK FIFO split %s Rx:Tx, %sMII interface.\n",
-                   8 << config & Ram_size,
+                   8 << (config & Ram_size),
                    ram_split[(config & Ram_split) >> Ram_split_shift],
                    config & Autoselect ? "autoselect " : "");
 
index e49c0eff040b50a95bdacc050f8ff3a83f172a8e..a9481606bbcd713f6fa446325521b2309b64c856 100644 (file)
@@ -61,6 +61,7 @@ config BFIN_RX_DESC_NUM
 
 config BFIN_MAC_USE_HWSTAMP
        bool "Use IEEE 1588 hwstamp"
+       depends on BFIN_MAC && BF518
        select PTP_1588_CLOCK
        default y
        ---help---
index 56d3f697e0c7f0a05d8044a37f954c8d536f1c0a..0035c01660b6149bc3177ada829a39d4720c805b 100644 (file)
@@ -21,7 +21,7 @@
 
 #include "atl1c.h"
 
-#define ATL1C_DRV_VERSION "1.0.1.0-NAPI"
+#define ATL1C_DRV_VERSION "1.0.1.1-NAPI"
 char atl1c_driver_name[] = "atl1c";
 char atl1c_driver_version[] = ATL1C_DRV_VERSION;
 
@@ -1652,6 +1652,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
        u16 num_alloc = 0;
        u16 rfd_next_to_use, next_next;
        struct atl1c_rx_free_desc *rfd_desc;
+       dma_addr_t mapping;
 
        next_next = rfd_next_to_use = rfd_ring->next_to_use;
        if (++next_next == rfd_ring->count)
@@ -1678,9 +1679,18 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
                ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
                buffer_info->skb = skb;
                buffer_info->length = adapter->rx_buffer_len;
-               buffer_info->dma = pci_map_single(pdev, vir_addr,
+               mapping = pci_map_single(pdev, vir_addr,
                                                buffer_info->length,
                                                PCI_DMA_FROMDEVICE);
+               if (unlikely(pci_dma_mapping_error(pdev, mapping))) {
+                       dev_kfree_skb(skb);
+                       buffer_info->skb = NULL;
+                       buffer_info->length = 0;
+                       ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
+                       netif_warn(adapter, rx_err, adapter->netdev, "RX pci_map_single failed");
+                       break;
+               }
+               buffer_info->dma = mapping;
                ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
                        ATL1C_PCIMAP_FROMDEVICE);
                rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -2015,7 +2025,29 @@ check_sum:
        return 0;
 }
 
-static void atl1c_tx_map(struct atl1c_adapter *adapter,
+static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
+                             struct atl1c_tpd_desc *first_tpd,
+                             enum atl1c_trans_queue type)
+{
+       struct atl1c_tpd_ring *tpd_ring = &adpt->tpd_ring[type];
+       struct atl1c_buffer *buffer_info;
+       struct atl1c_tpd_desc *tpd;
+       u16 first_index, index;
+
+       first_index = first_tpd - (struct atl1c_tpd_desc *)tpd_ring->desc;
+       index = first_index;
+       while (index != tpd_ring->next_to_use) {
+               tpd = ATL1C_TPD_DESC(tpd_ring, index);
+               buffer_info = &tpd_ring->buffer_info[index];
+               atl1c_clean_buffer(adpt->pdev, buffer_info, 0);
+               memset(tpd, 0, sizeof(struct atl1c_tpd_desc));
+               if (++index == tpd_ring->count)
+                       index = 0;
+       }
+       tpd_ring->next_to_use = first_index;
+}
+
+static int atl1c_tx_map(struct atl1c_adapter *adapter,
                      struct sk_buff *skb, struct atl1c_tpd_desc *tpd,
                        enum atl1c_trans_queue type)
 {
@@ -2040,7 +2072,10 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
                buffer_info->length = map_len;
                buffer_info->dma = pci_map_single(adapter->pdev,
                                        skb->data, hdr_len, PCI_DMA_TODEVICE);
-               ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
+               if (unlikely(pci_dma_mapping_error(adapter->pdev,
+                                                  buffer_info->dma)))
+                       goto err_dma;
+
                ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
                        ATL1C_PCIMAP_TODEVICE);
                mapped_len += map_len;
@@ -2062,6 +2097,10 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
                buffer_info->dma =
                        pci_map_single(adapter->pdev, skb->data + mapped_len,
                                        buffer_info->length, PCI_DMA_TODEVICE);
+               if (unlikely(pci_dma_mapping_error(adapter->pdev,
+                                                  buffer_info->dma)))
+                       goto err_dma;
+
                ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
                ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
                        ATL1C_PCIMAP_TODEVICE);
@@ -2083,6 +2122,9 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
                                                    frag, 0,
                                                    buffer_info->length,
                                                    DMA_TO_DEVICE);
+               if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
+                       goto err_dma;
+
                ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
                ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE,
                        ATL1C_PCIMAP_TODEVICE);
@@ -2095,6 +2137,13 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
        /* The last buffer info contain the skb address,
           so it will be free after unmap */
        buffer_info->skb = skb;
+
+       return 0;
+
+err_dma:
+       buffer_info->dma = 0;
+       buffer_info->length = 0;
+       return -1;
 }
 
 static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb,
@@ -2157,10 +2206,18 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
        if (skb_network_offset(skb) != ETH_HLEN)
                tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */
 
-       atl1c_tx_map(adapter, skb, tpd, type);
-       atl1c_tx_queue(adapter, skb, tpd, type);
+       if (atl1c_tx_map(adapter, skb, tpd, type) < 0) {
+               netif_info(adapter, tx_done, adapter->netdev,
+                          "tx-skb dropped due to dma error\n");
+               /* roll back tpd/buffer */
+               atl1c_tx_rollback(adapter, tpd, type);
+               spin_unlock_irqrestore(&adapter->tx_lock, flags);
+               dev_kfree_skb(skb);
+       } else {
+               atl1c_tx_queue(adapter, skb, tpd, type);
+               spin_unlock_irqrestore(&adapter->tx_lock, flags);
+       }
 
-       spin_unlock_irqrestore(&adapter->tx_lock, flags);
        return NETDEV_TX_OK;
 }
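
The atl1c hunks above all follow one pattern: every pci_map_single()/skb_frag_dma_map() result is now checked with the matching mapping-error helper, and a failure on the transmit path unwinds whatever was already mapped before the skb is dropped. A small userspace sketch of that unwind-on-partial-failure idea over a descriptor ring; the map/unmap calls are stand-ins, not the PCI DMA API:

#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE 8

struct buf { bool mapped; };

/* Stand-in for pci_map_single(): pretend the mapping of slot 4 fails. */
static bool fake_map(int idx) { return idx != 4; }
static void fake_unmap(int idx) { printf("unmap %d\n", idx); }

/* Map 'count' buffers starting at 'first'; on failure, roll back to 'first'. */
static int map_fragments(struct buf *ring, int first, int count)
{
        int i, idx = first;

        for (i = 0; i < count; i++) {
                if (!fake_map(idx)) {
                        /* Undo everything mapped so far before reporting failure. */
                        while (idx != first) {
                                idx = (idx + RING_SIZE - 1) % RING_SIZE;
                                fake_unmap(idx);
                                ring[idx].mapped = false;
                        }
                        return -1;
                }
                ring[idx].mapped = true;
                idx = (idx + 1) % RING_SIZE;
        }
        return 0;
}

int main(void)
{
        struct buf ring[RING_SIZE] = { { false } };

        if (map_fragments(ring, 2, 5) < 0)
                printf("mapping failed, ring restored\n");
        return 0;
}

The important property, which the rollback helper above provides for the real ring, is that after a failed attempt the descriptors look exactly as they did beforehand, so the next transmit starts from a clean state.
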
 
index 01588b66a38c0fea2d489b869d730fb8966c3419..a5edac8df67bb44576f61a70ab4ca9e825d644e7 100644 (file)
@@ -80,12 +80,37 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
                new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
        }
 
-       memcpy(&bp->bnx2x_txq[old_txdata_index],
-              &bp->bnx2x_txq[new_txdata_index],
+       memcpy(&bp->bnx2x_txq[new_txdata_index],
+              &bp->bnx2x_txq[old_txdata_index],
               sizeof(struct bnx2x_fp_txdata));
        to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
 }
 
+/**
+ * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
+ *
+ * @bp:        driver handle
+ * @delta:     number of eth queues which were not allocated
+ */
+static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
+{
+       int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
+
+       /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
+        * backward along the array could cause memory to be overridden
+        */
+       for (cos = 1; cos < bp->max_cos; cos++) {
+               for (i = 0; i < old_eth_num - delta; i++) {
+                       struct bnx2x_fastpath *fp = &bp->fp[i];
+                       int new_idx = cos * (old_eth_num - delta) + i;
+
+                       memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
+                              sizeof(struct bnx2x_fp_txdata));
+                       fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
+               }
+       }
+}
+
 int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
 
 /* free skb in the packet ring at pos idx
@@ -479,13 +504,11 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
                                        tpa_info->parsing_flags, len_on_bd);
 
-               /* set for GRO */
-               if (fp->mode == TPA_MODE_GRO)
-                       skb_shinfo(skb)->gso_type =
-                           (GET_FLAG(tpa_info->parsing_flags,
-                                     PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
-                                               PRS_FLAG_OVERETH_IPV6) ?
-                               SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
+               skb_shinfo(skb)->gso_type =
+                       (GET_FLAG(tpa_info->parsing_flags,
+                                 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
+                        PRS_FLAG_OVERETH_IPV6) ?
+                       SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
        }
 
 
@@ -3863,6 +3886,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
                int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
 
                WARN_ON(delta < 0);
+               bnx2x_shrink_eth_fp(bp, delta);
                if (CNIC_SUPPORT(bp))
                        /* move non eth FPs next to last eth FP
                         * must be done in that order
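
bnx2x_shrink_eth_fp() compacts the flat tx-queue array when fewer ethernet queues could be allocated than planned: the entry for class cos and queue i moves from slot cos * old_num + i to cos * new_num + i. Processing (cos, i) in increasing order keeps every destination at or before its source, so no entry is overwritten before it has been copied; class 0 already sits in place, which is why the loop starts at cos = 1. A standalone illustration of the remapping with plain ints instead of struct bnx2x_fp_txdata:

#include <stdio.h>

/* Compact a cos-major queue table from 'old_num' to 'new_num' columns,
 * walking forward so no not-yet-moved entry is overwritten. */
static void shrink_queues(int *txq, int max_cos, int old_num, int new_num)
{
        int cos, i;

        for (cos = 1; cos < max_cos; cos++)     /* row 0 is already in place */
                for (i = 0; i < new_num; i++)
                        txq[cos * new_num + i] = txq[cos * old_num + i];
}

int main(void)
{
        /* 3 traffic classes, 4 planned queues each; only 2 were allocated. */
        int txq[12];
        int i;

        for (i = 0; i < 12; i++)
                txq[i] = i;                     /* entry id == original slot */

        shrink_queues(txq, 3, 4, 2);

        for (i = 0; i < 3 * 2; i++)
                printf("slot %d -> entry %d\n", i, txq[i]);
        return 0;
}
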
index 277f17e3c8f850bac954db7d99c55b7f06d194eb..a427b49a886ccea8a6d904daa473a59f5a676c5b 100644 (file)
@@ -2777,10 +2777,10 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
                } else if ((info->flow_type == UDP_V6_FLOW) &&
                           (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
                        bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
-                       return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
                        DP(BNX2X_MSG_ETHTOOL,
                           "rss re-configured, UDP 4-tupple %s\n",
                           udp_rss_requested ? "enabled" : "disabled");
+                       return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
                } else {
                        return 0;
                }
index 940ef859dc6054338b884abce2c519e0ba4a89ea..5523da3afcdccd23f4b90c6191f722d02063c1d3 100644 (file)
@@ -127,6 +127,17 @@ MODULE_PARM_DESC(debug, " Default debug msglevel");
 
 struct workqueue_struct *bnx2x_wq;
 
+struct bnx2x_mac_vals {
+       u32 xmac_addr;
+       u32 xmac_val;
+       u32 emac_addr;
+       u32 emac_val;
+       u32 umac_addr;
+       u32 umac_val;
+       u32 bmac_addr;
+       u32 bmac_val[2];
+};
+
 enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711,
@@ -9420,12 +9431,19 @@ static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
                bnx2x_undi_int_disable_e1h(bp);
 }
 
-static void bnx2x_prev_unload_close_mac(struct bnx2x *bp)
+static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
+                                       struct bnx2x_mac_vals *vals)
 {
        u32 val, base_addr, offset, mask, reset_reg;
        bool mac_stopped = false;
        u8 port = BP_PORT(bp);
 
+       /* reset addresses as they also mark which values were changed */
+       vals->bmac_addr = 0;
+       vals->umac_addr = 0;
+       vals->xmac_addr = 0;
+       vals->emac_addr = 0;
+
        reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
 
        if (!CHIP_IS_E3(bp)) {
@@ -9447,14 +9465,18 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp)
                         */
                        wb_data[0] = REG_RD(bp, base_addr + offset);
                        wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
+                       vals->bmac_addr = base_addr + offset;
+                       vals->bmac_val[0] = wb_data[0];
+                       vals->bmac_val[1] = wb_data[1];
                        wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
-                       REG_WR(bp, base_addr + offset, wb_data[0]);
-                       REG_WR(bp, base_addr + offset + 0x4, wb_data[1]);
+                       REG_WR(bp, vals->bmac_addr, wb_data[0]);
+                       REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
 
                }
                BNX2X_DEV_INFO("Disable emac Rx\n");
-               REG_WR(bp, NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4, 0);
-
+               vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
+               vals->emac_val = REG_RD(bp, vals->emac_addr);
+               REG_WR(bp, vals->emac_addr, 0);
                mac_stopped = true;
        } else {
                if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
@@ -9465,14 +9487,18 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp)
                               val & ~(1 << 1));
                        REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
                               val | (1 << 1));
-                       REG_WR(bp, base_addr + XMAC_REG_CTRL, 0);
+                       vals->xmac_addr = base_addr + XMAC_REG_CTRL;
+                       vals->xmac_val = REG_RD(bp, vals->xmac_addr);
+                       REG_WR(bp, vals->xmac_addr, 0);
                        mac_stopped = true;
                }
                mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
                if (mask & reset_reg) {
                        BNX2X_DEV_INFO("Disable umac Rx\n");
                        base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
-                       REG_WR(bp, base_addr + UMAC_REG_COMMAND_CONFIG, 0);
+                       vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
+                       vals->umac_val = REG_RD(bp, vals->umac_addr);
+                       REG_WR(bp, vals->umac_addr, 0);
                        mac_stopped = true;
                }
        }
@@ -9664,12 +9690,16 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
 {
        u32 reset_reg, tmp_reg = 0, rc;
        bool prev_undi = false;
+       struct bnx2x_mac_vals mac_vals;
+
        /* It is possible a previous function received 'common' answer,
         * but hasn't loaded yet, therefore creating a scenario of
         * multiple functions receiving 'common' on the same path.
         */
        BNX2X_DEV_INFO("Common unload Flow\n");
 
+       memset(&mac_vals, 0, sizeof(mac_vals));
+
        if (bnx2x_prev_is_path_marked(bp))
                return bnx2x_prev_mcp_done(bp);
 
@@ -9680,7 +9710,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
                u32 timer_count = 1000;
 
                /* Close the MAC Rx to prevent BRB from filling up */
-               bnx2x_prev_unload_close_mac(bp);
+               bnx2x_prev_unload_close_mac(bp, &mac_vals);
+
+               /* close LLH filters towards the BRB */
+               bnx2x_set_rx_filter(&bp->link_params, 0);
 
                /* Check if the UNDI driver was previously loaded
                 * UNDI driver initializes CID offset for normal bell to 0x7
@@ -9727,6 +9760,17 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
        /* No packets are in the pipeline, path is ready for reset */
        bnx2x_reset_common(bp);
 
+       if (mac_vals.xmac_addr)
+               REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
+       if (mac_vals.umac_addr)
+               REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
+       if (mac_vals.emac_addr)
+               REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
+       if (mac_vals.bmac_addr) {
+               REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
+               REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
+       }
+
        rc = bnx2x_prev_mark_path(bp, prev_undi);
        if (rc) {
                bnx2x_prev_mcp_done(bp);
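
struct bnx2x_mac_vals records, for each MAC block the previous-unload path shuts down, both the register address and the value it held before being zeroed; a non-zero saved address later doubles as the "this one was actually touched" flag, so only modified registers are restored after the common reset. A minimal sketch of that save/clobber/restore convention, with a fake register file standing in for REG_RD/REG_WR:

#include <stdio.h>
#include <stdint.h>

static uint32_t regs[16];                       /* stand-in register file */

struct saved_reg {
        unsigned int addr;                      /* 0 means "never touched" */
        uint32_t val;
};

static void close_mac(struct saved_reg *s, unsigned int addr)
{
        s->addr = addr;                         /* remember what we changed... */
        s->val = regs[addr];                    /* ...and what it held */
        regs[addr] = 0;                         /* disable the block */
}

static void restore_mac(const struct saved_reg *s)
{
        if (s->addr)                            /* restore only if changed */
                regs[s->addr] = s->val;
}

int main(void)
{
        struct saved_reg emac = { 0 }, xmac = { 0 };

        regs[3] = 0xabcd;                       /* pretend EMAC enable register */

        close_mac(&emac, 3);                    /* xmac left untouched on this chip */
        /* ... reset / cleanup work happens here ... */
        restore_mac(&xmac);
        restore_mac(&emac);

        printf("reg[3]=0x%x\n", (unsigned)regs[3]);     /* back to 0xabcd */
        return 0;
}
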
index 78ea90c40e1902af09272a80b09010da52ae6c6b..bdb086934cd95de72cd1aa02cf1cb4b6baaaafc3 100644 (file)
@@ -1283,14 +1283,26 @@ static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
 }
 
-#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
-       tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
-                            MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
-                            MII_TG3_AUXCTL_ACTL_TX_6DB)
+static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
+{
+       u32 val;
+       int err;
 
-#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
-       tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
-                            MII_TG3_AUXCTL_ACTL_TX_6DB);
+       err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
+
+       if (err)
+               return err;
+       if (enable)
+               val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
+       else
+               val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
+
+       err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
+                                  val | MII_TG3_AUXCTL_ACTL_TX_6DB);
+
+       return err;
+}
 
 static int tg3_bmcr_reset(struct tg3 *tp)
 {
@@ -2223,7 +2235,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
 
        otp = tp->phy_otp;
 
-       if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
+       if (tg3_phy_toggle_auxctl_smdsp(tp, true))
                return;
 
        phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
@@ -2248,7 +2260,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
              ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
 
-       TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+       tg3_phy_toggle_auxctl_smdsp(tp, false);
 }
 
 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
@@ -2284,9 +2296,9 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
 
        if (!tp->setlpicnt) {
                if (current_link_up == 1 &&
-                  !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+                  !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                        tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
-                       TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+                       tg3_phy_toggle_auxctl_smdsp(tp, false);
                }
 
                val = tr32(TG3_CPMU_EEE_MODE);
@@ -2302,11 +2314,11 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
             tg3_flag(tp, 57765_CLASS)) &&
-           !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+           !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                val = MII_TG3_DSP_TAP26_ALNOKO |
                      MII_TG3_DSP_TAP26_RMRXSTO;
                tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
-               TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+               tg3_phy_toggle_auxctl_smdsp(tp, false);
        }
 
        val = tr32(TG3_CPMU_EEE_MODE);
@@ -2450,7 +2462,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
                tg3_writephy(tp, MII_CTRL1000,
                             CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
 
-               err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+               err = tg3_phy_toggle_auxctl_smdsp(tp, true);
                if (err)
                        return err;
 
@@ -2471,7 +2483,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
 
-       TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+       tg3_phy_toggle_auxctl_smdsp(tp, false);
 
        tg3_writephy(tp, MII_CTRL1000, phy9_orig);
 
@@ -2572,10 +2584,10 @@ static int tg3_phy_reset(struct tg3 *tp)
 
 out:
        if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
-           !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+           !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                tg3_phydsp_write(tp, 0x201f, 0x2aaa);
                tg3_phydsp_write(tp, 0x000a, 0x0323);
-               TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+               tg3_phy_toggle_auxctl_smdsp(tp, false);
        }
 
        if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
@@ -2584,14 +2596,14 @@ out:
        }
 
        if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
-               if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+               if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                        tg3_phydsp_write(tp, 0x000a, 0x310b);
                        tg3_phydsp_write(tp, 0x201f, 0x9506);
                        tg3_phydsp_write(tp, 0x401f, 0x14e2);
-                       TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+                       tg3_phy_toggle_auxctl_smdsp(tp, false);
                }
        } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
-               if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+               if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                        if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
@@ -2600,7 +2612,7 @@ out:
                        } else
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
 
-                       TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+                       tg3_phy_toggle_auxctl_smdsp(tp, false);
                }
        }
 
@@ -4009,7 +4021,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
        tw32(TG3_CPMU_EEE_MODE,
             tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
 
-       err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+       err = tg3_phy_toggle_auxctl_smdsp(tp, true);
        if (!err) {
                u32 err2;
 
@@ -4042,7 +4054,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
                                                 MII_TG3_DSP_CH34TP2_HIBW01);
                }
 
-               err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+               err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
                if (!err)
                        err = err2;
        }
@@ -6950,6 +6962,9 @@ static void tg3_poll_controller(struct net_device *dev)
        int i;
        struct tg3 *tp = netdev_priv(dev);
 
+       if (tg3_irq_sync(tp))
+               return;
+
        for (i = 0; i < tp->irq_cnt; i++)
                tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
 }
@@ -16367,6 +16382,7 @@ static int tg3_init_one(struct pci_dev *pdev,
        tp->pm_cap = pm_cap;
        tp->rx_mode = TG3_DEF_RX_MODE;
        tp->tx_mode = TG3_DEF_TX_MODE;
+       tp->irq_sync = 1;
 
        if (tg3_debug > 0)
                tp->msg_enable = tg3_debug;
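
Turning the TG3_PHY_AUXCTL_SMDSP_* macros into tg3_phy_toggle_auxctl_smdsp() changes the operation from a blind write of fixed values into a read-modify-write that only flips the SMDSP enable bit while keeping whatever else the AUXCTL shadow register held. A simplified userspace model of the difference; the bit positions are invented, not the MII_TG3_AUXCTL_* definitions:

#include <stdio.h>
#include <stdint.h>

#define SMDSP_ENA  (1u << 10)   /* hypothetical bit positions */
#define TX_6DB     (1u << 2)
#define OTHER_CFG  (1u << 7)

static uint16_t auxctl = OTHER_CFG;     /* pretend PHY register */

/* Blind write of fixed values (the old macro shape): clobbers OTHER_CFG. */
static void smdsp_macro_style(int enable)
{
        auxctl = (enable ? SMDSP_ENA : 0) | TX_6DB;
}

/* Read-modify-write (the new helper's shape): only the SMDSP bit changes. */
static void smdsp_toggle(int enable)
{
        uint16_t val = auxctl;                  /* auxctl read  */

        if (enable)
                val |= SMDSP_ENA;
        else
                val &= ~SMDSP_ENA;
        auxctl = val | TX_6DB;                  /* auxctl write */
}

int main(void)
{
        smdsp_toggle(1);
        printf("rmw:   0x%04x OTHER_CFG kept: %s\n", auxctl,
               (auxctl & OTHER_CFG) ? "yes" : "no");

        auxctl = OTHER_CFG;
        smdsp_macro_style(1);
        printf("blind: 0x%04x OTHER_CFG kept: %s\n", auxctl,
               (auxctl & OTHER_CFG) ? "yes" : "no");
        return 0;
}
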
index a9b0830fb39d90b3227fe14741c8a4a3f09dc2d6..b9d4bb9530e5da5e2f30958a2fad6e5347b8af99 100644 (file)
@@ -693,6 +693,11 @@ static int macb_poll(struct napi_struct *napi, int budget)
                 * get notified when new packets arrive.
                 */
                macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+
+               /* Packets received while interrupts were disabled */
+               status = macb_readl(bp, RSR);
+               if (unlikely(status))
+                       napi_reschedule(napi);
        }
 
        /* TODO: Handle errors */
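
The macb_poll() change closes a small race on NAPI completion: a frame that lands after the final ring scan but before the IER write may never raise a fresh interrupt, so the receive status register is read once more after interrupts are re-enabled and NAPI is rescheduled if anything is pending. A compact sketch of that completion path, with a plain flag standing in for the RSR read:

#include <stdio.h>
#include <stdbool.h>

/* Fake device state standing in for the RSR register and the IER write. */
static bool rx_pending;
static bool irq_enabled;

static int process_rx(int budget)
{
        /* Pretend we drained the ring; a new frame lands just as we finish. */
        rx_pending = true;
        return budget - 1;                      /* work_done < budget */
}

static bool napi_poll(int budget)
{
        int work_done = process_rx(budget);

        if (work_done < budget) {
                /* We think we are done, so interrupts go back on... */
                irq_enabled = true;

                /* ...but a frame may have arrived while they were off,
                 * so look at the status once more and reschedule if so. */
                if (rx_pending) {
                        printf("late frame spotted, rescheduling NAPI\n");
                        return true;
                }
        }
        return false;
}

int main(void)
{
        napi_poll(64);
        return 0;
}
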
index b407043ce9b0df3c1286fb3c0051f012cfa5e5b0..f7f02900f6508f0c8a7f8b6b41558aa85550e291 100644 (file)
@@ -548,6 +548,10 @@ static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
                return -1;
        }
 
+       /* All frames should fit into a single buffer */
+       if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG))
+               return -1;
+
        /* Check if packet has checksum already */
        if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
                !(ext_status & RXDESC_IP_PAYLOAD_MASK))
index f0718e1a83696b0e2ec0b8fad9121288ecda81bc..c306df7d45684ce1fcd2f25012e8100da98abdd2 100644 (file)
@@ -1994,9 +1994,20 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 {
        const struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
-
-       return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
-                       c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
+       struct sge_rspq *q;
+       int i;
+       int r = 0;
+
+       for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
+               q = &adap->sge.ethrxq[i].rspq;
+               r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
+                       c->rx_max_coalesced_frames);
+               if (r) {
+                       dev_err(&dev->dev, "failed to set coalesce %d\n", r);
+                       break;
+               }
+       }
+       return r;
 }
 
 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
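
The reworked set_coalesce() walks every RX response queue owned by the port, from pi->first_qset up to first_qset + nqsets - 1, instead of programming only the first one, and stops at the first queue that rejects the parameters so the error reaches the caller. The shape is simply apply-to-a-range, bail out on the first failure; a trivial standalone version:

#include <stdio.h>

/* Stand-in for set_rxq_intr_params(): reject absurd values. */
static int set_queue_coalesce(int q, int usecs)
{
        if (usecs > 1000)
                return -1;
        printf("queue %d: %d us\n", q, usecs);
        return 0;
}

/* Apply the same setting to every queue of a port, stop on the first error. */
static int set_port_coalesce(int first_qset, int nqsets, int usecs)
{
        int i, r = 0;

        for (i = first_qset; i < first_qset + nqsets; i++) {
                r = set_queue_coalesce(i, usecs);
                if (r)
                        break;
        }
        return r;
}

int main(void)
{
        return set_port_coalesce(4, 4, 100) ? 1 : 0;
}
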
index 3bc1912afba936a629b16c7193b2ccacedbe9f76..f1b3df167ff29a8c68fccce991e43f714d82daf3 100644 (file)
 
 #define DRV_VER                        "4.4.161.0u"
 #define DRV_NAME               "be2net"
-#define BE_NAME                        "ServerEngines BladeEngine2 10Gbps NIC"
-#define BE3_NAME               "ServerEngines BladeEngine3 10Gbps NIC"
-#define OC_NAME                        "Emulex OneConnect 10Gbps NIC"
+#define BE_NAME                        "Emulex BladeEngine2"
+#define BE3_NAME               "Emulex BladeEngine3"
+#define OC_NAME                        "Emulex OneConnect"
 #define OC_NAME_BE             OC_NAME "(be3)"
 #define OC_NAME_LANCER         OC_NAME "(Lancer)"
 #define OC_NAME_SH             OC_NAME "(Skyhawk)"
-#define DRV_DESC               "ServerEngines BladeEngine 10Gbps NIC Driver"
+#define DRV_DESC               "Emulex OneConnect 10Gbps NIC Driver"
 
 #define BE_VENDOR_ID           0x19a2
 #define EMULEX_VENDOR_ID       0x10df
@@ -190,6 +190,7 @@ struct be_eq_obj {
 
        u8 idx;                 /* array index */
        u16 tx_budget;
+       u16 spurious_intr;
        struct napi_struct napi;
        struct be_adapter *adapter;
 } ____cacheline_aligned_in_smp;
index 9dca22be81253bc1a66f5e9e136c4462acdeaed9..4d6f3c54427a903f34189fb7abe74795028fb46f 100644 (file)
@@ -25,7 +25,7 @@
 MODULE_VERSION(DRV_VER);
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
-MODULE_AUTHOR("ServerEngines Corporation");
+MODULE_AUTHOR("Emulex Corporation");
 MODULE_LICENSE("GPL");
 
 static unsigned int num_vfs;
@@ -2026,19 +2026,30 @@ static irqreturn_t be_intx(int irq, void *dev)
        struct be_adapter *adapter = eqo->adapter;
        int num_evts = 0;
 
-       /* On Lancer, clear-intr bit of the EQ DB does not work.
-        * INTx is de-asserted only on notifying num evts.
+       /* IRQ is not expected when NAPI is scheduled as the EQ
+        * will not be armed.
+        * But, this can happen on Lancer INTx where it takes
+        * a while to de-assert INTx or in BE2 where occasionally
+        * an interrupt may be raised even when EQ is unarmed.
+        * If NAPI is already scheduled, then counting & notifying
+        * events will orphan them.
         */

-       if (lancer_chip(adapter))
+       if (napi_schedule_prep(&eqo->napi)) {
                num_evts = events_get(eqo);
+               __napi_schedule(&eqo->napi);
+               if (num_evts)
+                       eqo->spurious_intr = 0;
+       }
+       be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
 
-       /* The EQ-notify may not de-assert INTx rightaway, causing
-        * the ISR to be invoked again. So, return HANDLED even when
-        * num_evts is zero.
+       /* Return IRQ_HANDLED only for the first spurious intr
+        * after a valid intr to stop the kernel from branding
+        * this irq as a bad one!
         */
-       be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
-       napi_schedule(&eqo->napi);
-       return IRQ_HANDLED;
+       if (num_evts || eqo->spurious_intr++ == 0)
+               return IRQ_HANDLED;
+       else
+               return IRQ_NONE;
 }
 
 static irqreturn_t be_msix(int irq, void *dev)
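
The rewritten be_intx() claims the interrupt only when it found events or when this is the first event-less interrupt since the last productive one; a second consecutive spurious interrupt returns IRQ_NONE so the kernel's bad-IRQ accounting can still trip if the line is genuinely stuck. The claim/deny decision reduces to a counter; a standalone rendering of just that logic:

#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

static unsigned int spurious_intr;

/* Mirror of the claim/deny decision: num_evts is how many events were
 * found on the event queue for this interrupt. */
static enum irqreturn intx_decision(int num_evts)
{
        if (num_evts)
                spurious_intr = 0;              /* productive interrupt */

        /* Claim the first spurious interrupt after a valid one, deny the rest. */
        if (num_evts || spurious_intr++ == 0)
                return IRQ_HANDLED;
        return IRQ_NONE;
}

int main(void)
{
        int evts[] = { 3, 0, 0, 0, 2, 0 };
        int i;

        for (i = 0; i < 6; i++)
                printf("num_evts=%d -> %s\n", evts[i],
                       intx_decision(evts[i]) == IRQ_HANDLED ? "IRQ_HANDLED"
                                                             : "IRQ_NONE");
        return 0;
}
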
index 02a12b69555f6fb5d71fe2089992f5fad5e354cc..4dab6fc265a27450a26664e1417c804961358a4a 100644 (file)
 #define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */
 #define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
 #define E1000_CTRL_LANPHYPC_VALUE    0x00020000 /* SW value of LANPHYPC */
+#define E1000_CTRL_MEHE     0x00080000  /* Memory Error Handling Enable */
 #define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
 #define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
 #define E1000_CTRL_SWDPIO0  0x00400000  /* SWDPIN 0 Input or output */
 
 #define E1000_PBS_16K E1000_PBA_16K
 
+/* Uncorrectable/correctable ECC Error counts and enable bits */
+#define E1000_PBECCSTS_CORR_ERR_CNT_MASK       0x000000FF
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK     0x0000FF00
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT    8
+#define E1000_PBECCSTS_ECC_ENABLE              0x00010000
+
 #define IFS_MAX       80
 #define IFS_MIN       40
 #define IFS_RATIO     4
 #define E1000_ICR_RXSEQ         0x00000008 /* Rx sequence error */
 #define E1000_ICR_RXDMT0        0x00000010 /* Rx desc min. threshold (0) */
 #define E1000_ICR_RXT0          0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_ECCER         0x00400000 /* Uncorrectable ECC Error */
 #define E1000_ICR_INT_ASSERTED  0x80000000 /* If this bit asserted, the driver should claim the interrupt */
 #define E1000_ICR_RXQ0          0x00100000 /* Rx Queue 0 Interrupt */
 #define E1000_ICR_RXQ1          0x00200000 /* Rx Queue 1 Interrupt */
 #define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* Rx sequence error */
 #define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* Rx desc min. threshold */
 #define E1000_IMS_RXT0      E1000_ICR_RXT0      /* Rx timer intr */
+#define E1000_IMS_ECCER     E1000_ICR_ECCER     /* Uncorrectable ECC Error */
 #define E1000_IMS_RXQ0      E1000_ICR_RXQ0      /* Rx Queue 0 Interrupt */
 #define E1000_IMS_RXQ1      E1000_ICR_RXQ1      /* Rx Queue 1 Interrupt */
 #define E1000_IMS_TXQ0      E1000_ICR_TXQ0      /* Tx Queue 0 Interrupt */
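
The PBECCSTS layout implied by the masks above is a packed pair of 8-bit counters: the low byte counts correctable ECC errors and the next byte, after masking and shifting, counts uncorrectable ones. The interrupt handlers and e1000e_update_stats() later in this series all split the register the same way; a tiny worked example with an invented register value:

#include <stdio.h>
#include <stdint.h>

#define E1000_PBECCSTS_CORR_ERR_CNT_MASK       0x000000FF
#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK     0x0000FF00
#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT    8

int main(void)
{
        uint32_t pbeccsts = 0x00010305;         /* example value, not real hw */
        unsigned int corr, uncorr;

        /* Same split the driver does in its interrupt and stats paths. */
        corr   = pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
        uncorr = (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
                 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;

        printf("correctable=%u uncorrectable=%u\n", corr, uncorr);  /* 5 and 3 */
        return 0;
}
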
index 6782a2eea1bcbfb893681b8d26428d8b0410981b..7e95f221d60b15e58b17d177ee870ecdfd2b0352 100644 (file)
@@ -309,6 +309,8 @@ struct e1000_adapter {
 
        struct napi_struct napi;
 
+       unsigned int uncorr_errors;     /* uncorrectable ECC errors */
+       unsigned int corr_errors;       /* correctable ECC errors */
        unsigned int restart_queue;
        u32 txd_cmd;
 
index f95bc6ee1c227c89a2e158fde491dc03ef173b4b..fd4772a2691c510fb83e5e2c9b9211c55f116465 100644 (file)
@@ -108,6 +108,8 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
        E1000_STAT("dropped_smbus", stats.mgpdc),
        E1000_STAT("rx_dma_failed", rx_dma_failed),
        E1000_STAT("tx_dma_failed", tx_dma_failed),
+       E1000_STAT("uncorr_ecc_errors", uncorr_errors),
+       E1000_STAT("corr_ecc_errors", corr_errors),
 };
 
 #define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
index cf217777586c246561120c0877516f22d3ff6b89..b88676ff3d86d19f365f94e4a9cc973c7ea3f092 100644 (file)
@@ -77,6 +77,7 @@ enum e1e_registers {
 #define E1000_POEMB    E1000_PHY_CTRL  /* PHY OEM Bits */
        E1000_PBA      = 0x01000, /* Packet Buffer Allocation - RW */
        E1000_PBS      = 0x01008, /* Packet Buffer Size */
+       E1000_PBECCSTS = 0x0100C, /* Packet Buffer ECC Status - RW */
        E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
        E1000_EEWR     = 0x0102C, /* EEPROM Write Register - RW */
        E1000_FLOP     = 0x0103C, /* FLASH Opcode Register */
index 976336547607e31caef68c8eaf528ddac78682da..24d9f61956f095ccfd5937ebba5ed35e8c2dc5d0 100644 (file)
@@ -3624,6 +3624,17 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
        if (hw->mac.type == e1000_ich8lan)
                reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
        ew32(RFCTL, reg);
+
+       /* Enable ECC on Lynxpoint */
+       if (hw->mac.type == e1000_pch_lpt) {
+               reg = er32(PBECCSTS);
+               reg |= E1000_PBECCSTS_ECC_ENABLE;
+               ew32(PBECCSTS, reg);
+
+               reg = er32(CTRL);
+               reg |= E1000_CTRL_MEHE;
+               ew32(CTRL, reg);
+       }
 }
 
 /**
index fbf75fdca99422743f35d3d9a114fcdef235563f..643c883dd795a48384c95dcd0795f3bbe95f6291 100644 (file)
@@ -1678,6 +1678,23 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
+       /* Reset on uncorrectable ECC error */
+       if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+               u32 pbeccsts = er32(PBECCSTS);
+
+               adapter->corr_errors +=
+                   pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+               adapter->uncorr_errors +=
+                   (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+                   E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+
+               /* Do the reset outside of interrupt context */
+               schedule_work(&adapter->reset_task);
+
+               /* return immediately since reset is imminent */
+               return IRQ_HANDLED;
+       }
+
        if (napi_schedule_prep(&adapter->napi)) {
                adapter->total_tx_bytes = 0;
                adapter->total_tx_packets = 0;
@@ -1741,6 +1758,23 @@ static irqreturn_t e1000_intr(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
+       /* Reset on uncorrectable ECC error */
+       if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+               u32 pbeccsts = er32(PBECCSTS);
+
+               adapter->corr_errors +=
+                   pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+               adapter->uncorr_errors +=
+                   (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+                   E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+
+               /* Do the reset outside of interrupt context */
+               schedule_work(&adapter->reset_task);
+
+               /* return immediately since reset is imminent */
+               return IRQ_HANDLED;
+       }
+
        if (napi_schedule_prep(&adapter->napi)) {
                adapter->total_tx_bytes = 0;
                adapter->total_tx_packets = 0;
@@ -2104,6 +2138,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
        if (adapter->msix_entries) {
                ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
                ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
+       } else if (hw->mac.type == e1000_pch_lpt) {
+               ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
        } else {
                ew32(IMS, IMS_ENABLE_MASK);
        }
@@ -4251,6 +4287,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
        adapter->stats.mgptc += er32(MGTPTC);
        adapter->stats.mgprc += er32(MGTPRC);
        adapter->stats.mgpdc += er32(MGTPDC);
+
+       /* Correctable ECC Errors */
+       if (hw->mac.type == e1000_pch_lpt) {
+               u32 pbeccsts = er32(PBECCSTS);
+               adapter->corr_errors +=
+                   pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+               adapter->uncorr_errors +=
+                   (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+                   E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+       }
 }
 
 /**
index f3a632bf8d96f8bd76983e8990c80f1cefe984ac..687c83d1bdabb7589ac4492e9763b863ac5fcef0 100644 (file)
@@ -32,7 +32,7 @@
 
 obj-$(CONFIG_IXGBE) += ixgbe.o
 
-ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\
+ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
               ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
               ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o
 
@@ -40,4 +40,5 @@ ixgbe-$(CONFIG_IXGBE_DCB) +=  ixgbe_dcb.o ixgbe_dcb_82598.o \
                               ixgbe_dcb_82599.o ixgbe_dcb_nl.o
 
 ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
+ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o
 ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
index 50aa546b8c7a589100be6bd88eac590adf314d98..3504686d3af515ed7f5e04924be29cd9098f2061 100644 (file)
@@ -24,9 +24,6 @@
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
 *******************************************************************************/
-
-#ifdef CONFIG_DEBUG_FS
-
 #include <linux/debugfs.h>
 #include <linux/module.h>
 
@@ -277,5 +274,3 @@ void ixgbe_dbg_exit(void)
 {
        debugfs_remove_recursive(ixgbe_dbg_root);
 }
-
-#endif /* CONFIG_DEBUG_FS */
index 20a5af6d87d0e8747027964ba01a03bd7e3ca24c..b3e3294cfe53320f4b70a9484e39bd317583f8fc 100644 (file)
@@ -1401,6 +1401,7 @@ static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
        /* set gso_size to avoid messing up TCP MSS */
        skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
                                                 IXGBE_CB(skb)->append_cnt);
+       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 }
 
 static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
index 1a751c9d09c47f319e45d76a9c2bbad8573016d3..bb9256a1b0a9b467412ce127911ed3cfb53a7053 100644 (file)
@@ -660,11 +660,11 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
-               tsync_rx_mtrl = IXGBE_RXMTRL_V1_SYNC_MSG;
+               tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
-               tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
+               tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
                break;
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
index 2b799f4f1c37100f06eab93aef628d4124182230..6771b69f40d562bf58909a1f806bddd1ace9ea65 100644 (file)
@@ -630,10 +630,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                ring->tx_csum++;
        }
 
-       /* Copy dst mac address to wqe */
-       ethh = (struct ethhdr *)skb->data;
-       tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
-       tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
+       if (mlx4_is_mfunc(mdev->dev) || priv->validate_loopback) {
+               /* Copy dst mac address to wqe. This allows loopback in eSwitch,
+                * so that VFs and PF can communicate with each other
+                */
+               ethh = (struct ethhdr *)skb->data;
+               tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
+               tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
+       }
+
        /* Handle LSO (TSO) packets */
        if (lso_header_size) {
                /* Mark opcode as LSO */
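
The mlx4 hunk copies the destination MAC out of the packet headers with get_unaligned() because skb->data may not be aligned for 16- and 32-bit loads, and it now does so only when the eSwitch loopback can actually use the address. In portable userspace code the same "read a wide value from a byte pointer of unknown alignment" is normally written with memcpy, which compilers lower to a single load where the platform allows; a small sketch:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Read 16/32-bit values from an arbitrarily aligned buffer, the portable way. */
static uint16_t get_unaligned_u16(const void *p)
{
        uint16_t v;
        memcpy(&v, p, sizeof(v));
        return v;
}

static uint32_t get_unaligned_u32(const void *p)
{
        uint32_t v;
        memcpy(&v, p, sizeof(v));
        return v;
}

int main(void)
{
        /* A fake frame: 6-byte destination MAC at offset 0, as in struct ethhdr. */
        unsigned char frame[] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0xaa, 0xbb };

        uint16_t hi = get_unaligned_u16(frame);         /* first 2 MAC bytes */
        uint32_t lo = get_unaligned_u32(frame + 2);     /* remaining 4 bytes */

        printf("hi=0x%04x lo=0x%08x\n", hi, (unsigned)lo);
        return 0;
}
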
index e1bafffbc3b1d261f4cf0972dd3f95c942c5fc3d..5163af314990d2793d529b7db24e752490eb9645 100644 (file)
@@ -380,7 +380,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                }
        }
 
-       if ((dev_cap->flags &
+       if ((dev->caps.flags &
            (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
            mlx4_is_master(dev))
                dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
@@ -1790,15 +1790,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
        int i;
 
        if (msi_x) {
-               /* In multifunction mode each function gets 2 msi-X vectors
-                * one for data path completions anf the other for asynch events
-                * or command completions */
-               if (mlx4_is_mfunc(dev)) {
-                       nreq = 2;
-               } else {
-                       nreq = min_t(int, dev->caps.num_eqs -
-                                    dev->caps.reserved_eqs, nreq);
-               }
+               nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
+                            nreq);
 
                entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
                if (!entries)
index bc165f4d0f65cebb6cc82e3aa99590863963f767..695667d471a1bef8dc81dd8034a593a118993e07 100644 (file)
@@ -144,7 +144,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
                                         buffrag->length, PCI_DMA_TODEVICE);
                        buffrag->dma = 0ULL;
                }
-               for (j = 0; j < cmd_buf->frag_count; j++) {
+               for (j = 1; j < cmd_buf->frag_count; j++) {
                        buffrag++;
                        if (buffrag->dma) {
                                pci_unmap_page(adapter->pdev, buffrag->dma,
index 6098fd4adfeb89ef5ed9c15407799ddbdcf02e7a..69e321a650779c9d3bae975a273035b922fca08f 100644 (file)
@@ -1963,10 +1963,12 @@ unwind:
        while (--i >= 0) {
                nf = &pbuf->frag_array[i+1];
                pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+               nf->dma = 0ULL;
        }
 
        nf = &pbuf->frag_array[0];
        pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+       nf->dma = 0ULL;
 
 out_err:
        return -ENOMEM;
index 6f82812d0fab9e2e5697b3770d2c3c524b5daeab..09aa310b619475a82c6dffc81752f8584ff9ddee 100644 (file)
@@ -986,8 +986,13 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
        th->seq = htonl(seq_number);
        length = skb->len;
 
-       if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+       if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
                skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
+               if (skb->protocol == htons(ETH_P_IPV6))
+                       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+               else
+                       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+       }
 
        if (vid != 0xffff)
                __vlan_hwaccel_put_tag(skb, vid);
index f80cd975daed2b84a31af47db58b828d28139638..3e73742024b0cb5af229df1336a9a3aeab653b1f 100644 (file)
@@ -4678,7 +4678,7 @@ static int qlge_probe(struct pci_dev *pdev,
        qdev = netdev_priv(ndev);
        SET_NETDEV_DEV(ndev, &pdev->dev);
        ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
-               NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
+               NETIF_F_TSO | NETIF_F_TSO_ECN |
                NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
        ndev->features = ndev->hw_features |
                NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
index ed96f309bca8e030a0c098df8b8e728314f04bb0..998974f7874253f67880009d96f1e80f4788a408 100644 (file)
@@ -450,7 +450,6 @@ enum rtl8168_registers {
 #define PWM_EN                         (1 << 22)
 #define RXDV_GATED_EN                  (1 << 19)
 #define EARLY_TALLY_EN                 (1 << 16)
-#define FORCE_CLK                      (1 << 15) /* force clock request */
 };
 
 enum rtl_register_content {
@@ -514,7 +513,6 @@ enum rtl_register_content {
        PMEnable        = (1 << 0),     /* Power Management Enable */
 
        /* Config2 register p. 25 */
-       ClkReqEn        = (1 << 7),     /* Clock Request Enable */
        MSIEnable       = (1 << 5),     /* 8169 only. Reserved in the 8168. */
        PCI_Clock_66MHz = 0x01,
        PCI_Clock_33MHz = 0x00,
@@ -535,7 +533,6 @@ enum rtl_register_content {
        Spi_en          = (1 << 3),
        LanWake         = (1 << 1),     /* LanWake enable/disable */
        PMEStatus       = (1 << 0),     /* PME status can be reset by PCI RST# */
-       ASPM_en         = (1 << 0),     /* ASPM enable */
 
        /* TBICSR p.28 */
        TBIReset        = 0x80000000,
@@ -684,7 +681,6 @@ enum features {
        RTL_FEATURE_WOL         = (1 << 0),
        RTL_FEATURE_MSI         = (1 << 1),
        RTL_FEATURE_GMII        = (1 << 2),
-       RTL_FEATURE_FW_LOADED   = (1 << 3),
 };
 
 struct rtl8169_counters {
@@ -1826,8 +1822,6 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
 
        if (opts2 & RxVlanTag)
                __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
-
-       desc->opts2 = 0;
 }
 
 static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2391,10 +2385,8 @@ static void rtl_apply_firmware(struct rtl8169_private *tp)
        struct rtl_fw *rtl_fw = tp->rtl_fw;
 
        /* TODO: release firmware once rtl_phy_write_fw signals failures. */
-       if (!IS_ERR_OR_NULL(rtl_fw)) {
+       if (!IS_ERR_OR_NULL(rtl_fw))
                rtl_phy_write_fw(tp, rtl_fw);
-               tp->features |= RTL_FEATURE_FW_LOADED;
-       }
 }
 
 static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
@@ -2405,31 +2397,6 @@ static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
                rtl_apply_firmware(tp);
 }
 
-static void r810x_aldps_disable(struct rtl8169_private *tp)
-{
-       rtl_writephy(tp, 0x1f, 0x0000);
-       rtl_writephy(tp, 0x18, 0x0310);
-       msleep(100);
-}
-
-static void r810x_aldps_enable(struct rtl8169_private *tp)
-{
-       if (!(tp->features & RTL_FEATURE_FW_LOADED))
-               return;
-
-       rtl_writephy(tp, 0x1f, 0x0000);
-       rtl_writephy(tp, 0x18, 0x8310);
-}
-
-static void r8168_aldps_enable_1(struct rtl8169_private *tp)
-{
-       if (!(tp->features & RTL_FEATURE_FW_LOADED))
-               return;
-
-       rtl_writephy(tp, 0x1f, 0x0000);
-       rtl_w1w0_phy(tp, 0x15, 0x1000, 0x0000);
-}
-
 static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
 {
        static const struct phy_reg phy_reg_init[] = {
@@ -3220,8 +3187,6 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
        rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
        rtl_writephy(tp, 0x1f, 0x0000);
 
-       r8168_aldps_enable_1(tp);
-
        /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
        rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
 }
@@ -3296,8 +3261,6 @@ static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
        rtl_writephy(tp, 0x05, 0x8b85);
        rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
        rtl_writephy(tp, 0x1f, 0x0000);
-
-       r8168_aldps_enable_1(tp);
 }
 
 static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
@@ -3305,8 +3268,6 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
        rtl_apply_firmware(tp);
 
        rtl8168f_hw_phy_config(tp);
-
-       r8168_aldps_enable_1(tp);
 }
 
 static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
@@ -3404,8 +3365,6 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
        rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
        rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
        rtl_writephy(tp, 0x1f, 0x0000);
-
-       r8168_aldps_enable_1(tp);
 }
 
 static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
@@ -3491,19 +3450,21 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
        };
 
        /* Disable ALDPS before ram code */
-       r810x_aldps_disable(tp);
+       rtl_writephy(tp, 0x1f, 0x0000);
+       rtl_writephy(tp, 0x18, 0x0310);
+       msleep(100);
 
        rtl_apply_firmware(tp);
 
        rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
-
-       r810x_aldps_enable(tp);
 }
 
 static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
 {
        /* Disable ALDPS before setting firmware */
-       r810x_aldps_disable(tp);
+       rtl_writephy(tp, 0x1f, 0x0000);
+       rtl_writephy(tp, 0x18, 0x0310);
+       msleep(20);
 
        rtl_apply_firmware(tp);
 
@@ -3513,8 +3474,6 @@ static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
        rtl_writephy(tp, 0x10, 0x401f);
        rtl_writephy(tp, 0x19, 0x7030);
        rtl_writephy(tp, 0x1f, 0x0000);
-
-       r810x_aldps_enable(tp);
 }
 
 static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
@@ -3527,7 +3486,9 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
        };
 
        /* Disable ALDPS before ram code */
-       r810x_aldps_disable(tp);
+       rtl_writephy(tp, 0x1f, 0x0000);
+       rtl_writephy(tp, 0x18, 0x0310);
+       msleep(100);
 
        rtl_apply_firmware(tp);
 
@@ -3535,8 +3496,6 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
        rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 
        rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
-
-       r810x_aldps_enable(tp);
 }
 
 static void rtl_hw_phy_config(struct net_device *dev)
@@ -5053,6 +5012,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
 
        RTL_W8(MaxTxPacketSize, EarlySize);
 
+       rtl_disable_clock_request(pdev);
+
        RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
        RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
 
@@ -5061,8 +5022,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
 
        RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
        RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
-       RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
-       RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
+       RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
 }
 
 static void rtl_hw_start_8168f(struct rtl8169_private *tp)
@@ -5087,12 +5047,13 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
 
        RTL_W8(MaxTxPacketSize, EarlySize);
 
+       rtl_disable_clock_request(pdev);
+
        RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
        RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
        RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
-       RTL_W32(MISC, RTL_R32(MISC) | PWM_EN | FORCE_CLK);
-       RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
-       RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
+       RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
+       RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
 }
 
 static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
@@ -5149,10 +5110,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
        rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
 
        RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
-       RTL_W32(MISC, (RTL_R32(MISC) | FORCE_CLK) & ~RXDV_GATED_EN);
+       RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
        RTL_W8(MaxTxPacketSize, EarlySize);
-       RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
-       RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
 
        rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
@@ -5368,9 +5327,6 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
 
        RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
        RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
-       RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
-       RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
-       RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
 
        rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
 }
@@ -5396,9 +5352,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
 
        RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
        RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
-       RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
-       RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
-       RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
 
        rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
 
@@ -5420,10 +5373,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
        /* Force LAN exit from ASPM if Rx/Tx are not idle */
        RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
 
-       RTL_W32(MISC,
-               (RTL_R32(MISC) | DISABLE_LAN_EN | FORCE_CLK) & ~EARLY_TALLY_EN);
-       RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
-       RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
+       RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
        RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
        RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
 }
@@ -6064,8 +6014,6 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
                            !(status & (RxRWT | RxFOVF)) &&
                            (dev->features & NETIF_F_RXALL))
                                goto process_pkt;
-
-                       rtl8169_mark_to_asic(desc, rx_buf_sz);
                } else {
                        struct sk_buff *skb;
                        dma_addr_t addr;
@@ -6086,16 +6034,14 @@ process_pkt:
                        if (unlikely(rtl8169_fragmented_frame(status))) {
                                dev->stats.rx_dropped++;
                                dev->stats.rx_length_errors++;
-                               rtl8169_mark_to_asic(desc, rx_buf_sz);
-                               continue;
+                               goto release_descriptor;
                        }
 
                        skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
                                                  tp, pkt_size, addr);
-                       rtl8169_mark_to_asic(desc, rx_buf_sz);
                        if (!skb) {
                                dev->stats.rx_dropped++;
-                               continue;
+                               goto release_descriptor;
                        }
 
                        rtl8169_rx_csum(skb, status);
@@ -6111,13 +6057,10 @@ process_pkt:
                        tp->rx_stats.bytes += pkt_size;
                        u64_stats_update_end(&tp->rx_stats.syncp);
                }
-
-               /* Work around for AMD plateform. */
-               if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
-                   (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
-                       desc->opts2 = 0;
-                       cur_rx++;
-               }
+release_descriptor:
+               desc->opts2 = 0;
+               wmb();
+               rtl8169_mark_to_asic(desc, rx_buf_sz);
        }
 
        count = cur_rx - tp->cur_rx;
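
The reworked r8169 receive loop sends every outcome, dropped fragment, failed copy or delivered packet, through the single release_descriptor label: clear opts2, issue wmb(), and only then hand the descriptor back to the NIC, so the device can never observe a half-updated descriptor. A rough C11 analogue of that publish-after-barrier step, using a release store where the driver uses wmb() before rtl8169_mark_to_asic():

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct rx_desc {
        uint32_t opts2;
        _Atomic uint32_t opts1;         /* ownership flag lives here */
};

#define DESC_OWN  0x80000000u

/* Give the descriptor back to the "device": clear software state first,
 * then publish ownership with release ordering. */
static void release_descriptor(struct rx_desc *d, uint32_t size)
{
        d->opts2 = 0;
        atomic_store_explicit(&d->opts1, DESC_OWN | size, memory_order_release);
}

int main(void)
{
        struct rx_desc d = { .opts2 = 0xdead, .opts1 = 0 };

        release_descriptor(&d, 1536);
        printf("opts1=0x%08x opts2=0x%08x\n",
               (unsigned)atomic_load(&d.opts1), (unsigned)d.opts2);
        return 0;
}
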
index f07c0612abf6843ffe7b41a8ecc2623aff42298f..b75f4b286895021fd587b880a66c17aa4251780b 100644 (file)
@@ -69,7 +69,7 @@
 
 #undef STMMAC_XMIT_DEBUG
 /*#define STMMAC_XMIT_DEBUG*/
-#ifdef STMMAC_TX_DEBUG
+#ifdef STMMAC_XMIT_DEBUG
 #define TX_DBG(fmt, args...)  printk(fmt, ## args)
 #else
 #define TX_DBG(fmt, args...)  do { } while (0)
index 0376a5e6b2bf9c0584e45bf2e34c6f5ee8608033..0b9829fe3eea7344886060d826730aa13e6eba3e 100644 (file)
@@ -188,8 +188,6 @@ int stmmac_mdio_register(struct net_device *ndev)
                goto bus_register_fail;
        }
 
-       priv->mii = new_bus;
-
        found = 0;
        for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
                struct phy_device *phydev = new_bus->phy_map[addr];
@@ -237,8 +235,14 @@ int stmmac_mdio_register(struct net_device *ndev)
                }
        }
 
-       if (!found)
+       if (!found) {
                pr_warning("%s: No PHY found\n", ndev->name);
+               mdiobus_unregister(new_bus);
+               mdiobus_free(new_bus);
+               return -ENODEV;
+       }
+
+       priv->mii = new_bus;
 
        return 0;
 
index 7992b3e05d3dd5a48f7701114a2264d23f3ce4e8..78ace59efd297531001c171d47351c2fe9bb8ef9 100644 (file)
@@ -1801,7 +1801,7 @@ static void rhine_tx(struct net_device *dev)
                                         rp->tx_skbuff[entry]->len,
                                         PCI_DMA_TODEVICE);
                }
-               dev_kfree_skb_irq(rp->tx_skbuff[entry]);
+               dev_kfree_skb(rp->tx_skbuff[entry]);
                rp->tx_skbuff[entry] = NULL;
                entry = (++rp->dirty_tx) % TX_RING_SIZE;
        }
@@ -2010,11 +2010,7 @@ static void rhine_slow_event_task(struct work_struct *work)
        if (intr_status & IntrPCIErr)
                netif_warn(rp, hw, dev, "PCI error\n");
 
-       napi_disable(&rp->napi);
-       rhine_irq_disable(rp);
-       /* Slow and safe. Consider __napi_schedule as a replacement ? */
-       napi_enable(&rp->napi);
-       napi_schedule(&rp->napi);
+       iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
 
 out_unlock:
        mutex_unlock(&rp->task_lock);
index 5778a4ae116466d637288fe668fb990ac623fb6e..122d60c0481b0d8eab2a95b56bc495665390a0ec 100644 (file)
@@ -27,7 +27,7 @@ config XILINX_EMACLITE
 
 config XILINX_AXI_EMAC
        tristate "Xilinx 10/100/1000 AXI Ethernet support"
-       depends on (PPC32 || MICROBLAZE)
+       depends on MICROBLAZE
        select PHYLIB
        ---help---
          This driver supports the 10/100/1000 Ethernet from Xilinx for the
index d9f69b82cc4ff638981798e23bbe3c0e9e6329f1..6f47100e58d71584ea13bb0c83c1607ca967a58a 100644 (file)
@@ -1590,7 +1590,7 @@ static int axienet_of_probe(struct platform_device *op)
        lp->rx_irq = irq_of_parse_and_map(np, 1);
        lp->tx_irq = irq_of_parse_and_map(np, 0);
        of_node_put(np);
-       if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
+       if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
                dev_err(&op->dev, "could not determine irqs\n");
                ret = -ENOMEM;
                goto err_iounmap_2;
index 5fd6f4674326f0d3236ae8a97127b842ffffa008..e6fe0d80d6122fcd8dfcf7bb65fa71dac68c1e73 100644 (file)
@@ -84,7 +84,7 @@ struct hv_netvsc_packet {
 };
 
 struct netvsc_device_info {
-       unsigned char mac_adr[6];
+       unsigned char mac_adr[ETH_ALEN];
        bool link_state;        /* 0 - link up, 1 - link down */
        int  ring_size;
 };
index f825a629a699cfe5fac73803353da4b47ca18974..8264f0ef7692f5c832a12336a1188f9c7620190f 100644 (file)
@@ -349,7 +349,7 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
        struct net_device_context *ndevctx = netdev_priv(ndev);
        struct hv_device *hdev =  ndevctx->device_ctx;
        struct sockaddr *addr = p;
-       char save_adr[14];
+       char save_adr[ETH_ALEN];
        unsigned char save_aatype;
        int err;
 
index 81f8f9e31db510892acae3cdb39a799de7e21be9..fcbf680c3e62f73af4933896641045de34f1f68c 100644 (file)
@@ -77,6 +77,11 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
 
        skb_orphan(skb);
 
+       /* Before queueing this packet to netif_rx(),
+        * make sure dst is refcounted.
+        */
+       skb_dst_force(skb);
+
        skb->protocol = eth_type_trans(skb, dev);
 
        /* it's OK to use per_cpu_ptr() because BHs are off */
index 68a43fe602e7a89ccffa54d1a5b82e2da4e5330c..d3fb97d97cbcb61f68deda7c3009a83393515656 100644 (file)
@@ -822,7 +822,10 @@ static int macvlan_changelink(struct net_device *dev,
 
 static size_t macvlan_get_size(const struct net_device *dev)
 {
-       return nla_total_size(4);
+       return (0
+               + nla_total_size(4) /* IFLA_MACVLAN_MODE */
+               + nla_total_size(2) /* IFLA_MACVLAN_FLAGS */
+               );
 }
 
 static int macvlan_fill_info(struct sk_buff *skb,
index d5199cb4caecd06c1774295765476d3a508e4a10..b5ddd5077a80dd9c6324c92e68a1ecd8a03314e7 100644 (file)
@@ -36,8 +36,9 @@ MODULE_LICENSE("GPL");
 
 /* IP101A/G - IP1001 */
 #define IP10XX_SPEC_CTRL_STATUS                16      /* Spec. Control Register */
+#define IP1001_RXPHASE_SEL             (1<<0)  /* Add delay on RX_CLK */
+#define IP1001_TXPHASE_SEL             (1<<1)  /* Add delay on TX_CLK */
 #define IP1001_SPEC_CTRL_STATUS_2      20      /* IP1001 Spec. Control Reg 2 */
-#define IP1001_PHASE_SEL_MASK          3       /* IP1001 RX/TXPHASE_SEL */
 #define IP1001_APS_ON                  11      /* IP1001 APS Mode  bit */
 #define IP101A_G_APS_ON                        2       /* IP101A/G APS Mode bit */
 #define IP101A_G_IRQ_CONF_STATUS       0x11    /* Conf Info IRQ & Status Reg */
@@ -138,19 +139,24 @@ static int ip1001_config_init(struct phy_device *phydev)
        if (c < 0)
                return c;
 
-       /* INTR pin used: speed/link/duplex will cause an interrupt */
-       c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
-       if (c < 0)
-               return c;
+       if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
+           (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
+           (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
+           (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
 
-       if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
-               /* Additional delay (2ns) used to adjust RX clock phase
-                * at RGMII interface */
                c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
                if (c < 0)
                        return c;
 
-               c |= IP1001_PHASE_SEL_MASK;
+               c &= ~(IP1001_RXPHASE_SEL | IP1001_TXPHASE_SEL);
+
+               if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+                       c |= (IP1001_RXPHASE_SEL | IP1001_TXPHASE_SEL);
+               else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+                       c |= IP1001_RXPHASE_SEL;
+               else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+                       c |= IP1001_TXPHASE_SEL;
+
                c = phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
                if (c < 0)
                        return c;
@@ -167,6 +173,11 @@ static int ip101a_g_config_init(struct phy_device *phydev)
        if (c < 0)
                return c;
 
+       /* INTR pin used: speed/link/duplex will cause an interrupt */
+       c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
+       if (c < 0)
+               return c;
+
        /* Enable Auto Power Saving mode */
        c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
        c |= IP101A_G_APS_ON;
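
Restating the RGMII delay selection above for quick reference (a summary of the code, not additional logic):

    /*
     * PHY_INTERFACE_MODE_RGMII       -> both delay bits cleared (no PHY-added delay)
     * PHY_INTERFACE_MODE_RGMII_ID    -> IP1001_RXPHASE_SEL and IP1001_TXPHASE_SEL set
     * PHY_INTERFACE_MODE_RGMII_RXID  -> IP1001_RXPHASE_SEL set only
     * PHY_INTERFACE_MODE_RGMII_TXID  -> IP1001_TXPHASE_SEL set only
     */
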
index 5d2a3f2158876bb39ffc02f2647a57676b1bb718..22dec9c7ef05d7480e87b575605e2240f4476a20 100644 (file)
@@ -353,15 +353,6 @@ static int m88e1111_config_init(struct phy_device *phydev)
        int err;
        int temp;
 
-       /* Enable Fiber/Copper auto selection */
-       temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
-       temp &= ~MII_M1111_HWCFG_FIBER_COPPER_AUTO;
-       phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
-
-       temp = phy_read(phydev, MII_BMCR);
-       temp |= BMCR_RESET;
-       phy_write(phydev, MII_BMCR, temp);
-
        if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
            (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
            (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
index fbd106edbe59bdcae871e29d6fd70c993634cdea..2917a86f4c43ec44a3e18566acd7dd00d071660a 100644 (file)
@@ -109,11 +109,11 @@ struct tap_filter {
        unsigned char   addr[FLT_EXACT_COUNT][ETH_ALEN];
 };
 
-/* 1024 is probably a high enough limit: modern hypervisors seem to support on
- * the order of 100-200 CPUs so this leaves us some breathing space if we want
- * to match a queue per guest CPU.
- */
-#define MAX_TAP_QUEUES 1024
+/* DEFAULT_MAX_NUM_RSS_QUEUES was chosen so that the rx/tx queues allocated for
+ * the netdevice fit in one page, ensuring the memory allocation succeeds.
+ * TODO: increase the limit. */
+#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
+#define MAX_TAP_FLOWS  4096
 
 #define TUN_FLOW_EXPIRE (3 * HZ)
 
@@ -185,6 +185,8 @@ struct tun_struct {
        unsigned long ageing_time;
        unsigned int numdisabled;
        struct list_head disabled;
+       void *security;
+       u32 flow_count;
 };
 
 static inline u32 tun_hashfn(u32 rxhash)
@@ -218,6 +220,7 @@ static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
                e->queue_index = queue_index;
                e->tun = tun;
                hlist_add_head_rcu(&e->hash_link, head);
+               ++tun->flow_count;
        }
        return e;
 }
@@ -228,6 +231,7 @@ static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
                  e->rxhash, e->queue_index);
        hlist_del_rcu(&e->hash_link);
        kfree_rcu(e, rcu);
+       --tun->flow_count;
 }
 
 static void tun_flow_flush(struct tun_struct *tun)
@@ -294,11 +298,12 @@ static void tun_flow_cleanup(unsigned long data)
 }
 
 static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
-                           u16 queue_index)
+                           struct tun_file *tfile)
 {
        struct hlist_head *head;
        struct tun_flow_entry *e;
        unsigned long delay = tun->ageing_time;
+       u16 queue_index = tfile->queue_index;
 
        if (!rxhash)
                return;
@@ -307,7 +312,9 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
 
        rcu_read_lock();
 
-       if (tun->numqueues == 1)
+       /* There is a small chance of out-of-order delivery while switching
+        * queues; not worth optimizing for. */
+       if (tun->numqueues == 1 || tfile->detached)
                goto unlock;
 
        e = tun_flow_find(head, rxhash);
@@ -317,7 +324,8 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
                e->updated = jiffies;
        } else {
                spin_lock_bh(&tun->lock);
-               if (!tun_flow_find(head, rxhash))
+               if (!tun_flow_find(head, rxhash) &&
+                   tun->flow_count < MAX_TAP_FLOWS)
                        tun_flow_create(tun, head, rxhash, queue_index);
 
                if (!timer_pending(&tun->flow_gc_timer))
@@ -404,24 +412,23 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
        struct tun_struct *tun;
        struct net_device *dev;
 
-       tun = rcu_dereference_protected(tfile->tun,
-                                       lockdep_rtnl_is_held());
-       if (tun) {
+       tun = rtnl_dereference(tfile->tun);
+
+       if (tun && !tfile->detached) {
                u16 index = tfile->queue_index;
                BUG_ON(index >= tun->numqueues);
                dev = tun->dev;
 
                rcu_assign_pointer(tun->tfiles[index],
                                   tun->tfiles[tun->numqueues - 1]);
-               rcu_assign_pointer(tfile->tun, NULL);
-               ntfile = rcu_dereference_protected(tun->tfiles[index],
-                                                  lockdep_rtnl_is_held());
+               ntfile = rtnl_dereference(tun->tfiles[index]);
                ntfile->queue_index = index;
 
                --tun->numqueues;
-               if (clean)
+               if (clean) {
+                       rcu_assign_pointer(tfile->tun, NULL);
                        sock_put(&tfile->sk);
-               else
+               } else
                        tun_disable_queue(tun, tfile);
 
                synchronize_net();
@@ -429,14 +436,19 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
                /* Drop read queue */
                skb_queue_purge(&tfile->sk.sk_receive_queue);
                tun_set_real_num_queues(tun);
-       } else if (tfile->detached && clean)
+       } else if (tfile->detached && clean) {
                tun = tun_enable_queue(tfile);
+               sock_put(&tfile->sk);
+       }
 
        if (clean) {
-               if (tun && tun->numqueues == 0 && tun->numdisabled == 0 &&
-                   !(tun->flags & TUN_PERSIST))
-                       if (tun->dev->reg_state == NETREG_REGISTERED)
+               if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
+                       netif_carrier_off(tun->dev);
+
+                       if (!(tun->flags & TUN_PERSIST) &&
+                           tun->dev->reg_state == NETREG_REGISTERED)
                                unregister_netdevice(tun->dev);
+               }
 
                BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
                                 &tfile->socket.flags));
@@ -458,19 +470,21 @@ static void tun_detach_all(struct net_device *dev)
        int i, n = tun->numqueues;
 
        for (i = 0; i < n; i++) {
-               tfile = rcu_dereference_protected(tun->tfiles[i],
-                                                 lockdep_rtnl_is_held());
+               tfile = rtnl_dereference(tun->tfiles[i]);
                BUG_ON(!tfile);
                wake_up_all(&tfile->wq.wait);
                rcu_assign_pointer(tfile->tun, NULL);
                --tun->numqueues;
        }
+       list_for_each_entry(tfile, &tun->disabled, next) {
+               wake_up_all(&tfile->wq.wait);
+               rcu_assign_pointer(tfile->tun, NULL);
+       }
        BUG_ON(tun->numqueues != 0);
 
        synchronize_net();
        for (i = 0; i < n; i++) {
-               tfile = rcu_dereference_protected(tun->tfiles[i],
-                                                 lockdep_rtnl_is_held());
+               tfile = rtnl_dereference(tun->tfiles[i]);
                /* Drop read queue */
                skb_queue_purge(&tfile->sk.sk_receive_queue);
                sock_put(&tfile->sk);
@@ -481,6 +495,9 @@ static void tun_detach_all(struct net_device *dev)
                sock_put(&tfile->sk);
        }
        BUG_ON(tun->numdisabled != 0);
+
+       if (tun->flags & TUN_PERSIST)
+               module_put(THIS_MODULE);
 }
 
 static int tun_attach(struct tun_struct *tun, struct file *file)
@@ -488,8 +505,12 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
        struct tun_file *tfile = file->private_data;
        int err;
 
+       err = security_tun_dev_attach(tfile->socket.sk, tun->security);
+       if (err < 0)
+               goto out;
+
        err = -EINVAL;
-       if (rcu_dereference_protected(tfile->tun, lockdep_rtnl_is_held()))
+       if (rtnl_dereference(tfile->tun) && !tfile->detached)
                goto out;
 
        err = -EBUSY;
@@ -1188,7 +1209,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        tun->dev->stats.rx_packets++;
        tun->dev->stats.rx_bytes += len;
 
-       tun_flow_update(tun, rxhash, tfile->queue_index);
+       tun_flow_update(tun, rxhash, tfile);
        return total_len;
 }
 
@@ -1371,6 +1392,7 @@ static void tun_free_netdev(struct net_device *dev)
 
        BUG_ON(!(list_empty(&tun->disabled)));
        tun_flow_uninit(tun);
+       security_tun_dev_free_security(tun->security);
        free_netdev(dev);
 }
 
@@ -1544,6 +1566,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
        struct net_device *dev;
        int err;
 
+       if (tfile->detached)
+               return -EINVAL;
+
        dev = __dev_get_by_name(net, ifr->ifr_name);
        if (dev) {
                if (ifr->ifr_flags & IFF_TUN_EXCL)
@@ -1557,7 +1582,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
                if (tun_not_capable(tun))
                        return -EPERM;
-               err = security_tun_dev_attach(tfile->socket.sk);
+               err = security_tun_dev_open(tun->security);
                if (err < 0)
                        return err;
 
@@ -1572,6 +1597,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
        else {
                char *name;
                unsigned long flags = 0;
+               int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
+                            MAX_TAP_QUEUES : 1;
 
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        return -EPERM;
@@ -1595,8 +1622,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                        name = ifr->ifr_name;
 
                dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
-                                      tun_setup,
-                                      MAX_TAP_QUEUES, MAX_TAP_QUEUES);
+                                      tun_setup, queues, queues);
+
                if (!dev)
                        return -ENOMEM;
 
@@ -1614,7 +1641,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
                spin_lock_init(&tun->lock);
 
-               security_tun_dev_post_create(&tfile->sk);
+               err = security_tun_dev_alloc_security(&tun->security);
+               if (err < 0)
+                       goto err_free_dev;
 
                tun_net_init(dev);
 
@@ -1639,10 +1668,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
                    device_create_file(&tun->dev->dev, &dev_attr_group))
                        pr_err("Failed to create tun sysfs files\n");
-
-               netif_carrier_on(tun->dev);
        }
 
+       netif_carrier_on(tun->dev);
+
        tun_debug(KERN_INFO, tun, "tun_set_iff\n");
 
        if (ifr->ifr_flags & IFF_NO_PI)
@@ -1738,8 +1767,7 @@ static void tun_detach_filter(struct tun_struct *tun, int n)
        struct tun_file *tfile;
 
        for (i = 0; i < n; i++) {
-               tfile = rcu_dereference_protected(tun->tfiles[i],
-                                                 lockdep_rtnl_is_held());
+               tfile = rtnl_dereference(tun->tfiles[i]);
                sk_detach_filter(tfile->socket.sk);
        }
 
@@ -1752,8 +1780,7 @@ static int tun_attach_filter(struct tun_struct *tun)
        struct tun_file *tfile;
 
        for (i = 0; i < tun->numqueues; i++) {
-               tfile = rcu_dereference_protected(tun->tfiles[i],
-                                                 lockdep_rtnl_is_held());
+               tfile = rtnl_dereference(tun->tfiles[i]);
                ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
                if (ret) {
                        tun_detach_filter(tun, i);
@@ -1771,8 +1798,7 @@ static void tun_set_sndbuf(struct tun_struct *tun)
        int i;
 
        for (i = 0; i < tun->numqueues; i++) {
-               tfile = rcu_dereference_protected(tun->tfiles[i],
-                                               lockdep_rtnl_is_held());
+               tfile = rtnl_dereference(tun->tfiles[i]);
                tfile->socket.sk->sk_sndbuf = tun->sndbuf;
        }
 }
@@ -1787,22 +1813,24 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
 
        if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
                tun = tfile->detached;
-               if (!tun)
+               if (!tun) {
                        ret = -EINVAL;
-               else if (tun_not_capable(tun))
-                       ret = -EPERM;
-               else
-                       ret = tun_attach(tun, file);
+                       goto unlock;
+               }
+               ret = security_tun_dev_attach_queue(tun->security);
+               if (ret < 0)
+                       goto unlock;
+               ret = tun_attach(tun, file);
        } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
-               tun = rcu_dereference_protected(tfile->tun,
-                                               lockdep_rtnl_is_held());
-               if (!tun || !(tun->flags & TUN_TAP_MQ))
+               tun = rtnl_dereference(tfile->tun);
+               if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
                        ret = -EINVAL;
                else
                        __tun_detach(tfile, false);
        } else
                ret = -EINVAL;
 
+unlock:
        rtnl_unlock();
        return ret;
 }
@@ -1880,10 +1908,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                /* Disable/Enable persist mode. Keep an extra reference to the
                 * module to prevent the module being unprobed.
                 */
-               if (arg) {
+               if (arg && !(tun->flags & TUN_PERSIST)) {
                        tun->flags |= TUN_PERSIST;
                        __module_get(THIS_MODULE);
-               } else {
+               }
+               if (!arg && (tun->flags & TUN_PERSIST)) {
                        tun->flags &= ~TUN_PERSIST;
                        module_put(THIS_MODULE);
                }
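
For reference, the attach/detach path that tun_set_queue() serves above is driven from user space through the TUNSETQUEUE ioctl. A minimal user-space sketch (illustrative only, error handling omitted):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/if.h>
    #include <linux/if_tun.h>

    /* Detach or re-attach one queue of a multiqueue tap device, given an fd
     * that was opened on /dev/net/tun and attached with IFF_MULTI_QUEUE.
     */
    static int tap_queue_set(int fd, int attach)
    {
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            ifr.ifr_flags = attach ? IFF_ATTACH_QUEUE : IFF_DETACH_QUEUE;
            return ioctl(fd, TUNSETQUEUE, (void *)&ifr);
    }
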
index 42f51c71ec1fc7444c668153bdaa200a2b0f4315..248d2dc765a5c06c64ab3c27c47f38d36bf14c20 100644 (file)
@@ -374,6 +374,21 @@ static const struct driver_info cdc_mbim_info = {
        .tx_fixup = cdc_mbim_tx_fixup,
 };
 
+/* MBIM and NCM devices should not need a ZLP after NTBs with
+ * dwNtbOutMaxSize length. This driver_info is for the exceptional
+ * devices requiring it anyway, allowing them to be supported without
+ * forcing the performance penalty on all the sane devices.
+ */
+static const struct driver_info cdc_mbim_info_zlp = {
+       .description = "CDC MBIM",
+       .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP,
+       .bind = cdc_mbim_bind,
+       .unbind = cdc_mbim_unbind,
+       .manage_power = cdc_mbim_manage_power,
+       .rx_fixup = cdc_mbim_rx_fixup,
+       .tx_fixup = cdc_mbim_tx_fixup,
+};
+
 static const struct usb_device_id mbim_devs[] = {
        /* This duplicate NCM entry is intentional. MBIM devices can
         * be disguised as NCM by default, and this is necessary to
@@ -385,6 +400,10 @@ static const struct usb_device_id mbim_devs[] = {
        { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
          .driver_info = (unsigned long)&cdc_mbim_info,
        },
+       /* Sierra Wireless MC7710 needs ZLPs */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+         .driver_info = (unsigned long)&cdc_mbim_info_zlp,
+       },
        { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
          .driver_info = (unsigned long)&cdc_mbim_info,
        },
index 71b6e92b8e9b687c70d0eda9416f1e235a788747..00d3b2d37828f3f81afb6705965e80e128e7bb3d 100644 (file)
@@ -435,6 +435,13 @@ advance:
                len -= temp;
        }
 
+       /* some buggy devices have an IAD but no CDC Union */
+       if (!ctx->union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) {
+               ctx->control = intf;
+               ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1);
+               dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n");
+       }
+
        /* check if we got everything */
        if ((ctx->control == NULL) || (ctx->data == NULL) ||
            ((!ctx->mbim_desc) && ((ctx->ether_desc == NULL) || (ctx->control != intf))))
@@ -497,7 +504,8 @@ advance:
 error2:
        usb_set_intfdata(ctx->control, NULL);
        usb_set_intfdata(ctx->data, NULL);
-       usb_driver_release_interface(driver, ctx->data);
+       if (ctx->data != ctx->control)
+               usb_driver_release_interface(driver, ctx->data);
 error:
        cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]);
        dev->data[0] = 0;
@@ -1155,6 +1163,20 @@ static const struct driver_info wwan_info = {
        .tx_fixup = cdc_ncm_tx_fixup,
 };
 
+/* Same as wwan_info, but with FLAG_NOARP  */
+static const struct driver_info wwan_noarp_info = {
+       .description = "Mobile Broadband Network Device (NO ARP)",
+       .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+                       | FLAG_WWAN | FLAG_NOARP,
+       .bind = cdc_ncm_bind,
+       .unbind = cdc_ncm_unbind,
+       .check_connect = cdc_ncm_check_connect,
+       .manage_power = usbnet_manage_power,
+       .status = cdc_ncm_status,
+       .rx_fixup = cdc_ncm_rx_fixup,
+       .tx_fixup = cdc_ncm_tx_fixup,
+};
+
 static const struct usb_device_id cdc_devs[] = {
        /* Ericsson MBM devices like F5521gw */
        { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
@@ -1193,6 +1215,16 @@ static const struct usb_device_id cdc_devs[] = {
        { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
          .driver_info = (unsigned long)&wwan_info,
        },
+       { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
+         .driver_info = (unsigned long)&wwan_info,
+       },
+
+       /* Infineon (now Intel) HSPA Modem platform */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x1519, 0x0443,
+               USB_CLASS_COMM,
+               USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+         .driver_info = (unsigned long)&wwan_noarp_info,
+       },
 
        /* Generic CDC-NCM devices */
        { USB_INTERFACE_INFO(USB_CLASS_COMM,
index 3f554c1149f36d2cf3221ee38e4f02507d566ba7..d7e99445518eb5981d064a7378d7fcf02e4bf5cd 100644 (file)
 #define DM_MCAST_ADDR  0x16    /* 8 bytes */
 #define DM_GPR_CTRL    0x1e
 #define DM_GPR_DATA    0x1f
+#define DM_CHIP_ID     0x2c
+#define DM_MODE_CTRL   0x91    /* only on dm9620 */
+
+/* chip id values */
+#define ID_DM9601      0
+#define ID_DM9620      1
 
 #define DM_MAX_MCAST   64
 #define DM_MCAST_SIZE  8
@@ -53,7 +59,6 @@
 #define DM_RX_OVERHEAD 7       /* 3 byte header + 4 byte crc tail */
 #define DM_TIMEOUT     1000
 
-
 static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data)
 {
        int err;
@@ -84,32 +89,23 @@ static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
 
 static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
 {
-       return usbnet_write_cmd(dev, DM_WRITE_REGS,
+       return usbnet_write_cmd(dev, DM_WRITE_REG,
                                USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                                value, reg, NULL, 0);
 }
 
-static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value,
-                                 u16 length, void *data)
+static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
 {
        usbnet_write_cmd_async(dev, DM_WRITE_REGS,
                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-                              value, reg, data, length);
-}
-
-static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
-{
-       netdev_dbg(dev->net, "dm_write_async() reg=0x%02x length=%d\n", reg, length);
-
-       dm_write_async_helper(dev, reg, 0, length, data);
+                              0, reg, data, length);
 }
 
 static void dm_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
 {
-       netdev_dbg(dev->net, "dm_write_reg_async() reg=0x%02x value=0x%02x\n",
-                  reg, value);
-
-       dm_write_async_helper(dev, reg, value, 0, NULL);
+       usbnet_write_cmd_async(dev, DM_WRITE_REG,
+                              USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+                              value, reg, NULL, 0);
 }
 
 static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 *value)
@@ -358,7 +354,7 @@ static const struct net_device_ops dm9601_netdev_ops = {
 static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
 {
        int ret;
-       u8 mac[ETH_ALEN];
+       u8 mac[ETH_ALEN], id;
 
        ret = usbnet_get_endpoints(dev, intf);
        if (ret)
@@ -399,6 +395,24 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
                __dm9601_set_mac_address(dev);
        }
 
+       if (dm_read_reg(dev, DM_CHIP_ID, &id) < 0) {
+               netdev_err(dev->net, "Error reading chip ID\n");
+               ret = -ENODEV;
+               goto out;
+       }
+
+       /* put dm9620 devices in dm9601 mode */
+       if (id == ID_DM9620) {
+               u8 mode;
+
+               if (dm_read_reg(dev, DM_MODE_CTRL, &mode) < 0) {
+                       netdev_err(dev->net, "Error reading MODE_CTRL\n");
+                       ret = -ENODEV;
+                       goto out;
+               }
+               dm_write_reg(dev, DM_MODE_CTRL, mode & 0x7f);
+       }
+
        /* power up phy */
        dm_write_reg(dev, DM_GPR_CTRL, 1);
        dm_write_reg(dev, DM_GPR_DATA, 0);
@@ -581,6 +595,10 @@ static const struct usb_device_id products[] = {
         USB_DEVICE(0x0a46, 0x9000),    /* DM9000E */
         .driver_info = (unsigned long)&dm9601_info,
         },
+       {
+        USB_DEVICE(0x0a46, 0x9620),    /* DM9620 USB to Fast Ethernet Adapter */
+        .driver_info = (unsigned long)&dm9601_info,
+        },
        {},                     // END
 };
 
index 6a1ca500e61267e9ba8858d3ed7405990b28e43d..19d903598b0dcf9d85097ed5a15d742bf0873566 100644 (file)
@@ -351,6 +351,10 @@ static const struct usb_device_id products[] = {
                USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57),
                .driver_info        = (unsigned long)&qmi_wwan_info,
        },
+       {       /* HUAWEI_INTERFACE_NDIS_CONTROL_QUALCOMM */
+               USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
+               .driver_info        = (unsigned long)&qmi_wwan_info,
+       },
 
        /* 2. Combined interface devices matching on class+protocol */
        {       /* Huawei E367 and possibly others in "Windows mode" */
@@ -361,6 +365,14 @@ static const struct usb_device_id products[] = {
                USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),
                .driver_info        = (unsigned long)&qmi_wwan_info,
        },
+       {       /* HUAWEI_NDIS_SINGLE_INTERFACE_VDF */
+               USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x37),
+               .driver_info        = (unsigned long)&qmi_wwan_info,
+       },
+       {       /* HUAWEI_INTERFACE_NDIS_HW_QUALCOMM */
+               USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x67),
+               .driver_info        = (unsigned long)&qmi_wwan_info,
+       },
        {       /* Pantech UML290, P4200 and more */
                USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),
                .driver_info        = (unsigned long)&qmi_wwan_info,
@@ -399,6 +411,7 @@ static const struct usb_device_id products[] = {
        },
 
        /* 3. Combined interface devices matching on interface number */
+       {QMI_FIXED_INTF(0x0408, 0xea42, 4)},    /* Yota / Megafon M100-1 */
        {QMI_FIXED_INTF(0x12d1, 0x140c, 1)},    /* Huawei E173 */
        {QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
        {QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
@@ -433,6 +446,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x19d2, 0x0199, 1)},    /* ZTE MF820S */
        {QMI_FIXED_INTF(0x19d2, 0x0200, 1)},
        {QMI_FIXED_INTF(0x19d2, 0x0257, 3)},    /* ZTE MF821 */
+       {QMI_FIXED_INTF(0x19d2, 0x0265, 4)},    /* ONDA MT8205 4G LTE */
        {QMI_FIXED_INTF(0x19d2, 0x0284, 4)},    /* ZTE MF880 */
        {QMI_FIXED_INTF(0x19d2, 0x0326, 4)},    /* ZTE MF821D */
        {QMI_FIXED_INTF(0x19d2, 0x1008, 4)},    /* ZTE (Vodafone) K3570-Z */
@@ -459,6 +473,8 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1199, 0x68a2, 19)},   /* Sierra Wireless MC7710 in QMI mode */
        {QMI_FIXED_INTF(0x1199, 0x901c, 8)},    /* Sierra Wireless EM7700 */
        {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},    /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
+       {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
+       {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
 
        /* 4. Gobi 1000 devices */
        {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
index 3d4bf01641b49f0f7c5d820a547885806378c4e2..5e33606c136639f9b9ad7cf38e7ed8c48c7a51eb 100644 (file)
@@ -380,6 +380,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
        unsigned long           lockflags;
        size_t                  size = dev->rx_urb_size;
 
+       /* prevent rx skb allocation when error ratio is high */
+       if (test_bit(EVENT_RX_KILL, &dev->flags)) {
+               usb_free_urb(urb);
+               return -ENOLINK;
+       }
+
        skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
        if (!skb) {
                netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
@@ -539,6 +545,17 @@ block:
                break;
        }
 
+       /* stop rx if packet error rate is high */
+       if (++dev->pkt_cnt > 30) {
+               dev->pkt_cnt = 0;
+               dev->pkt_err = 0;
+       } else {
+               if (state == rx_cleanup)
+                       dev->pkt_err++;
+               if (dev->pkt_err > 20)
+                       set_bit(EVENT_RX_KILL, &dev->flags);
+       }
+
        state = defer_bh(dev, skb, &dev->rxq, state);
 
        if (urb) {
@@ -791,6 +808,11 @@ int usbnet_open (struct net_device *net)
                   (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
                   "simple");
 
+       /* reset rx error state */
+       dev->pkt_cnt = 0;
+       dev->pkt_err = 0;
+       clear_bit(EVENT_RX_KILL, &dev->flags);
+
        // delay posting reads until we're fully open
        tasklet_schedule (&dev->bh);
        if (info->manage_power) {
@@ -1103,13 +1125,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
        if (info->tx_fixup) {
                skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
                if (!skb) {
-                       if (netif_msg_tx_err(dev)) {
-                               netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
-                               goto drop;
-                       } else {
-                               /* cdc_ncm collected packet; waits for more */
+                       /* packet collected; minidriver waiting for more */
+                       if (info->flags & FLAG_MULTI_PACKET)
                                goto not_drop;
-                       }
+                       netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
+                       goto drop;
                }
        }
        length = skb->len;
@@ -1254,6 +1274,9 @@ static void usbnet_bh (unsigned long param)
                }
        }
 
+       /* restart RX again after disabling due to high error rate */
+       clear_bit(EVENT_RX_KILL, &dev->flags);
+
        // waiting for all pending urbs to complete?
        if (dev->wait) {
                if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
@@ -1448,6 +1471,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
                if ((dev->driver_info->flags & FLAG_WWAN) != 0)
                        strcpy(net->name, "wwan%d");
 
+               /* devices that cannot do ARP */
+               if ((dev->driver_info->flags & FLAG_NOARP) != 0)
+                       net->flags |= IFF_NOARP;
+
                /* maybe the remote can't receive an Ethernet MTU */
                if (net->mtu > (dev->hard_mtu - net->hard_header_len))
                        net->mtu = dev->hard_mtu - net->hard_header_len;
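
The two usbnet hunks above implement a simple windowed error-rate check; restated as a standalone sketch (names hypothetical):

    /* Count packets in windows of 30; once more than 20 of a window have
     * ended in error, report that rx should be paused (usbnet clears the
     * condition again from its bh handler).
     */
    static bool rx_error_rate_too_high(unsigned int *pkt_cnt,
                                       unsigned int *pkt_err, bool was_error)
    {
            if (++(*pkt_cnt) > 30) {
                    *pkt_cnt = 0;
                    *pkt_err = 0;
                    return false;
            }
            if (was_error)
                    (*pkt_err)++;
            return *pkt_err > 20;
    }
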
index a6fcf15adc4ff3d36d928f44cc0ac1f49d249820..35c00c5ea02adcbdf6f2855b7ee506544944843a 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/scatterlist.h>
 #include <linux/if_vlan.h>
 #include <linux/slab.h>
+#include <linux/cpu.h>
 
 static int napi_weight = 128;
 module_param(napi_weight, int, 0444);
@@ -123,6 +124,12 @@ struct virtnet_info {
 
        /* Does the affinity hint is set for virtqueues? */
        bool affinity_hint_set;
+
+       /* Per-cpu variable to show the mapping from CPU to virtqueue */
+       int __percpu *vq_index;
+
+       /* CPU hot plug notifier */
+       struct notifier_block nb;
 };
 
 struct skb_vnet_hdr {
@@ -1013,32 +1020,75 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
        return 0;
 }
 
-static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
+static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
 {
        int i;
+       int cpu;
+
+       if (vi->affinity_hint_set) {
+               for (i = 0; i < vi->max_queue_pairs; i++) {
+                       virtqueue_set_affinity(vi->rq[i].vq, -1);
+                       virtqueue_set_affinity(vi->sq[i].vq, -1);
+               }
+
+               vi->affinity_hint_set = false;
+       }
+
+       i = 0;
+       for_each_online_cpu(cpu) {
+               if (cpu == hcpu) {
+                       *per_cpu_ptr(vi->vq_index, cpu) = -1;
+               } else {
+                       *per_cpu_ptr(vi->vq_index, cpu) =
+                               ++i % vi->curr_queue_pairs;
+               }
+       }
+}
+
+static void virtnet_set_affinity(struct virtnet_info *vi)
+{
+       int i;
+       int cpu;
 
        /* In multiqueue mode, when the number of cpu is equal to the number of
         * queue pairs, we let the queue pairs to be private to one cpu by
         * setting the affinity hint to eliminate the contention.
         */
-       if ((vi->curr_queue_pairs == 1 ||
-            vi->max_queue_pairs != num_online_cpus()) && set) {
-               if (vi->affinity_hint_set)
-                       set = false;
-               else
-                       return;
+       if (vi->curr_queue_pairs == 1 ||
+           vi->max_queue_pairs != num_online_cpus()) {
+               virtnet_clean_affinity(vi, -1);
+               return;
        }
 
-       for (i = 0; i < vi->max_queue_pairs; i++) {
-               int cpu = set ? i : -1;
+       i = 0;
+       for_each_online_cpu(cpu) {
                virtqueue_set_affinity(vi->rq[i].vq, cpu);
                virtqueue_set_affinity(vi->sq[i].vq, cpu);
+               *per_cpu_ptr(vi->vq_index, cpu) = i;
+               i++;
        }
 
-       if (set)
-               vi->affinity_hint_set = true;
-       else
-               vi->affinity_hint_set = false;
+       vi->affinity_hint_set = true;
+}
+
+static int virtnet_cpu_callback(struct notifier_block *nfb,
+                               unsigned long action, void *hcpu)
+{
+       struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
+
+       switch(action & ~CPU_TASKS_FROZEN) {
+       case CPU_ONLINE:
+       case CPU_DOWN_FAILED:
+       case CPU_DEAD:
+               virtnet_set_affinity(vi);
+               break;
+       case CPU_DOWN_PREPARE:
+               virtnet_clean_affinity(vi, (long)hcpu);
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_OK;
 }
 
 static void virtnet_get_ringparam(struct net_device *dev,
@@ -1082,13 +1132,15 @@ static int virtnet_set_channels(struct net_device *dev,
        if (queue_pairs > vi->max_queue_pairs)
                return -EINVAL;
 
+       get_online_cpus();
        err = virtnet_set_queues(vi, queue_pairs);
        if (!err) {
                netif_set_real_num_tx_queues(dev, queue_pairs);
                netif_set_real_num_rx_queues(dev, queue_pairs);
 
-               virtnet_set_affinity(vi, true);
+               virtnet_set_affinity(vi);
        }
+       put_online_cpus();
 
        return err;
 }
@@ -1127,12 +1179,19 @@ static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
 
 /* To avoid contending a lock hold by a vcpu who would exit to host, select the
  * txq based on the processor id.
- * TODO: handle cpu hotplug.
  */
 static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
-       int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
-                 smp_processor_id();
+       int txq;
+       struct virtnet_info *vi = netdev_priv(dev);
+
+       if (skb_rx_queue_recorded(skb)) {
+               txq = skb_get_rx_queue(skb);
+       } else {
+               txq = *__this_cpu_ptr(vi->vq_index);
+               if (txq == -1)
+                       txq = 0;
+       }
 
        while (unlikely(txq >= dev->real_num_tx_queues))
                txq -= dev->real_num_tx_queues;
@@ -1248,7 +1307,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
 {
        struct virtio_device *vdev = vi->vdev;
 
-       virtnet_set_affinity(vi, false);
+       virtnet_clean_affinity(vi, -1);
 
        vdev->config->del_vqs(vdev);
 
@@ -1371,7 +1430,10 @@ static int init_vqs(struct virtnet_info *vi)
        if (ret)
                goto err_free;
 
-       virtnet_set_affinity(vi, true);
+       get_online_cpus();
+       virtnet_set_affinity(vi);
+       put_online_cpus();
+
        return 0;
 
 err_free:
@@ -1453,6 +1515,10 @@ static int virtnet_probe(struct virtio_device *vdev)
        if (vi->stats == NULL)
                goto free;
 
+       vi->vq_index = alloc_percpu(int);
+       if (vi->vq_index == NULL)
+               goto free_stats;
+
        mutex_init(&vi->config_lock);
        vi->config_enable = true;
        INIT_WORK(&vi->config_work, virtnet_config_changed_work);
@@ -1476,7 +1542,7 @@ static int virtnet_probe(struct virtio_device *vdev)
        /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
        err = init_vqs(vi);
        if (err)
-               goto free_stats;
+               goto free_index;
 
        netif_set_real_num_tx_queues(dev, 1);
        netif_set_real_num_rx_queues(dev, 1);
@@ -1499,6 +1565,13 @@ static int virtnet_probe(struct virtio_device *vdev)
                }
        }
 
+       vi->nb.notifier_call = &virtnet_cpu_callback;
+       err = register_hotcpu_notifier(&vi->nb);
+       if (err) {
+               pr_debug("virtio_net: registering cpu notifier failed\n");
+               goto free_recv_bufs;
+       }
+
        /* Assume link up if device can't report link status,
           otherwise get link status from config. */
        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
@@ -1520,6 +1593,8 @@ free_recv_bufs:
 free_vqs:
        cancel_delayed_work_sync(&vi->refill);
        virtnet_del_vqs(vi);
+free_index:
+       free_percpu(vi->vq_index);
 free_stats:
        free_percpu(vi->stats);
 free:
@@ -1543,6 +1618,8 @@ static void virtnet_remove(struct virtio_device *vdev)
 {
        struct virtnet_info *vi = vdev->priv;
 
+       unregister_hotcpu_notifier(&vi->nb);
+
        /* Prevent config work handler from accessing the device. */
        mutex_lock(&vi->config_lock);
        vi->config_enable = false;
@@ -1554,6 +1631,7 @@ static void virtnet_remove(struct virtio_device *vdev)
 
        flush_work(&vi->config_work);
 
+       free_percpu(vi->vq_index);
        free_percpu(vi->stats);
        free_netdev(vi->dev);
 }
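
Summarizing the per-CPU queue mapping that the virtio_net changes above introduce (a restatement of the code, for reference):

    /*
     * vi->vq_index after this series:
     *   - #queue pairs == #online CPUs: CPU n maps to queue pair n and the
     *     virtqueue affinity hints are pinned accordingly;
     *   - otherwise, or while a CPU is going offline: round-robin over
     *     curr_queue_pairs, with -1 meaning "fall back to txq 0" in
     *     virtnet_select_queue().
     */
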
index dc8913c6238c965b0905ef7ad6784bf669f06388..12c6440d16499ebd4a1faaa4e07edc8658b5f198 100644 (file)
@@ -154,8 +154,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
        if (ret & 1) { /* Link is up. */
                printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
                       adapter->netdev->name, adapter->link_speed);
-               if (!netif_carrier_ok(adapter->netdev))
-                       netif_carrier_on(adapter->netdev);
+               netif_carrier_on(adapter->netdev);
 
                if (affectTxQueue) {
                        for (i = 0; i < adapter->num_tx_queues; i++)
@@ -165,8 +164,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
        } else {
                printk(KERN_INFO "%s: NIC Link is Down\n",
                       adapter->netdev->name);
-               if (netif_carrier_ok(adapter->netdev))
-                       netif_carrier_off(adapter->netdev);
+               netif_carrier_off(adapter->netdev);
 
                if (affectTxQueue) {
                        for (i = 0; i < adapter->num_tx_queues; i++)
@@ -3061,6 +3059,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
        netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
        netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
 
+       netif_carrier_off(netdev);
        err = register_netdev(netdev);
 
        if (err) {
index 1d76ae855f077dd5d3dbb7230d392ecc7c0b3c68..530581ca019111e79ec366f0fa702ab6341fc977 100644 (file)
@@ -156,7 +156,7 @@ void i2400m_wake_tx_work(struct work_struct *ws)
        struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
        struct net_device *net_dev = i2400m->wimax_dev.net_dev;
        struct device *dev = i2400m_dev(i2400m);
-       struct sk_buff *skb = i2400m->wake_tx_skb;
+       struct sk_buff *skb;
        unsigned long flags;
 
        spin_lock_irqsave(&i2400m->tx_lock, flags);
@@ -236,23 +236,26 @@ void i2400m_tx_prep_header(struct sk_buff *skb)
 void i2400m_net_wake_stop(struct i2400m *i2400m)
 {
        struct device *dev = i2400m_dev(i2400m);
+       struct sk_buff *wake_tx_skb;
+       unsigned long flags;
 
        d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
-       /* See i2400m_hard_start_xmit(), references are taken there
-        * and here we release them if the work was still
-        * pending. Note we can't differentiate work not pending vs
-        * never scheduled, so the NULL check does that. */
-       if (cancel_work_sync(&i2400m->wake_tx_ws) == 0
-           && i2400m->wake_tx_skb != NULL) {
-               unsigned long flags;
-               struct sk_buff *wake_tx_skb;
-               spin_lock_irqsave(&i2400m->tx_lock, flags);
-               wake_tx_skb = i2400m->wake_tx_skb;      /* compat help */
-               i2400m->wake_tx_skb = NULL;     /* compat help */
-               spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+       /*
+        * See i2400m_hard_start_xmit(), references are taken there and
+        * here we release them if the packet was still pending.
+        */
+       cancel_work_sync(&i2400m->wake_tx_ws);
+
+       spin_lock_irqsave(&i2400m->tx_lock, flags);
+       wake_tx_skb = i2400m->wake_tx_skb;
+       i2400m->wake_tx_skb = NULL;
+       spin_unlock_irqrestore(&i2400m->tx_lock, flags);
+
+       if (wake_tx_skb) {
                i2400m_put(i2400m);
                kfree_skb(wake_tx_skb);
        }
+
        d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
 }
 
@@ -288,7 +291,7 @@ int i2400m_net_wake_tx(struct i2400m *i2400m, struct net_device *net_dev,
         * and if pending, release those resources. */
        result = 0;
        spin_lock_irqsave(&i2400m->tx_lock, flags);
-       if (!work_pending(&i2400m->wake_tx_ws)) {
+       if (!i2400m->wake_tx_skb) {
                netif_stop_queue(net_dev);
                i2400m_get(i2400m);
                i2400m->wake_tx_skb = skb_get(skb);     /* transfer ref count */
index 1a67a4f829fe9f3fca15a6ad3533e335bec70a25..2c02b4e84094d32fc5605deaebb0ed148087fbf5 100644 (file)
@@ -30,5 +30,6 @@ source "drivers/net/wireless/ath/ath9k/Kconfig"
 source "drivers/net/wireless/ath/carl9170/Kconfig"
 source "drivers/net/wireless/ath/ath6kl/Kconfig"
 source "drivers/net/wireless/ath/ar5523/Kconfig"
+source "drivers/net/wireless/ath/wil6210/Kconfig"
 
 endif
index 1e18621326dc2fc5c70335e43e4e781436820a84..97b964ded2bef25e4218cc3843377ada32732077 100644 (file)
@@ -3,6 +3,7 @@ obj-$(CONFIG_ATH9K_HW)          += ath9k/
 obj-$(CONFIG_CARL9170)         += carl9170/
 obj-$(CONFIG_ATH6KL)           += ath6kl/
 obj-$(CONFIG_AR5523)           += ar5523/
+obj-$(CONFIG_WIL6210)          += wil6210/
 
 obj-$(CONFIG_ATH_COMMON)       += ath.o
 
index 8b0d8dcd76255239e7451b4a1258adca79ef8342..56317b0fb6b692f3f9ae20b8ba681ec18371788f 100644 (file)
@@ -976,6 +976,8 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
                                          AR_PHY_CL_TAB_1,
                                          AR_PHY_CL_TAB_2 };
 
+       ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
+
        if (rtt) {
                if (!ar9003_hw_rtt_restore(ah, chan))
                        run_rtt_cal = true;
index ce19c09fa8e84aec2877e2f14861b0284f4d958e..3afc24bde6d65d88a0d469abb0ef73d93856edcb 100644 (file)
@@ -586,32 +586,19 @@ static void ar9003_hw_init_bb(struct ath_hw *ah,
        ath9k_hw_synth_delay(ah, chan, synthDelay);
 }
 
-static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
+void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
 {
-       switch (rx) {
-       case 0x5:
+       if (ah->caps.tx_chainmask == 5 || ah->caps.rx_chainmask == 5)
                REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
                            AR_PHY_SWAP_ALT_CHAIN);
-       case 0x3:
-       case 0x1:
-       case 0x2:
-       case 0x7:
-               REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx);
-               REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);
-               break;
-       default:
-               break;
-       }
+
+       REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx);
+       REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);
 
        if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7))
-               REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
-       else
-               REG_WRITE(ah, AR_SELFGEN_MASK, tx);
+               tx = 3;
 
-       if (tx == 0x5) {
-               REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
-                           AR_PHY_SWAP_ALT_CHAIN);
-       }
+       REG_WRITE(ah, AR_SELFGEN_MASK, tx);
 }
 
 /*
index 86e26a19efdac1923395543c1a50fb53a6cb8958..42794c546a4068ac91b47c1252153b9040362555 100644 (file)
@@ -317,7 +317,6 @@ struct ath_rx {
        u32 *rxlink;
        u32 num_pkts;
        unsigned int rxfilter;
-       spinlock_t rxbuflock;
        struct list_head rxbuf;
        struct ath_descdma rxdma;
        struct ath_buf *rx_bufptr;
@@ -328,7 +327,6 @@ struct ath_rx {
 
 int ath_startrecv(struct ath_softc *sc);
 bool ath_stoprecv(struct ath_softc *sc);
-void ath_flushrecv(struct ath_softc *sc);
 u32 ath_calcrxfilter(struct ath_softc *sc);
 int ath_rx_init(struct ath_softc *sc, int nbufs);
 void ath_rx_cleanup(struct ath_softc *sc);
@@ -646,7 +644,6 @@ void ath_ant_comb_update(struct ath_softc *sc);
 enum sc_op_flags {
        SC_OP_INVALID,
        SC_OP_BEACONS,
-       SC_OP_RXFLUSH,
        SC_OP_ANI_RUN,
        SC_OP_PRIM_STA_VIF,
        SC_OP_HW_RESET,
index 531fffd801a34eaa11b8d483aca51ac07e7a7fab..2ca355e94da65467e36595990423c80be1b897b6 100644 (file)
@@ -147,6 +147,7 @@ static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
                                 skb->len, DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);
                bf->bf_buf_addr = 0;
+               bf->bf_mpdu = NULL;
        }
 
        skb = ieee80211_beacon_get(hw, vif);
@@ -359,7 +360,6 @@ void ath9k_beacon_tasklet(unsigned long data)
                return;
 
        bf = ath9k_beacon_generate(sc->hw, vif);
-       WARN_ON(!bf);
 
        if (sc->beacon.bmisscnt != 0) {
                ath_dbg(common, BSTUCK, "resume beacon xmit after %u misses\n",
index 13ff9edc24015e5e2126fc4f6c7fde8dcb003c90..e585fc827c50b2e9d5ff2fe08afb15cd49cedba6 100644 (file)
@@ -861,7 +861,6 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
        RXS_ERR("RX-LENGTH-ERR", rx_len_err);
        RXS_ERR("RX-OOM-ERR", rx_oom_err);
        RXS_ERR("RX-RATE-ERR", rx_rate_err);
-       RXS_ERR("RX-DROP-RXFLUSH", rx_drop_rxflush);
        RXS_ERR("RX-TOO-MANY-FRAGS", rx_too_many_frags_err);
 
        PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
index 375c3b46411eee6cd140dbc22e6e435fe0e32ad9..6df2ab62dcb706df5bef4a8afdf04593651f111a 100644 (file)
@@ -216,7 +216,6 @@ struct ath_tx_stats {
  * @rx_oom_err:  No. of frames dropped due to OOM issues.
  * @rx_rate_err:  No. of frames dropped due to rate errors.
  * @rx_too_many_frags_err:  Frames dropped due to too-many-frags received.
- * @rx_drop_rxflush: No. of frames dropped due to RX-FLUSH.
  * @rx_beacons:  No. of beacons received.
  * @rx_frags:  No. of rx-fragements received.
  */
@@ -235,7 +234,6 @@ struct ath_rx_stats {
        u32 rx_oom_err;
        u32 rx_rate_err;
        u32 rx_too_many_frags_err;
-       u32 rx_drop_rxflush;
        u32 rx_beacons;
        u32 rx_frags;
 };
index 4a9570dfba72605d2f492ddf51cde8e8e7db835b..aac4a406a5134727e49fef2999e563e39169d83f 100644 (file)
@@ -344,6 +344,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
                        endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv,
                                                  skb, htc_hdr->endpoint_id,
                                                  txok);
+               } else {
+                       kfree_skb(skb);
                }
        }
 
index 7f1a8e91c908c2dea314a7171e1d1d59139e6c6a..9d26fc56ca56a6bf7d4e8ac0ed91366df0de50cd 100644 (file)
@@ -1066,6 +1066,7 @@ void ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain);
 int ar9003_paprd_init_table(struct ath_hw *ah);
 bool ar9003_paprd_is_done(struct ath_hw *ah);
 bool ar9003_is_paprd_enabled(struct ath_hw *ah);
+void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
 
 /* Hardware family op attach helpers */
 void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
index be30a9af152884d8e073b7a3be5b79e6b3d5a8ee..dd91f8fdc01c3ea44c922bbbbd14bee2df4fd6db 100644 (file)
@@ -182,7 +182,7 @@ static void ath_restart_work(struct ath_softc *sc)
        ath_start_ani(sc);
 }
 
-static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
+static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx)
 {
        struct ath_hw *ah = sc->sc_ah;
        bool ret = true;
@@ -202,14 +202,6 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
        if (!ath_drain_all_txq(sc, retry_tx))
                ret = false;
 
-       if (!flush) {
-               if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
-                       ath_rx_tasklet(sc, 1, true);
-               ath_rx_tasklet(sc, 1, false);
-       } else {
-               ath_flushrecv(sc);
-       }
-
        return ret;
 }
 
@@ -262,11 +254,11 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_hw_cal_data *caldata = NULL;
        bool fastcc = true;
-       bool flush = false;
        int r;
 
        __ath_cancel_work(sc);
 
+       tasklet_disable(&sc->intr_tq);
        spin_lock_bh(&sc->sc_pcu_lock);
 
        if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
@@ -276,11 +268,10 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
 
        if (!hchan) {
                fastcc = false;
-               flush = true;
                hchan = ah->curchan;
        }
 
-       if (!ath_prepare_reset(sc, retry_tx, flush))
+       if (!ath_prepare_reset(sc, retry_tx))
                fastcc = false;
 
        ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n",
@@ -302,6 +293,8 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
 
 out:
        spin_unlock_bh(&sc->sc_pcu_lock);
+       tasklet_enable(&sc->intr_tq);
+
        return r;
 }
 
@@ -804,7 +797,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
                ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
        }
 
-       ath_prepare_reset(sc, false, true);
+       ath_prepare_reset(sc, false);
 
        if (sc->rx.frag) {
                dev_kfree_skb_any(sc->rx.frag);
@@ -1833,6 +1826,9 @@ static u32 fill_chainmask(u32 cap, u32 new)
 
 static bool validate_antenna_mask(struct ath_hw *ah, u32 val)
 {
+       if (AR_SREV_9300_20_OR_LATER(ah))
+               return true;
+
        switch (val & 0x7) {
        case 0x1:
        case 0x3:
index d4df98a938bf4755d1af6238d41e5df0753970c6..90752f2469704bbbb2bf7cae86a1c2cb4029435a 100644 (file)
@@ -254,8 +254,6 @@ rx_init_fail:
 
 static void ath_edma_start_recv(struct ath_softc *sc)
 {
-       spin_lock_bh(&sc->rx.rxbuflock);
-
        ath9k_hw_rxena(sc->sc_ah);
 
        ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
@@ -267,8 +265,6 @@ static void ath_edma_start_recv(struct ath_softc *sc)
        ath_opmode_init(sc);
 
        ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
-
-       spin_unlock_bh(&sc->rx.rxbuflock);
 }
 
 static void ath_edma_stop_recv(struct ath_softc *sc)
@@ -285,8 +281,6 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
        int error = 0;
 
        spin_lock_init(&sc->sc_pcu_lock);
-       spin_lock_init(&sc->rx.rxbuflock);
-       clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
 
        common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
                             sc->sc_ah->caps.rx_status_len;
@@ -447,7 +441,6 @@ int ath_startrecv(struct ath_softc *sc)
                return 0;
        }
 
-       spin_lock_bh(&sc->rx.rxbuflock);
        if (list_empty(&sc->rx.rxbuf))
                goto start_recv;
 
@@ -468,26 +461,31 @@ start_recv:
        ath_opmode_init(sc);
        ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
 
-       spin_unlock_bh(&sc->rx.rxbuflock);
-
        return 0;
 }
 
+static void ath_flushrecv(struct ath_softc *sc)
+{
+       if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+               ath_rx_tasklet(sc, 1, true);
+       ath_rx_tasklet(sc, 1, false);
+}
+
 bool ath_stoprecv(struct ath_softc *sc)
 {
        struct ath_hw *ah = sc->sc_ah;
        bool stopped, reset = false;
 
-       spin_lock_bh(&sc->rx.rxbuflock);
        ath9k_hw_abortpcurecv(ah);
        ath9k_hw_setrxfilter(ah, 0);
        stopped = ath9k_hw_stopdmarecv(ah, &reset);
 
+       ath_flushrecv(sc);
+
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                ath_edma_stop_recv(sc);
        else
                sc->rx.rxlink = NULL;
-       spin_unlock_bh(&sc->rx.rxbuflock);
 
        if (!(ah->ah_flags & AH_UNPLUGGED) &&
            unlikely(!stopped)) {
@@ -499,15 +497,6 @@ bool ath_stoprecv(struct ath_softc *sc)
        return stopped && !reset;
 }
 
-void ath_flushrecv(struct ath_softc *sc)
-{
-       set_bit(SC_OP_RXFLUSH, &sc->sc_flags);
-       if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
-               ath_rx_tasklet(sc, 1, true);
-       ath_rx_tasklet(sc, 1, false);
-       clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
-}
-
 static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
 {
        /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
@@ -744,6 +733,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
                        return NULL;
        }
 
+       list_del(&bf->list);
        if (!bf->bf_mpdu)
                return bf;
 
@@ -1059,16 +1049,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                dma_type = DMA_FROM_DEVICE;
 
        qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
-       spin_lock_bh(&sc->rx.rxbuflock);
 
        tsf = ath9k_hw_gettsf64(ah);
        tsf_lower = tsf & 0xffffffff;
 
        do {
                bool decrypt_error = false;
-               /* If handling rx interrupt and flush is in progress => exit */
-               if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0))
-                       break;
 
                memset(&rs, 0, sizeof(rs));
                if (edma)
@@ -1111,15 +1097,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
 
                ath_debug_stat_rx(sc, &rs);
 
-               /*
-                * If we're asked to flush receive queue, directly
-                * chain it back at the queue without processing it.
-                */
-               if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags)) {
-                       RX_STAT_INC(rx_drop_rxflush);
-                       goto requeue_drop_frag;
-               }
-
                memset(rxs, 0, sizeof(struct ieee80211_rx_status));
 
                rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
@@ -1254,19 +1231,18 @@ requeue_drop_frag:
                        sc->rx.frag = NULL;
                }
 requeue:
+               list_add_tail(&bf->list, &sc->rx.rxbuf);
+               if (flush)
+                       continue;
+
                if (edma) {
-                       list_add_tail(&bf->list, &sc->rx.rxbuf);
                        ath_rx_edma_buf_link(sc, qtype);
                } else {
-                       list_move_tail(&bf->list, &sc->rx.rxbuf);
                        ath_rx_buf_link(sc, bf);
-                       if (!flush)
-                               ath9k_hw_rxena(ah);
+                       ath9k_hw_rxena(ah);
                }
        } while (1);
 
-       spin_unlock_bh(&sc->rx.rxbuflock);
-
        if (!(ah->imask & ATH9K_INT_RXEOL)) {
                ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
                ath9k_hw_set_interrupts(ah);
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
new file mode 100644 (file)
index 0000000..bac3d98
--- /dev/null
@@ -0,0 +1,29 @@
+config WIL6210
+       tristate "Wilocity 60g WiFi card wil6210 support"
+       depends on CFG80211
+       depends on PCI
+       default n
+       ---help---
+         This module adds support for wireless adapters based on
+         the wil6210 chip by Wilocity. It supports operation on the
+         60 GHz band, covered by the IEEE 802.11ad standard.
+
+         http://wireless.kernel.org/en/users/Drivers/wil6210
+
+         If you choose to build it as a module, it will be called
+         wil6210.
+
+config WIL6210_ISR_COR
+       bool "Use Clear-On-Read mode for ISR registers for wil6210"
+       depends on WIL6210
+       default y
+       ---help---
+         ISR registers on wil6210 chip may operate in either
+         COR (Clear-On-Read) or W1C (Write-1-to-Clear) mode.
+         For production code, use COR (say y); it is the default
+         since it saves an extra target transaction.
+         For ISR debugging, use W1C (say n); it allows the ISR
+         registers to be monitored with debugfs. If COR were used,
+         the ISR would self-clear when accessed for debug purposes,
+         making such monitoring impossible.
+         Say y unless you are debugging interrupts.
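
The practical difference between the two modes is whether a read of the ISR
register acknowledges the pending bits by itself or an explicit write-back is
needed. A minimal sketch of the two acknowledge paths, assuming a memory-mapped
ICR register (the helper names here are illustrative, not part of the driver):

	/* Clear-On-Read: the read itself acknowledges the pending bits */
	static inline u32 isr_read_cor(void __iomem *icr)
	{
		return ioread32(icr);
	}

	/* Write-1-to-Clear: read, then write the same bits back to clear them */
	static inline u32 isr_read_w1c(void __iomem *icr)
	{
		u32 x = ioread32(icr);

		iowrite32(x, icr);
		return x;
	}

interrupt.c below folds this choice into the single wil_ioread32_and_clear()
helper, compiling in the write-back only when CONFIG_WIL6210_ISR_COR is not
set.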
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
new file mode 100644 (file)
index 0000000..9396dc9
--- /dev/null
@@ -0,0 +1,13 @@
+obj-$(CONFIG_WIL6210) += wil6210.o
+
+wil6210-objs := main.o
+wil6210-objs += netdev.o
+wil6210-objs += cfg80211.o
+wil6210-objs += pcie_bus.o
+wil6210-objs += debugfs.o
+wil6210-objs += wmi.o
+wil6210-objs += interrupt.o
+wil6210-objs += txrx.o
+
+subdir-ccflags-y += -Werror
+subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
new file mode 100644 (file)
index 0000000..116f4e8
--- /dev/null
@@ -0,0 +1,573 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <net/cfg80211.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+
+#define CHAN60G(_channel, _flags) {                            \
+       .band                   = IEEE80211_BAND_60GHZ,         \
+       .center_freq            = 56160 + (2160 * (_channel)),  \
+       .hw_value               = (_channel),                   \
+       .flags                  = (_flags),                     \
+       .max_antenna_gain       = 0,                            \
+       .max_power              = 40,                           \
+}
+
+static struct ieee80211_channel wil_60ghz_channels[] = {
+       CHAN60G(1, 0),
+       CHAN60G(2, 0),
+       CHAN60G(3, 0),
+/* channel 4 not supported yet */
+};
+
+static struct ieee80211_supported_band wil_band_60ghz = {
+       .channels = wil_60ghz_channels,
+       .n_channels = ARRAY_SIZE(wil_60ghz_channels),
+       .ht_cap = {
+               .ht_supported = true,
+               .cap = 0, /* TODO */
+               .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, /* TODO */
+               .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, /* TODO */
+               .mcs = {
+                               /* MCS 1..12 - SC PHY */
+                       .rx_mask = {0xfe, 0x1f}, /* 1..12 */
+                       .tx_params = IEEE80211_HT_MCS_TX_DEFINED, /* TODO */
+               },
+       },
+};
+
+static const struct ieee80211_txrx_stypes
+wil_mgmt_stypes[NUM_NL80211_IFTYPES] = {
+       [NL80211_IFTYPE_STATION] = {
+               .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+               BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+               .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+               BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+       },
+       [NL80211_IFTYPE_AP] = {
+               .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+               BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+               .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+               BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+       },
+       [NL80211_IFTYPE_P2P_CLIENT] = {
+               .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+               BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+               .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+               BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+       },
+       [NL80211_IFTYPE_P2P_GO] = {
+               .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+               BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+               .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+               BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+       },
+};
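+
+/* Note: the management frame subtype occupies bits 4..7 of the frame control
+ * field, so IEEE80211_STYPE_* >> 4 gives the bitmap bit index: e.g.
+ * IEEE80211_STYPE_ACTION (0x00d0) -> BIT(13), IEEE80211_STYPE_PROBE_REQ
+ * (0x0040) -> BIT(4), IEEE80211_STYPE_PROBE_RESP (0x0050) -> BIT(5).
+ */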
+
+static const u32 wil_cipher_suites[] = {
+       WLAN_CIPHER_SUITE_GCMP,
+};
+
+int wil_iftype_nl2wmi(enum nl80211_iftype type)
+{
+       static const struct {
+               enum nl80211_iftype nl;
+               enum wmi_network_type wmi;
+       } __nl2wmi[] = {
+               {NL80211_IFTYPE_ADHOC,          WMI_NETTYPE_ADHOC},
+               {NL80211_IFTYPE_STATION,        WMI_NETTYPE_INFRA},
+               {NL80211_IFTYPE_AP,             WMI_NETTYPE_AP},
+               {NL80211_IFTYPE_P2P_CLIENT,     WMI_NETTYPE_P2P},
+               {NL80211_IFTYPE_P2P_GO,         WMI_NETTYPE_P2P},
+               {NL80211_IFTYPE_MONITOR,        WMI_NETTYPE_ADHOC}, /* FIXME */
+       };
+       uint i;
+
+       for (i = 0; i < ARRAY_SIZE(__nl2wmi); i++) {
+               if (__nl2wmi[i].nl == type)
+                       return __nl2wmi[i].wmi;
+       }
+
+       return -EOPNOTSUPP;
+}
+
+static int wil_cfg80211_get_station(struct wiphy *wiphy,
+                                   struct net_device *ndev,
+                                   u8 *mac, struct station_info *sinfo)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       int rc;
+       struct wmi_notify_req_cmd cmd = {
+               .cid = 0,
+               .interval_usec = 0,
+       };
+
+       if (memcmp(mac, wil->dst_addr[0], ETH_ALEN))
+               return -ENOENT;
+
+       /* WMI_NOTIFY_REQ_DONE_EVENTID handler fills wil->stats.bf_mcs */
+       rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, &cmd, sizeof(cmd),
+                     WMI_NOTIFY_REQ_DONE_EVENTID, NULL, 0, 20);
+       if (rc)
+               return rc;
+
+       sinfo->generation = wil->sinfo_gen;
+
+       sinfo->filled |= STATION_INFO_TX_BITRATE;
+       sinfo->txrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
+       sinfo->txrate.mcs = wil->stats.bf_mcs;
+       sinfo->filled |= STATION_INFO_RX_BITRATE;
+       sinfo->rxrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
+       sinfo->rxrate.mcs = wil->stats.last_mcs_rx;
+
+       if (test_bit(wil_status_fwconnected, &wil->status)) {
+               sinfo->filled |= STATION_INFO_SIGNAL;
+               sinfo->signal = 12; /* TODO: provide real value */
+       }
+
+       return 0;
+}
+
+static int wil_cfg80211_change_iface(struct wiphy *wiphy,
+                                    struct net_device *ndev,
+                                    enum nl80211_iftype type, u32 *flags,
+                                    struct vif_params *params)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       struct wireless_dev *wdev = wil->wdev;
+
+       switch (type) {
+       case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_P2P_CLIENT:
+       case NL80211_IFTYPE_P2P_GO:
+               break;
+       case NL80211_IFTYPE_MONITOR:
+               if (flags)
+                       wil->monitor_flags = *flags;
+               else
+                       wil->monitor_flags = 0;
+
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       wdev->iftype = type;
+
+       return 0;
+}
+
+static int wil_cfg80211_scan(struct wiphy *wiphy,
+                            struct cfg80211_scan_request *request)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       struct wireless_dev *wdev = wil->wdev;
+       struct {
+               struct wmi_start_scan_cmd cmd;
+               u16 chnl[4];
+       } __packed cmd;
+       uint i, n;
+
+       if (wil->scan_request) {
+               wil_err(wil, "Already scanning\n");
+               return -EAGAIN;
+       }
+
+       /* check we are client side */
+       switch (wdev->iftype) {
+       case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_P2P_CLIENT:
+               break;
+       default:
+               return -EOPNOTSUPP;
+
+       }
+
+       /* FW doesn't support scan after a connection attempt */
+       if (test_bit(wil_status_dontscan, &wil->status)) {
+               wil_err(wil, "Scan after connect attempt not supported\n");
+               return -EBUSY;
+       }
+
+       wil->scan_request = request;
+
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.cmd.num_channels = 0;
+       n = min(request->n_channels, 4U);
+       for (i = 0; i < n; i++) {
+               int ch = request->channels[i]->hw_value;
+               if (ch == 0) {
+                       wil_err(wil,
+                               "Scan requested for unknown frequency %d MHz\n",
+                               request->channels[i]->center_freq);
+                       continue;
+               }
+               /* 0-based channel indexes */
+               cmd.cmd.channel_list[cmd.cmd.num_channels++].channel = ch - 1;
+               wil_dbg(wil, "Scan for ch %d  : %d MHz\n", ch,
+                       request->channels[i]->center_freq);
+       }
+
+       return wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
+                       cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
+}
+
+static int wil_cfg80211_connect(struct wiphy *wiphy,
+                               struct net_device *ndev,
+                               struct cfg80211_connect_params *sme)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       struct cfg80211_bss *bss;
+       struct wmi_connect_cmd conn;
+       const u8 *ssid_eid;
+       const u8 *rsn_eid;
+       int ch;
+       int rc = 0;
+
+       bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
+                              sme->ssid, sme->ssid_len,
+                              WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+       if (!bss) {
+               wil_err(wil, "Unable to find BSS\n");
+               return -ENOENT;
+       }
+
+       ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
+       if (!ssid_eid) {
+               wil_err(wil, "No SSID\n");
+               rc = -ENOENT;
+               goto out;
+       }
+
+       rsn_eid = sme->ie ?
+                       cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
+                       NULL;
+       if (rsn_eid) {
+               if (sme->ie_len > WMI_MAX_IE_LEN) {
+                       rc = -ERANGE;
+                       wil_err(wil, "IE too large (%td bytes)\n",
+                               sme->ie_len);
+                       goto out;
+               }
+               /*
+                * For secure assoc, send:
+                * (1) WMI_DELETE_CIPHER_KEY_CMD
+                * (2) WMI_SET_APPIE_CMD
+                */
+               rc = wmi_del_cipher_key(wil, 0, bss->bssid);
+               if (rc) {
+                       wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD failed\n");
+                       goto out;
+               }
+               /* WMI_SET_APPIE_CMD */
+               rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie);
+               if (rc) {
+                       wil_err(wil, "WMI_SET_APPIE_CMD failed\n");
+                       goto out;
+               }
+       }
+
+       /* WMI_CONNECT_CMD */
+       memset(&conn, 0, sizeof(conn));
+       switch (bss->capability & 0x03) {
+       case WLAN_CAPABILITY_DMG_TYPE_AP:
+               conn.network_type = WMI_NETTYPE_INFRA;
+               break;
+       case WLAN_CAPABILITY_DMG_TYPE_PBSS:
+               conn.network_type = WMI_NETTYPE_P2P;
+               break;
+       default:
+               wil_err(wil, "Unsupported BSS type, capability= 0x%04x\n",
+                       bss->capability);
+               goto out;
+       }
+       if (rsn_eid) {
+               conn.dot11_auth_mode = WMI_AUTH11_SHARED;
+               conn.auth_mode = WMI_AUTH_WPA2_PSK;
+               conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
+               conn.pairwise_crypto_len = 16;
+       } else {
+               conn.dot11_auth_mode = WMI_AUTH11_OPEN;
+               conn.auth_mode = WMI_AUTH_NONE;
+       }
+
+       conn.ssid_len = min_t(u8, ssid_eid[1], 32);
+       memcpy(conn.ssid, ssid_eid+2, conn.ssid_len);
+
+       ch = bss->channel->hw_value;
+       if (ch == 0) {
+               wil_err(wil, "BSS at unknown frequency %d MHz\n",
+                       bss->channel->center_freq);
+               rc = -EOPNOTSUPP;
+               goto out;
+       }
+       conn.channel = ch - 1;
+
+       memcpy(conn.bssid, bss->bssid, 6);
+       memcpy(conn.dst_mac, bss->bssid, 6);
+       /*
+        * FW doesn't support scan after a connection attempt
+        */
+       set_bit(wil_status_dontscan, &wil->status);
+
+       rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn));
+       if (rc == 0) {
+               /* Connect can take lots of time */
+               mod_timer(&wil->connect_timer,
+                         jiffies + msecs_to_jiffies(2000));
+       }
+
+ out:
+       cfg80211_put_bss(bss);
+
+       return rc;
+}
+
+static int wil_cfg80211_disconnect(struct wiphy *wiphy,
+                                  struct net_device *ndev,
+                                  u16 reason_code)
+{
+       int rc;
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+       rc = wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0);
+
+       return rc;
+}
+
+static int wil_cfg80211_set_channel(struct wiphy *wiphy,
+                                   struct cfg80211_chan_def *chandef)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       struct wireless_dev *wdev = wil->wdev;
+
+       wdev->preset_chandef = *chandef;
+
+       return 0;
+}
+
+static int wil_cfg80211_add_key(struct wiphy *wiphy,
+                               struct net_device *ndev,
+                               u8 key_index, bool pairwise,
+                               const u8 *mac_addr,
+                               struct key_params *params)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+       /* group key is not used */
+       if (!pairwise)
+               return 0;
+
+       return wmi_add_cipher_key(wil, key_index, mac_addr,
+                                 params->key_len, params->key);
+}
+
+static int wil_cfg80211_del_key(struct wiphy *wiphy,
+                               struct net_device *ndev,
+                               u8 key_index, bool pairwise,
+                               const u8 *mac_addr)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+       /* group key is not used */
+       if (!pairwise)
+               return 0;
+
+       return wmi_del_cipher_key(wil, key_index, mac_addr);
+}
+
+/* Needs to be present or wiphy_new() will WARN */
+static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
+                                       struct net_device *ndev,
+                                       u8 key_index, bool unicast,
+                                       bool multicast)
+{
+       return 0;
+}
+
+static int wil_cfg80211_start_ap(struct wiphy *wiphy,
+                                struct net_device *ndev,
+                                struct cfg80211_ap_settings *info)
+{
+       int rc = 0;
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       struct wireless_dev *wdev = ndev->ieee80211_ptr;
+       struct ieee80211_channel *channel = info->chandef.chan;
+       struct cfg80211_beacon_data *bcon = &info->beacon;
+       u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
+       if (!channel) {
+               wil_err(wil, "AP: No channel???\n");
+               return -EINVAL;
+       }
+
+       wil_dbg(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
+               channel->center_freq, info->privacy ? "secure" : "open");
+       print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
+                            info->ssid, info->ssid_len);
+
+       rc = wil_reset(wil);
+       if (rc)
+               return rc;
+
+       rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
+       if (rc)
+               return rc;
+
+       rc = wmi_set_channel(wil, channel->hw_value);
+       if (rc)
+               return rc;
+
+       /* MAC address - pre-requisite for other commands */
+       wmi_set_mac_address(wil, ndev->dev_addr);
+
+       /* IE's */
+       /* bcon 'head' IEs are not relevant for the 60 GHz band */
+       wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
+                  bcon->beacon_ies);
+       wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
+                  bcon->proberesp_ies);
+       wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
+                  bcon->assocresp_ies);
+
+       wil->secure_pcp = info->privacy;
+
+       rc = wmi_set_bcon(wil, info->beacon_interval, wmi_nettype);
+       if (rc)
+               return rc;
+
+       /* Rx VRING. After MAC and beacon */
+       rc = wil_rx_init(wil);
+
+       netif_carrier_on(ndev);
+
+       return rc;
+}
+
+static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
+                               struct net_device *ndev)
+{
+       int rc = 0;
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       struct wireless_dev *wdev = ndev->ieee80211_ptr;
+       u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
+       /* To stop beaconing, set BI to 0 */
+       rc = wmi_set_bcon(wil, 0, wmi_nettype);
+
+       return rc;
+}
+
+static struct cfg80211_ops wil_cfg80211_ops = {
+       .scan = wil_cfg80211_scan,
+       .connect = wil_cfg80211_connect,
+       .disconnect = wil_cfg80211_disconnect,
+       .change_virtual_intf = wil_cfg80211_change_iface,
+       .get_station = wil_cfg80211_get_station,
+       .set_monitor_channel = wil_cfg80211_set_channel,
+       .add_key = wil_cfg80211_add_key,
+       .del_key = wil_cfg80211_del_key,
+       .set_default_key = wil_cfg80211_set_default_key,
+       /* AP mode */
+       .start_ap = wil_cfg80211_start_ap,
+       .stop_ap = wil_cfg80211_stop_ap,
+};
+
+static void wil_wiphy_init(struct wiphy *wiphy)
+{
+       /* TODO: set real value */
+       wiphy->max_scan_ssids = 10;
+       wiphy->max_num_pmkids = 0 /* TODO: */;
+       wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+                                BIT(NL80211_IFTYPE_AP) |
+                                BIT(NL80211_IFTYPE_MONITOR);
+       /* TODO: enable P2P when integrated with supplicant:
+        * BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO)
+        */
+       wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
+                       WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+       dev_warn(wiphy_dev(wiphy), "%s : flags = 0x%08x\n",
+                __func__, wiphy->flags);
+       wiphy->probe_resp_offload =
+               NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+               NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+               NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+
+       wiphy->bands[IEEE80211_BAND_60GHZ] = &wil_band_60ghz;
+
+       /* TODO: figure this out */
+       wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+       wiphy->cipher_suites = wil_cipher_suites;
+       wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
+       wiphy->mgmt_stypes = wil_mgmt_stypes;
+}
+
+struct wireless_dev *wil_cfg80211_init(struct device *dev)
+{
+       int rc = 0;
+       struct wireless_dev *wdev;
+
+       wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
+       if (!wdev)
+               return ERR_PTR(-ENOMEM);
+
+       wdev->wiphy = wiphy_new(&wil_cfg80211_ops,
+                               sizeof(struct wil6210_priv));
+       if (!wdev->wiphy) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       set_wiphy_dev(wdev->wiphy, dev);
+       wil_wiphy_init(wdev->wiphy);
+
+       rc = wiphy_register(wdev->wiphy);
+       if (rc < 0)
+               goto out_failed_reg;
+
+       return wdev;
+
+out_failed_reg:
+       wiphy_free(wdev->wiphy);
+out:
+       kfree(wdev);
+
+       return ERR_PTR(rc);
+}
+
+void wil_wdev_free(struct wil6210_priv *wil)
+{
+       struct wireless_dev *wdev = wil_to_wdev(wil);
+
+       if (!wdev)
+               return;
+
+       wiphy_unregister(wdev->wiphy);
+       wiphy_free(wdev->wiphy);
+       kfree(wdev);
+}
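
For reference, the CHAN60G() macro at the top of this file encodes the
IEEE 802.11ad channelization: channel n is centered at 56160 + 2160 * n MHz,
so the three channels registered in wil_60ghz_channels[] work out to

	channel 1: 56160 + 2160 * 1 = 58320 MHz
	channel 2: 56160 + 2160 * 2 = 60480 MHz
	channel 3: 56160 + 2160 * 3 = 62640 MHz

wil_cfg80211_scan() and wil_cfg80211_connect() translate in the opposite
direction, passing the 0-based index (hw_value - 1) to the firmware.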
diff --git a/drivers/net/wireless/ath/wil6210/dbg_hexdump.h b/drivers/net/wireless/ath/wil6210/dbg_hexdump.h
new file mode 100644 (file)
index 0000000..6a315ba
--- /dev/null
@@ -0,0 +1,30 @@
+#ifndef WIL_DBG_HEXDUMP_H_
+#define WIL_DBG_HEXDUMP_H_
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define wil_dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
+                            groupsize, buf, len, ascii)        \
+do {                                                           \
+       DEFINE_DYNAMIC_DEBUG_METADATA(descriptor,               \
+               __builtin_constant_p(prefix_str) ? prefix_str : "hexdump");\
+       if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT))  \
+               print_hex_dump(KERN_DEBUG, prefix_str,          \
+                              prefix_type, rowsize, groupsize, \
+                              buf, len, ascii);                \
+} while (0)
+
+#define wil_print_hex_dump_debug(prefix_str, prefix_type, rowsize,     \
+                                groupsize, buf, len, ascii)            \
+       wil_dynamic_hex_dump(prefix_str, prefix_type, rowsize,          \
+                            groupsize, buf, len, ascii)
+
+#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len)        \
+       wil_dynamic_hex_dump(prefix_str, prefix_type, 16, 1, buf, len, true)
+#else /* defined(CONFIG_DYNAMIC_DEBUG) */
+#define wil_print_hex_dump_debug(prefix_str, prefix_type, rowsize,     \
+                                groupsize, buf, len, ascii)            \
+       print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize,    \
+                      groupsize, buf, len, ascii)
+#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
+
+#endif /* WIL_DBG_HEXDUMP_H_ */
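
These wrappers route hex dumps through dynamic debug when it is available, so
each dump site can be switched on at runtime. A minimal usage sketch, assuming
a buffer and length already in scope (the variable names are illustrative):

	/* With CONFIG_DYNAMIC_DEBUG this prints only when the call site is
	 * enabled via dynamic debug; otherwise it always prints at KERN_DEBUG.
	 */
	wil_print_hex_dump_debug("wmi evt ", DUMP_PREFIX_OFFSET, 16, 1,
				 buf, len, true);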
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
new file mode 100644 (file)
index 0000000..65fc968
--- /dev/null
@@ -0,0 +1,603 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+
+#include "wil6210.h"
+#include "txrx.h"
+
+/* Nasty hack. Better to have per-device instances */
+static u32 mem_addr;
+static u32 dbg_txdesc_index;
+
+static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
+                           const char *name, struct vring *vring)
+{
+       void __iomem *x = wmi_addr(wil, vring->hwtail);
+
+       seq_printf(s, "VRING %s = {\n", name);
+       seq_printf(s, "  pa     = 0x%016llx\n", (unsigned long long)vring->pa);
+       seq_printf(s, "  va     = 0x%p\n", vring->va);
+       seq_printf(s, "  size   = %d\n", vring->size);
+       seq_printf(s, "  swtail = %d\n", vring->swtail);
+       seq_printf(s, "  swhead = %d\n", vring->swhead);
+       seq_printf(s, "  hwtail = [0x%08x] -> ", vring->hwtail);
+       if (x)
+               seq_printf(s, "0x%08x\n", ioread32(x));
+       else
+               seq_printf(s, "???\n");
+
+       if (vring->va && (vring->size < 1025)) {
+               uint i;
+               for (i = 0; i < vring->size; i++) {
+                       volatile struct vring_tx_desc *d = &vring->va[i].tx;
+                       if ((i % 64) == 0 && (i != 0))
+                               seq_printf(s, "\n");
+                       seq_printf(s, "%s", (d->dma.status & BIT(0)) ?
+                                       "S" : (vring->ctx[i] ? "H" : "h"));
+               }
+               seq_printf(s, "\n");
+       }
+       seq_printf(s, "}\n");
+}
+
+static int wil_vring_debugfs_show(struct seq_file *s, void *data)
+{
+       uint i;
+       struct wil6210_priv *wil = s->private;
+
+       wil_print_vring(s, wil, "rx", &wil->vring_rx);
+
+       for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
+               struct vring *vring = &(wil->vring_tx[i]);
+               if (vring->va) {
+                       char name[10];
+                       snprintf(name, sizeof(name), "tx_%2d", i);
+                       wil_print_vring(s, wil, name, vring);
+               }
+       }
+
+       return 0;
+}
+
+static int wil_vring_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, wil_vring_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_vring = {
+       .open           = wil_vring_seq_open,
+       .release        = single_release,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+};
+
+static void wil_print_ring(struct seq_file *s, const char *prefix,
+                          void __iomem *off)
+{
+       struct wil6210_priv *wil = s->private;
+       struct wil6210_mbox_ring r;
+       int rsize;
+       uint i;
+
+       wil_memcpy_fromio_32(&r, off, sizeof(r));
+       wil_mbox_ring_le2cpus(&r);
+       /*
+        * we just read a memory block from the NIC. This memory may be
+        * garbage. Check its validity before using it.
+        */
+       rsize = r.size / sizeof(struct wil6210_mbox_ring_desc);
+
+       seq_printf(s, "ring %s = {\n", prefix);
+       seq_printf(s, "  base = 0x%08x\n", r.base);
+       seq_printf(s, "  size = 0x%04x bytes -> %d entries\n", r.size, rsize);
+       seq_printf(s, "  tail = 0x%08x\n", r.tail);
+       seq_printf(s, "  head = 0x%08x\n", r.head);
+       seq_printf(s, "  entry size = %d\n", r.entry_size);
+
+       if (r.size % sizeof(struct wil6210_mbox_ring_desc)) {
+               seq_printf(s, "  ??? size is not multiple of %zd, garbage?\n",
+                          sizeof(struct wil6210_mbox_ring_desc));
+               goto out;
+       }
+
+       if (!wmi_addr(wil, r.base) ||
+           !wmi_addr(wil, r.tail) ||
+           !wmi_addr(wil, r.head)) {
+               seq_printf(s, "  ??? pointers are garbage?\n");
+               goto out;
+       }
+
+       for (i = 0; i < rsize; i++) {
+               struct wil6210_mbox_ring_desc d;
+               struct wil6210_mbox_hdr hdr;
+               size_t delta = i * sizeof(d);
+               void __iomem *x = wil->csr + HOSTADDR(r.base) + delta;
+
+               wil_memcpy_fromio_32(&d, x, sizeof(d));
+
+               seq_printf(s, "  [%2x] %s %s%s 0x%08x", i,
+                          d.sync ? "F" : "E",
+                          (r.tail - r.base == delta) ? "t" : " ",
+                          (r.head - r.base == delta) ? "h" : " ",
+                          le32_to_cpu(d.addr));
+               if (0 == wmi_read_hdr(wil, d.addr, &hdr)) {
+                       u16 len = le16_to_cpu(hdr.len);
+                       seq_printf(s, " -> %04x %04x %04x %02x\n",
+                                  le16_to_cpu(hdr.seq), len,
+                                  le16_to_cpu(hdr.type), hdr.flags);
+                       if (len <= MAX_MBOXITEM_SIZE) {
+                               int n = 0;
+                               unsigned char printbuf[16 * 3 + 2];
+                               unsigned char databuf[MAX_MBOXITEM_SIZE];
+                               void __iomem *src = wmi_buffer(wil, d.addr) +
+                                       sizeof(struct wil6210_mbox_hdr);
+                               /*
+                                * No need to check @src for validity -
+                                * we already validated @d.addr while
+                                * reading the header
+                                */
+                               wil_memcpy_fromio_32(databuf, src, len);
+                               while (n < len) {
+                                       int l = min(len - n, 16);
+                                       hex_dump_to_buffer(databuf + n, l,
+                                                          16, 1, printbuf,
+                                                          sizeof(printbuf),
+                                                          false);
+                                       seq_printf(s, "      : %s\n", printbuf);
+                                       n += l;
+                               }
+                       }
+               } else {
+                       seq_printf(s, "\n");
+               }
+       }
+ out:
+       seq_printf(s, "}\n");
+}
+
+static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
+{
+       struct wil6210_priv *wil = s->private;
+
+       wil_print_ring(s, "tx", wil->csr + HOST_MBOX +
+                      offsetof(struct wil6210_mbox_ctl, tx));
+       wil_print_ring(s, "rx", wil->csr + HOST_MBOX +
+                      offsetof(struct wil6210_mbox_ctl, rx));
+
+       return 0;
+}
+
+static int wil_mbox_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, wil_mbox_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_mbox = {
+       .open           = wil_mbox_seq_open,
+       .release        = single_release,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+};
+
+static int wil_debugfs_iomem_x32_set(void *data, u64 val)
+{
+       iowrite32(val, (void __iomem *)data);
+       wmb(); /* make sure write propagated to HW */
+
+       return 0;
+}
+
+static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
+{
+       *val = ioread32((void __iomem *)data);
+
+       return 0;
+}
+
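+/* DEFINE_SIMPLE_ATTRIBUTE() below generates the file_operations boilerplate
+ * around the two helpers above; each debugfs file created with fops_iomem_x32
+ * exposes exactly one 32-bit MMIO register, printed and parsed in the
+ * "0x%08llx" format.
+ */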
+DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
+                       wil_debugfs_iomem_x32_set, "0x%08llx\n");
+
+static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
+                                                  mode_t mode,
+                                                  struct dentry *parent,
+                                                  void __iomem *value)
+{
+       return debugfs_create_file(name, mode, parent, (void * __force)value,
+                                  &fops_iomem_x32);
+}
+
+static int wil6210_debugfs_create_ISR(struct wil6210_priv *wil,
+                                     const char *name,
+                                     struct dentry *parent, u32 off)
+{
+       struct dentry *d = debugfs_create_dir(name, parent);
+
+       if (IS_ERR_OR_NULL(d))
+               return -ENODEV;
+
+       wil_debugfs_create_iomem_x32("ICC", S_IRUGO | S_IWUSR, d,
+                                    wil->csr + off);
+       wil_debugfs_create_iomem_x32("ICR", S_IRUGO | S_IWUSR, d,
+                                    wil->csr + off + 4);
+       wil_debugfs_create_iomem_x32("ICM", S_IRUGO | S_IWUSR, d,
+                                    wil->csr + off + 8);
+       wil_debugfs_create_iomem_x32("ICS", S_IWUSR, d,
+                                    wil->csr + off + 12);
+       wil_debugfs_create_iomem_x32("IMV", S_IRUGO | S_IWUSR, d,
+                                    wil->csr + off + 16);
+       wil_debugfs_create_iomem_x32("IMS", S_IWUSR, d,
+                                    wil->csr + off + 20);
+       wil_debugfs_create_iomem_x32("IMC", S_IWUSR, d,
+                                    wil->csr + off + 24);
+
+       return 0;
+}
+
+static int wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil,
+                                            struct dentry *parent)
+{
+       struct dentry *d = debugfs_create_dir("PSEUDO_ISR", parent);
+
+       if (IS_ERR_OR_NULL(d))
+               return -ENODEV;
+
+       wil_debugfs_create_iomem_x32("CAUSE", S_IRUGO, d, wil->csr +
+                                    HOSTADDR(RGF_DMA_PSEUDO_CAUSE));
+       wil_debugfs_create_iomem_x32("MASK_SW", S_IRUGO, d, wil->csr +
+                                    HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+       wil_debugfs_create_iomem_x32("MASK_FW", S_IRUGO, d, wil->csr +
+                                    HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW));
+
+       return 0;
+}
+
+static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil,
+                                         struct dentry *parent)
+{
+       struct dentry *d = debugfs_create_dir("ITR_CNT", parent);
+
+       if (IS_ERR_OR_NULL(d))
+               return -ENODEV;
+
+       wil_debugfs_create_iomem_x32("TRSH", S_IRUGO, d, wil->csr +
+                                    HOSTADDR(RGF_DMA_ITR_CNT_TRSH));
+       wil_debugfs_create_iomem_x32("DATA", S_IRUGO, d, wil->csr +
+                                    HOSTADDR(RGF_DMA_ITR_CNT_DATA));
+       wil_debugfs_create_iomem_x32("CTL", S_IRUGO, d, wil->csr +
+                                    HOSTADDR(RGF_DMA_ITR_CNT_CRL));
+
+       return 0;
+}
+
+static int wil_memread_debugfs_show(struct seq_file *s, void *data)
+{
+       struct wil6210_priv *wil = s->private;
+       void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr));
+
+       if (a)
+               seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, ioread32(a));
+       else
+               seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
+
+       return 0;
+}
+
+static int wil_memread_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, wil_memread_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_memread = {
+       .open           = wil_memread_seq_open,
+       .release        = single_release,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+};
+
+static int wil_default_open(struct inode *inode, struct file *file)
+{
+       if (inode->i_private)
+               file->private_data = inode->i_private;
+
+       return 0;
+}
+
+static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
+                               size_t count, loff_t *ppos)
+{
+       enum { max_count = 4096 };
+       struct debugfs_blob_wrapper *blob = file->private_data;
+       loff_t pos = *ppos;
+       size_t available = blob->size;
+       void *buf;
+       size_t ret;
+
+       if (pos < 0)
+               return -EINVAL;
+
+       if (pos >= available || !count)
+               return 0;
+
+       if (count > available - pos)
+               count = available - pos;
+       if (count > max_count)
+               count = max_count;
+
+       buf = kmalloc(count, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       wil_memcpy_fromio_32(buf, (const volatile void __iomem *)blob->data +
+                            pos, count);
+
+       ret = copy_to_user(user_buf, buf, count);
+       kfree(buf);
+       if (ret == count)
+               return -EFAULT;
+
+       count -= ret;
+       *ppos = pos + count;
+
+       return count;
+}
+
+static const struct file_operations fops_ioblob = {
+       .read =         wil_read_file_ioblob,
+       .open =         wil_default_open,
+       .llseek =       default_llseek,
+};
+
+static
+struct dentry *wil_debugfs_create_ioblob(const char *name,
+                                        mode_t mode,
+                                        struct dentry *parent,
+                                        struct debugfs_blob_wrapper *blob)
+{
+       return debugfs_create_file(name, mode, parent, blob, &fops_ioblob);
+}
+/*---reset---*/
+static ssize_t wil_write_file_reset(struct file *file, const char __user *buf,
+                                   size_t len, loff_t *ppos)
+{
+       struct wil6210_priv *wil = file->private_data;
+       struct net_device *ndev = wil_to_ndev(wil);
+
+       /**
+        * BUG:
+        * this code does NOT sync device state with the rest of the system;
+        * use with care, debug only!
+        */
+       rtnl_lock();
+       dev_close(ndev);
+       ndev->flags &= ~IFF_UP;
+       rtnl_unlock();
+       wil_reset(wil);
+
+       return len;
+}
+
+static const struct file_operations fops_reset = {
+       .write = wil_write_file_reset,
+       .open  = wil_default_open,
+};
+/*---------Tx descriptor------------*/
+
+static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
+{
+       struct wil6210_priv *wil = s->private;
+       struct vring *vring = &(wil->vring_tx[0]);
+
+       if (!vring->va) {
+               seq_printf(s, "No Tx VRING\n");
+               return 0;
+       }
+
+       if (dbg_txdesc_index < vring->size) {
+               volatile struct vring_tx_desc *d =
+                               &(vring->va[dbg_txdesc_index].tx);
+               volatile u32 *u = (volatile u32 *)d;
+               struct sk_buff *skb = vring->ctx[dbg_txdesc_index];
+
+               seq_printf(s, "Tx[%3d] = {\n", dbg_txdesc_index);
+               seq_printf(s, "  MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+                          u[0], u[1], u[2], u[3]);
+               seq_printf(s, "  DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+                          u[4], u[5], u[6], u[7]);
+               seq_printf(s, "  SKB = %p\n", skb);
+
+               if (skb) {
+                       unsigned char printbuf[16 * 3 + 2];
+                       int i = 0;
+                       int len = skb_headlen(skb);
+                       void *p = skb->data;
+
+                       seq_printf(s, "    len = %d\n", len);
+
+                       while (i < len) {
+                               int l = min(len - i, 16);
+                               hex_dump_to_buffer(p + i, l, 16, 1, printbuf,
+                                                  sizeof(printbuf), false);
+                               seq_printf(s, "      : %s\n", printbuf);
+                               i += l;
+                       }
+               }
+               seq_printf(s, "}\n");
+       } else {
+               seq_printf(s, "TxDesc index (%d) >= size (%d)\n",
+                          dbg_txdesc_index, vring->size);
+       }
+
+       return 0;
+}
+
+static int wil_txdesc_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, wil_txdesc_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_txdesc = {
+       .open           = wil_txdesc_seq_open,
+       .release        = single_release,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+};
+
+/*---------beamforming------------*/
+static int wil_bf_debugfs_show(struct seq_file *s, void *data)
+{
+       struct wil6210_priv *wil = s->private;
+       seq_printf(s,
+                  "TSF : 0x%016llx\n"
+                  "TxMCS : %d\n"
+                  "Sectors(rx:tx) my %2d:%2d peer %2d:%2d\n",
+                  wil->stats.tsf, wil->stats.bf_mcs,
+                  wil->stats.my_rx_sector, wil->stats.my_tx_sector,
+                  wil->stats.peer_rx_sector, wil->stats.peer_tx_sector);
+       return 0;
+}
+
+static int wil_bf_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, wil_bf_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_bf = {
+       .open           = wil_bf_seq_open,
+       .release        = single_release,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+};
+/*---------SSID------------*/
+static ssize_t wil_read_file_ssid(struct file *file, char __user *user_buf,
+                                 size_t count, loff_t *ppos)
+{
+       struct wil6210_priv *wil = file->private_data;
+       struct wireless_dev *wdev = wil_to_wdev(wil);
+
+       return simple_read_from_buffer(user_buf, count, ppos,
+                                      wdev->ssid, wdev->ssid_len);
+}
+
+static ssize_t wil_write_file_ssid(struct file *file, const char __user *buf,
+                                  size_t count, loff_t *ppos)
+{
+       struct wil6210_priv *wil = file->private_data;
+       struct wireless_dev *wdev = wil_to_wdev(wil);
+       struct net_device *ndev = wil_to_ndev(wil);
+
+       if (*ppos != 0) {
+               wil_err(wil, "Unable to set SSID substring from [%d]\n",
+                       (int)*ppos);
+               return -EINVAL;
+       }
+
+       if (count > sizeof(wdev->ssid)) {
+               wil_err(wil, "SSID too long, len = %d\n", (int)count);
+               return -EINVAL;
+       }
+       if (netif_running(ndev)) {
+               wil_err(wil, "Unable to change SSID on running interface\n");
+               return -EINVAL;
+       }
+
+       wdev->ssid_len = count;
+       return simple_write_to_buffer(wdev->ssid, wdev->ssid_len, ppos,
+                                     buf, count);
+}
+
+static const struct file_operations fops_ssid = {
+       .read = wil_read_file_ssid,
+       .write = wil_write_file_ssid,
+       .open  = wil_default_open,
+};
+
+/*----------------*/
+int wil6210_debugfs_init(struct wil6210_priv *wil)
+{
+       struct dentry *dbg = wil->debug = debugfs_create_dir(WIL_NAME,
+                       wil_to_wiphy(wil)->debugfsdir);
+
+       if (IS_ERR_OR_NULL(dbg))
+               return -ENODEV;
+
+       debugfs_create_file("mbox", S_IRUGO, dbg, wil, &fops_mbox);
+       debugfs_create_file("vrings", S_IRUGO, dbg, wil, &fops_vring);
+       debugfs_create_file("txdesc", S_IRUGO, dbg, wil, &fops_txdesc);
+       debugfs_create_u32("txdesc_index", S_IRUGO | S_IWUSR, dbg,
+                          &dbg_txdesc_index);
+       debugfs_create_file("bf", S_IRUGO, dbg, wil, &fops_bf);
+       debugfs_create_file("ssid", S_IRUGO | S_IWUSR, dbg, wil, &fops_ssid);
+       debugfs_create_u32("secure_pcp", S_IRUGO | S_IWUSR, dbg,
+                          &wil->secure_pcp);
+
+       wil6210_debugfs_create_ISR(wil, "USER_ICR", dbg,
+                                  HOSTADDR(RGF_USER_USER_ICR));
+       wil6210_debugfs_create_ISR(wil, "DMA_EP_TX_ICR", dbg,
+                                  HOSTADDR(RGF_DMA_EP_TX_ICR));
+       wil6210_debugfs_create_ISR(wil, "DMA_EP_RX_ICR", dbg,
+                                  HOSTADDR(RGF_DMA_EP_RX_ICR));
+       wil6210_debugfs_create_ISR(wil, "DMA_EP_MISC_ICR", dbg,
+                                  HOSTADDR(RGF_DMA_EP_MISC_ICR));
+       wil6210_debugfs_create_pseudo_ISR(wil, dbg);
+       wil6210_debugfs_create_ITR_CNT(wil, dbg);
+
+       debugfs_create_u32("mem_addr", S_IRUGO | S_IWUSR, dbg, &mem_addr);
+       debugfs_create_file("mem_val", S_IRUGO, dbg, wil, &fops_memread);
+
+       debugfs_create_file("reset", S_IWUSR, dbg, wil, &fops_reset);
+
+       wil->rgf_blob.data = (void * __force)wil->csr + 0;
+       wil->rgf_blob.size = 0xa000;
+       wil_debugfs_create_ioblob("blob_rgf", S_IRUGO, dbg, &wil->rgf_blob);
+
+       wil->fw_code_blob.data = (void * __force)wil->csr + 0x40000;
+       wil->fw_code_blob.size = 0x40000;
+       wil_debugfs_create_ioblob("blob_fw_code", S_IRUGO, dbg,
+                                 &wil->fw_code_blob);
+
+       wil->fw_data_blob.data = (void * __force)wil->csr + 0x80000;
+       wil->fw_data_blob.size = 0x8000;
+       wil_debugfs_create_ioblob("blob_fw_data", S_IRUGO, dbg,
+                                 &wil->fw_data_blob);
+
+       wil->fw_peri_blob.data = (void * __force)wil->csr + 0x88000;
+       wil->fw_peri_blob.size = 0x18000;
+       wil_debugfs_create_ioblob("blob_fw_peri", S_IRUGO, dbg,
+                                 &wil->fw_peri_blob);
+
+       wil->uc_code_blob.data = (void * __force)wil->csr + 0xa0000;
+       wil->uc_code_blob.size = 0x10000;
+       wil_debugfs_create_ioblob("blob_uc_code", S_IRUGO, dbg,
+                                 &wil->uc_code_blob);
+
+       wil->uc_data_blob.data = (void * __force)wil->csr + 0xb0000;
+       wil->uc_data_blob.size = 0x4000;
+       wil_debugfs_create_ioblob("blob_uc_data", S_IRUGO, dbg,
+                                 &wil->uc_data_blob);
+
+       return 0;
+}
+
+void wil6210_debugfs_remove(struct wil6210_priv *wil)
+{
+       debugfs_remove_recursive(wil->debug);
+       wil->debug = NULL;
+}
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
new file mode 100644 (file)
index 0000000..38049da
--- /dev/null
@@ -0,0 +1,471 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/interrupt.h>
+
+#include "wil6210.h"
+
+/**
+ * Theory of operation:
+ *
+ * There is an ISR pseudo-cause register,
+ * dma_rgf->DMA_RGF.PSEUDO_CAUSE.PSEUDO_CAUSE.
+ * Its bits represent the OR'ed bits from the 3 real ISR registers:
+ * TX, RX, and MISC.
+ *
+ * Registers may be configured to either "write 1 to clear" or
+ * "clear on read" mode.
+ *
+ * When handling an interrupt, one has to mask/unmask interrupts for the
+ * real ISR registers, or the hardware may malfunction.
+ */
+
+#define WIL6210_IRQ_DISABLE    (0xFFFFFFFFUL)
+#define WIL6210_IMC_RX         BIT_DMA_EP_RX_ICR_RX_DONE
+#define WIL6210_IMC_TX         (BIT_DMA_EP_TX_ICR_TX_DONE | \
+                               BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
+#define WIL6210_IMC_MISC       (ISR_MISC_FW_READY | ISR_MISC_MBOX_EVT)
+
+#define WIL6210_IRQ_PSEUDO_MASK (u32)(~(BIT_DMA_PSEUDO_CAUSE_RX | \
+                                       BIT_DMA_PSEUDO_CAUSE_TX | \
+                                       BIT_DMA_PSEUDO_CAUSE_MISC))
+
+#if defined(CONFIG_WIL6210_ISR_COR)
+/* configure to Clear-On-Read mode */
+#define WIL_ICR_ICC_VALUE      (0xFFFFFFFFUL)
+
+static inline void wil_icr_clear(u32 x, void __iomem *addr)
+{
+
+}
+#else /* defined(CONFIG_WIL6210_ISR_COR) */
+/* configure to Write-1-to-Clear mode */
+#define WIL_ICR_ICC_VALUE      (0UL)
+
+static inline void wil_icr_clear(u32 x, void __iomem *addr)
+{
+       iowrite32(x, addr);
+}
+#endif /* defined(CONFIG_WIL6210_ISR_COR) */
+
+static inline u32 wil_ioread32_and_clear(void __iomem *addr)
+{
+       u32 x = ioread32(addr);
+
+       wil_icr_clear(x, addr);
+
+       return x;
+}
+
+static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
+{
+       iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+                 HOSTADDR(RGF_DMA_EP_TX_ICR) +
+                 offsetof(struct RGF_ICR, IMS));
+}
+
+static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
+{
+       iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+                 HOSTADDR(RGF_DMA_EP_RX_ICR) +
+                 offsetof(struct RGF_ICR, IMS));
+}
+
+static void wil6210_mask_irq_misc(struct wil6210_priv *wil)
+{
+       iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+                 HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+                 offsetof(struct RGF_ICR, IMS));
+}
+
+static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
+{
+       wil_dbg_IRQ(wil, "%s()\n", __func__);
+
+       iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
+                 HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+
+       clear_bit(wil_status_irqen, &wil->status);
+}
+
+static void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
+{
+       iowrite32(WIL6210_IMC_TX, wil->csr +
+                 HOSTADDR(RGF_DMA_EP_TX_ICR) +
+                 offsetof(struct RGF_ICR, IMC));
+}
+
+static void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
+{
+       iowrite32(WIL6210_IMC_RX, wil->csr +
+                 HOSTADDR(RGF_DMA_EP_RX_ICR) +
+                 offsetof(struct RGF_ICR, IMC));
+}
+
+static void wil6210_unmask_irq_misc(struct wil6210_priv *wil)
+{
+       iowrite32(WIL6210_IMC_MISC, wil->csr +
+                 HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+                 offsetof(struct RGF_ICR, IMC));
+}
+
+static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
+{
+       wil_dbg_IRQ(wil, "%s()\n", __func__);
+
+       set_bit(wil_status_irqen, &wil->status);
+
+       iowrite32(WIL6210_IRQ_PSEUDO_MASK, wil->csr +
+                 HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+}
+
+void wil6210_disable_irq(struct wil6210_priv *wil)
+{
+       wil_dbg_IRQ(wil, "%s()\n", __func__);
+
+       wil6210_mask_irq_tx(wil);
+       wil6210_mask_irq_rx(wil);
+       wil6210_mask_irq_misc(wil);
+       wil6210_mask_irq_pseudo(wil);
+}
+
+void wil6210_enable_irq(struct wil6210_priv *wil)
+{
+       wil_dbg_IRQ(wil, "%s()\n", __func__);
+
+       iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
+                 offsetof(struct RGF_ICR, ICC));
+       iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
+                 offsetof(struct RGF_ICR, ICC));
+       iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+                 offsetof(struct RGF_ICR, ICC));
+
+       wil6210_unmask_irq_pseudo(wil);
+       wil6210_unmask_irq_tx(wil);
+       wil6210_unmask_irq_rx(wil);
+       wil6210_unmask_irq_misc(wil);
+}
+
+static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
+{
+       struct wil6210_priv *wil = cookie;
+       u32 isr = wil_ioread32_and_clear(wil->csr +
+                                        HOSTADDR(RGF_DMA_EP_RX_ICR) +
+                                        offsetof(struct RGF_ICR, ICR));
+
+       wil_dbg_IRQ(wil, "ISR RX 0x%08x\n", isr);
+
+       if (!isr) {
+               wil_err(wil, "spurious IRQ: RX\n");
+               return IRQ_NONE;
+       }
+
+       wil6210_mask_irq_rx(wil);
+
+       if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) {
+               wil_dbg_IRQ(wil, "RX done\n");
+               isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE;
+               wil_rx_handle(wil);
+       }
+
+       if (isr)
+               wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);
+
+       wil6210_unmask_irq_rx(wil);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
+{
+       struct wil6210_priv *wil = cookie;
+       u32 isr = wil_ioread32_and_clear(wil->csr +
+                                        HOSTADDR(RGF_DMA_EP_TX_ICR) +
+                                        offsetof(struct RGF_ICR, ICR));
+
+       wil_dbg_IRQ(wil, "ISR TX 0x%08x\n", isr);
+
+       if (!isr) {
+               wil_err(wil, "spurious IRQ: TX\n");
+               return IRQ_NONE;
+       }
+
+       wil6210_mask_irq_tx(wil);
+
+       if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) {
+               uint i;
+               wil_dbg_IRQ(wil, "TX done\n");
+               isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
+               for (i = 0; i < 24; i++) {
+                       u32 mask = BIT_DMA_EP_TX_ICR_TX_DONE_N(i);
+                       if (isr & mask) {
+                               isr &= ~mask;
+                               wil_dbg_IRQ(wil, "TX done(%i)\n", i);
+                               wil_tx_complete(wil, i);
+                       }
+               }
+       }
+
+       if (isr)
+               wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);
+
+       wil6210_unmask_irq_tx(wil);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
+{
+       struct wil6210_priv *wil = cookie;
+       u32 isr = wil_ioread32_and_clear(wil->csr +
+                                        HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+                                        offsetof(struct RGF_ICR, ICR));
+
+       wil_dbg_IRQ(wil, "ISR MISC 0x%08x\n", isr);
+
+       if (!isr) {
+               wil_err(wil, "spurious IRQ: MISC\n");
+               return IRQ_NONE;
+       }
+
+       wil6210_mask_irq_misc(wil);
+
+       if (isr & ISR_MISC_FW_READY) {
+               wil_dbg_IRQ(wil, "IRQ: FW ready\n");
+               /**
+                * Actual FW readiness is indicated by the
+                * WMI_FW_READY_EVENTID event
+                */
+               isr &= ~ISR_MISC_FW_READY;
+       }
+
+       wil->isr_misc = isr;
+
+       if (isr) {
+               return IRQ_WAKE_THREAD;
+       } else {
+               wil6210_unmask_irq_misc(wil);
+               return IRQ_HANDLED;
+       }
+}
+
+static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
+{
+       struct wil6210_priv *wil = cookie;
+       u32 isr = wil->isr_misc;
+
+       wil_dbg_IRQ(wil, "Thread ISR MISC 0x%08x\n", isr);
+
+       if (isr & ISR_MISC_MBOX_EVT) {
+               wil_dbg_IRQ(wil, "MBOX event\n");
+               wmi_recv_cmd(wil);
+               isr &= ~ISR_MISC_MBOX_EVT;
+       }
+
+       if (isr)
+               wil_err(wil, "un-handled MISC ISR bits 0x%08x\n", isr);
+
+       wil->isr_misc = 0;
+
+       wil6210_unmask_irq_misc(wil);
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * thread IRQ handler
+ */
+static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
+{
+       struct wil6210_priv *wil = cookie;
+
+       wil_dbg_IRQ(wil, "Thread IRQ\n");
+       /* Discover real IRQ cause */
+       if (wil->isr_misc)
+               wil6210_irq_misc_thread(irq, cookie);
+
+       wil6210_unmask_irq_pseudo(wil);
+
+       return IRQ_HANDLED;
+}
+
+/* DEBUG
+ * There is a subtle bug in the hardware that causes an IRQ to be raised when
+ * it should be masked. It is quite rare and hard to debug.
+ *
+ * Catch the issue if it happens and print everything we can.
+ */
+static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
+{
+       if (!test_bit(wil_status_irqen, &wil->status)) {
+               u32 icm_rx = wil_ioread32_and_clear(wil->csr +
+                               HOSTADDR(RGF_DMA_EP_RX_ICR) +
+                               offsetof(struct RGF_ICR, ICM));
+               u32 icr_rx = wil_ioread32_and_clear(wil->csr +
+                               HOSTADDR(RGF_DMA_EP_RX_ICR) +
+                               offsetof(struct RGF_ICR, ICR));
+               u32 imv_rx = ioread32(wil->csr +
+                               HOSTADDR(RGF_DMA_EP_RX_ICR) +
+                               offsetof(struct RGF_ICR, IMV));
+               u32 icm_tx = wil_ioread32_and_clear(wil->csr +
+                               HOSTADDR(RGF_DMA_EP_TX_ICR) +
+                               offsetof(struct RGF_ICR, ICM));
+               u32 icr_tx = wil_ioread32_and_clear(wil->csr +
+                               HOSTADDR(RGF_DMA_EP_TX_ICR) +
+                               offsetof(struct RGF_ICR, ICR));
+               u32 imv_tx = ioread32(wil->csr +
+                               HOSTADDR(RGF_DMA_EP_TX_ICR) +
+                               offsetof(struct RGF_ICR, IMV));
+               u32 icm_misc = wil_ioread32_and_clear(wil->csr +
+                               HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+                               offsetof(struct RGF_ICR, ICM));
+               u32 icr_misc = wil_ioread32_and_clear(wil->csr +
+                               HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+                               offsetof(struct RGF_ICR, ICR));
+               u32 imv_misc = ioread32(wil->csr +
+                               HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+                               offsetof(struct RGF_ICR, IMV));
+               wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
+                               "Rx   icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
+                               "Tx   icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
+                               "Misc icm:icr:imv 0x%08x 0x%08x 0x%08x\n",
+                               pseudo_cause,
+                               icm_rx, icr_rx, imv_rx,
+                               icm_tx, icr_tx, imv_tx,
+                               icm_misc, icr_misc, imv_misc);
+
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static irqreturn_t wil6210_hardirq(int irq, void *cookie)
+{
+       irqreturn_t rc = IRQ_HANDLED;
+       struct wil6210_priv *wil = cookie;
+       u32 pseudo_cause = ioread32(wil->csr + HOSTADDR(RGF_DMA_PSEUDO_CAUSE));
+
+       /**
+        * pseudo_cause is Clear-On-Read, no need to ACK
+        */
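+       /* 0xff in the low byte likely means the device is gone
+        * (reads return all-ones); treat it as not our interrupt
+        */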
+       if ((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff))
+               return IRQ_NONE;
+
+       /* FIXME: IRQ mask debug */
+       if (wil6210_debug_irq_mask(wil, pseudo_cause))
+               return IRQ_NONE;
+
+       wil6210_mask_irq_pseudo(wil);
+
+       /* Discover the real IRQ cause.
+        * There are 2 possible phases for every IRQ:
+        * - hard IRQ handler called right here
+        * - threaded handler called later
+        *
+        * The hard IRQ handler reads and clears the ISR.
+        *
+        * If a threaded handler is requested, the hard IRQ handler
+        * returns IRQ_WAKE_THREAD and saves the ISR register value
+        * for the threaded handler to use.
+        *
+        * Voting for wake thread - at least 1 vote is needed.
+        */
+       if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_RX) &&
+           (wil6210_irq_rx(irq, cookie) == IRQ_WAKE_THREAD))
+               rc = IRQ_WAKE_THREAD;
+
+       if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_TX) &&
+           (wil6210_irq_tx(irq, cookie) == IRQ_WAKE_THREAD))
+               rc = IRQ_WAKE_THREAD;
+
+       if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_MISC) &&
+           (wil6210_irq_misc(irq, cookie) == IRQ_WAKE_THREAD))
+               rc = IRQ_WAKE_THREAD;
+
+       /* if thread is requested, it will unmask IRQ */
+       if (rc != IRQ_WAKE_THREAD)
+               wil6210_unmask_irq_pseudo(wil);
+
+       wil_dbg_IRQ(wil, "Hard IRQ 0x%08x\n", pseudo_cause);
+
+       return rc;
+}
+
+static int wil6210_request_3msi(struct wil6210_priv *wil, int irq)
+{
+       int rc;
+       /*
+        * IRQs are assigned in the following order:
+        * - Tx
+        * - Rx
+        * - Misc
+        */
+
+       rc = request_irq(irq, wil6210_irq_tx, IRQF_SHARED,
+                        WIL_NAME"_tx", wil);
+       if (rc)
+               return rc;
+
+       rc = request_irq(irq + 1, wil6210_irq_rx, IRQF_SHARED,
+                        WIL_NAME"_rx", wil);
+       if (rc)
+               goto free0;
+
+       rc = request_threaded_irq(irq + 2, wil6210_irq_misc,
+                                 wil6210_irq_misc_thread,
+                                 IRQF_SHARED, WIL_NAME"_misc", wil);
+       if (rc)
+               goto free1;
+
+       return 0;
+       /* error branch */
+free1:
+       free_irq(irq + 1, wil);
+free0:
+       free_irq(irq, wil);
+
+       return rc;
+}
+
+int wil6210_init_irq(struct wil6210_priv *wil, int irq)
+{
+       int rc;
+       if (wil->n_msi == 3)
+               rc = wil6210_request_3msi(wil, irq);
+       else
+               rc = request_threaded_irq(irq, wil6210_hardirq,
+                                         wil6210_thread_irq,
+                                         wil->n_msi ? 0 : IRQF_SHARED,
+                                         WIL_NAME, wil);
+       if (rc)
+               return rc;
+
+       wil6210_enable_irq(wil);
+
+       return 0;
+}
+
+void wil6210_fini_irq(struct wil6210_priv *wil, int irq)
+{
+       wil6210_disable_irq(wil);
+       free_irq(irq, wil);
+       if (wil->n_msi == 3) {
+               free_irq(irq + 1, wil);
+               free_irq(irq + 2, wil);
+       }
+}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
new file mode 100644 (file)
index 0000000..95fcd36
--- /dev/null
@@ -0,0 +1,407 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/ieee80211.h>
+#include <linux/wireless.h>
+#include <linux/slab.h>
+#include <linux/moduleparam.h>
+#include <linux/if_arp.h>
+
+#include "wil6210.h"
+
+/*
+ * Due to a hardware issue, the NIC has to be read/written in 32-bit chunks;
+ * regular memcpy_fromio() and siblings do not work on 64-bit platforms
+ * because they may use 64-bit transactions.
+ *
+ * Force 32-bit transactions to make the NIC usable on 64-bit platforms.
+ *
+ * To avoid byte swapping on big-endian hosts, __raw_{read|write}l
+ * should be used - {read|write}l would swap the little-endian PCI value
+ * into host endianness.
+ */
+void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
+                         size_t count)
+{
+       u32 *d = dst;
+       const volatile u32 __iomem *s = src;
+
+       /* size_t is unsigned, if (count%4 != 0) it will wrap */
+       for (count += 4; count > 4; count -= 4)
+               *d++ = __raw_readl(s++);
+}
+
+void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
+                       size_t count)
+{
+       volatile u32 __iomem *d = dst;
+       const u32 *s = src;
+
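+       /* same wrap-safe loop as in wil_memcpy_fromio_32() above */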
+       for (count += 4; count > 4; count -= 4)
+               __raw_writel(*s++, d++);
+}
+
+static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+{
+       uint i;
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct wireless_dev *wdev = wil->wdev;
+
+       wil_dbg(wil, "%s()\n", __func__);
+
+       wil_link_off(wil);
+       clear_bit(wil_status_fwconnected, &wil->status);
+
+       switch (wdev->sme_state) {
+       case CFG80211_SME_CONNECTED:
+               cfg80211_disconnected(ndev, WLAN_STATUS_UNSPECIFIED_FAILURE,
+                                     NULL, 0, GFP_KERNEL);
+               break;
+       case CFG80211_SME_CONNECTING:
+               cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
+                                       WLAN_STATUS_UNSPECIFIED_FAILURE,
+                                       GFP_KERNEL);
+               break;
+       default:
+               ;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++)
+               wil_vring_fini_tx(wil, i);
+}
+
+static void wil_disconnect_worker(struct work_struct *work)
+{
+       struct wil6210_priv *wil = container_of(work,
+                       struct wil6210_priv, disconnect_worker);
+
+       _wil6210_disconnect(wil, NULL);
+}
+
+static void wil_connect_timer_fn(ulong x)
+{
+       struct wil6210_priv *wil = (void *)x;
+
+       wil_dbg(wil, "Connect timeout\n");
+
+       /* re-schedule to thread context - disconnect must not
+        * run in atomic context
+        */
+       schedule_work(&wil->disconnect_worker);
+}
+
+int wil_priv_init(struct wil6210_priv *wil)
+{
+       wil_dbg(wil, "%s()\n", __func__);
+
+       mutex_init(&wil->mutex);
+       mutex_init(&wil->wmi_mutex);
+
+       init_completion(&wil->wmi_ready);
+
+       wil->pending_connect_cid = -1;
+       setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
+
+       INIT_WORK(&wil->wmi_connect_worker, wmi_connect_worker);
+       INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
+       INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
+
+       INIT_LIST_HEAD(&wil->pending_wmi_ev);
+       spin_lock_init(&wil->wmi_ev_lock);
+
+       wil->wmi_wq = create_singlethread_workqueue(WIL_NAME"_wmi");
+       if (!wil->wmi_wq)
+               return -EAGAIN;
+
+       wil->wmi_wq_conn = create_singlethread_workqueue(WIL_NAME"_connect");
+       if (!wil->wmi_wq_conn) {
+               destroy_workqueue(wil->wmi_wq);
+               return -EAGAIN;
+       }
+
+       /* make shadow copy of registers that should not change on run time */
+       wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
+                            sizeof(struct wil6210_mbox_ctl));
+       wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
+       wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
+
+       return 0;
+}
+
+void wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+{
+       del_timer_sync(&wil->connect_timer);
+       _wil6210_disconnect(wil, bssid);
+}
+
+void wil_priv_deinit(struct wil6210_priv *wil)
+{
+       cancel_work_sync(&wil->disconnect_worker);
+       wil6210_disconnect(wil, NULL);
+       wmi_event_flush(wil);
+       destroy_workqueue(wil->wmi_wq_conn);
+       destroy_workqueue(wil->wmi_wq);
+}
+
+static void wil_target_reset(struct wil6210_priv *wil)
+{
+       wil_dbg(wil, "Resetting...\n");
+
+       /* register write */
+#define W(a, v) iowrite32(v, wil->csr + HOSTADDR(a))
+       /* register set = read, OR, write */
+#define S(a, v) iowrite32(ioread32(wil->csr + HOSTADDR(a)) | v, \
+               wil->csr + HOSTADDR(a))
+
+       /* hpal_perst_from_pad_src_n_mask */
+       S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(6));
+       /* car_perst_rst_src_n_mask */
+       S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(7));
+
+       W(RGF_USER_MAC_CPU_0,  BIT(1)); /* mac_cpu_man_rst */
+       W(RGF_USER_USER_CPU_0, BIT(1)); /* user_cpu_man_rst */
+
+       msleep(100);
+
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000170);
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FC00);
+
+       msleep(100);
+
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001);
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080);
+       W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+
+       msleep(2000);
+
+       W(RGF_USER_USER_CPU_0, BIT(0)); /* user_cpu_man_de_rst */
+
+       msleep(2000);
+
+       wil_dbg(wil, "Reset completed\n");
+
+#undef W
+#undef S
+}
+
+void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
+{
+       le32_to_cpus(&r->base);
+       le16_to_cpus(&r->entry_size);
+       le16_to_cpus(&r->size);
+       le32_to_cpus(&r->tail);
+       le32_to_cpus(&r->head);
+}
+
+static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
+{
+       ulong to = msecs_to_jiffies(1000);
+       ulong left = wait_for_completion_timeout(&wil->wmi_ready, to);
+       if (0 == left) {
+               wil_err(wil, "Firmware not ready\n");
+               return -ETIME;
+       } else {
+               wil_dbg(wil, "FW ready after %d ms\n",
+                       jiffies_to_msecs(to-left));
+       }
+       return 0;
+}
+
+/*
+ * We reset all the structures, and we reset the UMAC.
+ * After calling this routine, you're expected to reload
+ * the firmware.
+ */
+int wil_reset(struct wil6210_priv *wil)
+{
+       int rc;
+
+       cancel_work_sync(&wil->disconnect_worker);
+       wil6210_disconnect(wil, NULL);
+
+       wmi_event_flush(wil);
+
+       flush_workqueue(wil->wmi_wq);
+       flush_workqueue(wil->wmi_wq_conn);
+
+       wil6210_disable_irq(wil);
+       wil->status = 0;
+
+       /* TODO: put MAC in reset */
+       wil_target_reset(wil);
+
+       /* init after reset */
+       wil->pending_connect_cid = -1;
+       INIT_COMPLETION(wil->wmi_ready);
+
+       /* make shadow copy of registers that should not change on run time */
+       wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
+                            sizeof(struct wil6210_mbox_ctl));
+       wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
+       wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
+
+       /* TODO: release MAC reset */
+       wil6210_enable_irq(wil);
+
+       /* we just started MAC, wait for FW ready */
+       rc = wil_wait_for_fw_ready(wil);
+
+       return rc;
+}
+
+void wil_link_on(struct wil6210_priv *wil)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+
+       wil_dbg(wil, "%s()\n", __func__);
+
+       netif_carrier_on(ndev);
+       netif_tx_wake_all_queues(ndev);
+}
+
+void wil_link_off(struct wil6210_priv *wil)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+
+       wil_dbg(wil, "%s()\n", __func__);
+
+       netif_tx_stop_all_queues(ndev);
+       netif_carrier_off(ndev);
+}
+
+static int __wil_up(struct wil6210_priv *wil)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct wireless_dev *wdev = wil->wdev;
+       struct ieee80211_channel *channel = wdev->preset_chandef.chan;
+       int rc;
+       int bi;
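+       /* bi - beacon interval for wmi_set_bcon(); AP/GO use 100, others 0 */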
+       u16 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
+       rc = wil_reset(wil);
+       if (rc)
+               return rc;
+
+       /* FIXME Firmware works now in PBSS mode(ToDS=0, FromDS=0) */
+       wmi_nettype = wil_iftype_nl2wmi(NL80211_IFTYPE_ADHOC);
+       switch (wdev->iftype) {
+       case NL80211_IFTYPE_STATION:
+               wil_dbg(wil, "type: STATION\n");
+               bi = 0;
+               ndev->type = ARPHRD_ETHER;
+               break;
+       case NL80211_IFTYPE_AP:
+               wil_dbg(wil, "type: AP\n");
+               bi = 100;
+               ndev->type = ARPHRD_ETHER;
+               break;
+       case NL80211_IFTYPE_P2P_CLIENT:
+               wil_dbg(wil, "type: P2P_CLIENT\n");
+               bi = 0;
+               ndev->type = ARPHRD_ETHER;
+               break;
+       case NL80211_IFTYPE_P2P_GO:
+               wil_dbg(wil, "type: P2P_GO\n");
+               bi = 100;
+               ndev->type = ARPHRD_ETHER;
+               break;
+       case NL80211_IFTYPE_MONITOR:
+               wil_dbg(wil, "type: Monitor\n");
+               bi = 0;
+               ndev->type = ARPHRD_IEEE80211_RADIOTAP;
+               /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP ? */
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       /* Apply profile in the following order: */
+       /* SSID and channel for the AP */
+       switch (wdev->iftype) {
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_P2P_GO:
+               if (wdev->ssid_len == 0) {
+                       wil_err(wil, "SSID not set\n");
+                       return -EINVAL;
+               }
+               wmi_set_ssid(wil, wdev->ssid_len, wdev->ssid);
+               if (channel)
+                       wmi_set_channel(wil, channel->hw_value);
+               break;
+       default:
+               ;
+       }
+
+       /* MAC address - pre-requisite for other commands */
+       wmi_set_mac_address(wil, ndev->dev_addr);
+
+       /* Set up beaconing if required. */
+       rc = wmi_set_bcon(wil, bi, wmi_nettype);
+       if (rc)
+               return rc;
+
+       /* Rx VRING. After MAC and beacon */
+       wil_rx_init(wil);
+
+       return 0;
+}
+
+int wil_up(struct wil6210_priv *wil)
+{
+       int rc;
+
+       mutex_lock(&wil->mutex);
+       rc = __wil_up(wil);
+       mutex_unlock(&wil->mutex);
+
+       return rc;
+}
+
+static int __wil_down(struct wil6210_priv *wil)
+{
+       if (wil->scan_request) {
+               cfg80211_scan_done(wil->scan_request, true);
+               wil->scan_request = NULL;
+       }
+
+       wil6210_disconnect(wil, NULL);
+       wil_rx_fini(wil);
+
+       return 0;
+}
+
+int wil_down(struct wil6210_priv *wil)
+{
+       int rc;
+
+       mutex_lock(&wil->mutex);
+       rc = __wil_down(wil);
+       mutex_unlock(&wil->mutex);
+
+       return rc;
+}
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
new file mode 100644 (file)
index 0000000..3068b5c
--- /dev/null
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+
+#include "wil6210.h"
+
+static int wil_open(struct net_device *ndev)
+{
+       struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+       return wil_up(wil);
+}
+
+static int wil_stop(struct net_device *ndev)
+{
+       struct wil6210_priv *wil = ndev_to_wil(ndev);
+
+       return wil_down(wil);
+}
+
+/*
+ * AC to queue mapping
+ *
+ * AC_VO -> queue 3
+ * AC_VI -> queue 2
+ * AC_BE -> queue 1
+ * AC_BK -> queue 0
+ */
+static u16 wil_select_queue(struct net_device *ndev, struct sk_buff *skb)
+{
+       static const u16 wil_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
+       struct wil6210_priv *wil = ndev_to_wil(ndev);
+       u16 rc;
+
+       skb->priority = cfg80211_classify8021d(skb);
+
+       rc = wil_1d_to_queue[skb->priority];
+
+       wil_dbg_TXRX(wil, "%s() %d -> %d\n", __func__, (int)skb->priority,
+                    (int)rc);
+
+       return rc;
+}
+
+static const struct net_device_ops wil_netdev_ops = {
+       .ndo_open               = wil_open,
+       .ndo_stop               = wil_stop,
+       .ndo_start_xmit         = wil_start_xmit,
+       .ndo_select_queue       = wil_select_queue,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+};
+
+void *wil_if_alloc(struct device *dev, void __iomem *csr)
+{
+       struct net_device *ndev;
+       struct wireless_dev *wdev;
+       struct wil6210_priv *wil;
+       struct ieee80211_channel *ch;
+       int rc = 0;
+
+       wdev = wil_cfg80211_init(dev);
+       if (IS_ERR(wdev)) {
+               dev_err(dev, "wil_cfg80211_init failed\n");
+               return wdev;
+       }
+
+       wil = wdev_to_wil(wdev);
+       wil->csr = csr;
+       wil->wdev = wdev;
+
+       rc = wil_priv_init(wil);
+       if (rc) {
+               dev_err(dev, "wil_priv_init failed\n");
+               goto out_wdev;
+       }
+
+       wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */
+       /* default monitor channel */
+       ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels;
+       cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
+
+       ndev = alloc_netdev_mqs(0, "wlan%d", ether_setup, WIL6210_TX_QUEUES, 1);
+       if (!ndev) {
+               dev_err(dev, "alloc_netdev_mqs failed\n");
+               rc = -ENOMEM;
+               goto out_priv;
+       }
+
+       ndev->netdev_ops = &wil_netdev_ops;
+       ndev->ieee80211_ptr = wdev;
+       SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
+       wdev->netdev = ndev;
+
+       wil_link_off(wil);
+
+       return wil;
+
+ out_priv:
+       wil_priv_deinit(wil);
+
+ out_wdev:
+       wil_wdev_free(wil);
+
+       return ERR_PTR(rc);
+}
+
+void wil_if_free(struct wil6210_priv *wil)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       if (!ndev)
+               return;
+
+       free_netdev(ndev);
+       wil_priv_deinit(wil);
+       wil_wdev_free(wil);
+}
+
+int wil_if_add(struct wil6210_priv *wil)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       int rc;
+
+       rc = register_netdev(ndev);
+       if (rc < 0) {
+               dev_err(&ndev->dev, "Failed to register netdev: %d\n", rc);
+               return rc;
+       }
+
+       wil_link_off(wil);
+
+       return 0;
+}
+
+void wil_if_remove(struct wil6210_priv *wil)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+
+       unregister_netdev(ndev);
+}
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
new file mode 100644 (file)
index 0000000..0fc83ed
--- /dev/null
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+#include <linux/pci.h>
+#include <linux/moduleparam.h>
+
+#include "wil6210.h"
+
+static int use_msi = 1;
+module_param(use_msi, int, S_IRUGO);
+MODULE_PARM_DESC(use_msi,
+                " Use MSI interrupt: "
+                "0 - don't use, 1 - single (default), 3 - use 3 vectors");
+
+/* Bus ops */
+static int wil_if_pcie_enable(struct wil6210_priv *wil)
+{
+       struct pci_dev *pdev = wil->pdev;
+       int rc;
+
+       pci_set_master(pdev);
+
+       /*
+        * how many MSI interrupts to request?
+        */
+       switch (use_msi) {
+       case 3:
+       case 1:
+       case 0:
+               break;
+       default:
+               wil_err(wil, "Invalid use_msi=%d, default to 1\n",
+                       use_msi);
+               use_msi = 1;
+       }
+       wil->n_msi = use_msi;
+       if (wil->n_msi) {
+               wil_dbg(wil, "Setup %d MSI interrupts\n", use_msi);
+               rc = pci_enable_msi_block(pdev, wil->n_msi);
+               if (rc && (wil->n_msi == 3)) {
+                       wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
+                       wil->n_msi = 1;
+                       rc = pci_enable_msi_block(pdev, wil->n_msi);
+               }
+               if (rc) {
+                       wil_err(wil, "pci_enable_msi failed, use INTx\n");
+                       wil->n_msi = 0;
+               }
+       } else {
+               wil_dbg(wil, "MSI interrupts disabled, use INTx\n");
+       }
+
+       rc = wil6210_init_irq(wil, pdev->irq);
+       if (rc)
+               goto stop_master;
+
+       /* need reset here to obtain MAC */
+       rc = wil_reset(wil);
+       if (rc)
+               goto release_irq;
+
+       return 0;
+
+ release_irq:
+       wil6210_fini_irq(wil, pdev->irq);
+       /* safe to call if no MSI */
+       pci_disable_msi(pdev);
+ stop_master:
+       pci_clear_master(pdev);
+       return rc;
+}
+
+static int wil_if_pcie_disable(struct wil6210_priv *wil)
+{
+       struct pci_dev *pdev = wil->pdev;
+
+       pci_clear_master(pdev);
+       /* disable and release IRQ */
+       wil6210_fini_irq(wil, pdev->irq);
+       /* safe to call if no MSI */
+       pci_disable_msi(pdev);
+       /* TODO: disable HW */
+
+       return 0;
+}
+
+static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct wil6210_priv *wil;
+       struct device *dev = &pdev->dev;
+       void __iomem *csr;
+       int rc;
+
+       /* check HW */
+       dev_info(&pdev->dev, WIL_NAME " device found [%04x:%04x] (rev %x)\n",
+                (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
+
+       if (pci_resource_len(pdev, 0) != WIL6210_MEM_SIZE) {
+               dev_err(&pdev->dev, "Not " WIL_NAME "? "
+                       "BAR0 size is %lu while expecting %lu\n",
+                       (ulong)pci_resource_len(pdev, 0), WIL6210_MEM_SIZE);
+               return -ENODEV;
+       }
+
+       rc = pci_enable_device(pdev);
+       if (rc) {
+               dev_err(&pdev->dev, "pci_enable_device failed\n");
+               return -ENODEV;
+       }
+       /* rollback to err_disable_pdev */
+
+       rc = pci_request_region(pdev, 0, WIL_NAME);
+       if (rc) {
+               dev_err(&pdev->dev, "pci_request_region failed\n");
+               goto err_disable_pdev;
+       }
+       /* rollback to err_release_reg */
+
+       csr = pci_ioremap_bar(pdev, 0);
+       if (!csr) {
+               dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
+               rc = -ENODEV;
+               goto err_release_reg;
+       }
+       /* rollback to err_iounmap */
+       dev_info(&pdev->dev, "CSR at %pR -> %p\n", &pdev->resource[0], csr);
+
+       wil = wil_if_alloc(dev, csr);
+       if (IS_ERR(wil)) {
+               rc = (int)PTR_ERR(wil);
+               dev_err(dev, "wil_if_alloc failed: %d\n", rc);
+               goto err_iounmap;
+       }
+       /* rollback to if_free */
+
+       pci_set_drvdata(pdev, wil);
+       wil->pdev = pdev;
+
+       /* FW should raise IRQ when ready */
+       rc = wil_if_pcie_enable(wil);
+       if (rc) {
+               wil_err(wil, "Enable device failed\n");
+               goto if_free;
+       }
+       /* rollback to bus_disable */
+
+       rc = wil_if_add(wil);
+       if (rc) {
+               wil_err(wil, "wil_if_add failed: %d\n", rc);
+               goto bus_disable;
+       }
+
+       wil6210_debugfs_init(wil);
+
+       /* check FW is alive */
+       wmi_echo(wil);
+
+       return 0;
+
+ bus_disable:
+       wil_if_pcie_disable(wil);
+ if_free:
+       wil_if_free(wil);
+ err_iounmap:
+       pci_iounmap(pdev, csr);
+ err_release_reg:
+       pci_release_region(pdev, 0);
+ err_disable_pdev:
+       pci_disable_device(pdev);
+
+       return rc;
+}
+
+static void wil_pcie_remove(struct pci_dev *pdev)
+{
+       struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+       wil6210_debugfs_remove(wil);
+       wil_if_pcie_disable(wil);
+       wil_if_remove(wil);
+       wil_if_free(wil);
+       pci_iounmap(pdev, wil->csr);
+       pci_release_region(pdev, 0);
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(wil6210_pcie_ids) = {
+       { PCI_DEVICE(0x1ae9, 0x0301) },
+       { /* end: all zeroes */ },
+};
+MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
+
+static struct pci_driver wil6210_driver = {
+       .probe          = wil_pcie_probe,
+       .remove         = wil_pcie_remove,
+       .id_table       = wil6210_pcie_ids,
+       .name           = WIL_NAME,
+};
+
+module_pci_driver(wil6210_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Qualcomm Atheros <wil6210@qca.qualcomm.com>");
+MODULE_DESCRIPTION("Driver for 60g WiFi WIL6210 card");
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
new file mode 100644 (file)
index 0000000..f29c294
--- /dev/null
@@ -0,0 +1,871 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/hardirq.h>
+#include <net/ieee80211_radiotap.h>
+#include <linux/if_arp.h>
+#include <linux/moduleparam.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+#include "txrx.h"
+
+static bool rtap_include_phy_info;
+module_param(rtap_include_phy_info, bool, S_IRUGO);
+MODULE_PARM_DESC(rtap_include_phy_info,
+                " Include PHY info in the radiotap header, default - no");
+
+static inline int wil_vring_is_empty(struct vring *vring)
+{
+       return vring->swhead == vring->swtail;
+}
+
+static inline u32 wil_vring_next_tail(struct vring *vring)
+{
+       return (vring->swtail + 1) % vring->size;
+}
+
+static inline void wil_vring_advance_head(struct vring *vring, int n)
+{
+       vring->swhead = (vring->swhead + n) % vring->size;
+}
+
+static inline int wil_vring_is_full(struct vring *vring)
+{
+       return wil_vring_next_tail(vring) == vring->swhead;
+}
+/*
+ * Available space in Tx Vring
+ */
+static inline int wil_vring_avail_tx(struct vring *vring)
+{
+       u32 swhead = vring->swhead;
+       u32 swtail = vring->swtail;
+       int used = (vring->size + swhead - swtail) % vring->size;
+
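+       /* one entry is kept unused so a full ring can be told from an empty one */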
+       return vring->size - used - 1;
+}
+
+static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
+{
+       struct device *dev = wil_to_dev(wil);
+       size_t sz = vring->size * sizeof(vring->va[0]);
+       uint i;
+
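+       /* HW descriptor size is fixed at 32 bytes */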
+       BUILD_BUG_ON(sizeof(vring->va[0]) != 32);
+
+       vring->swhead = 0;
+       vring->swtail = 0;
+       vring->ctx = kzalloc(vring->size * sizeof(vring->ctx[0]), GFP_KERNEL);
+       if (!vring->ctx) {
+               wil_err(wil, "vring_alloc [%d] failed to alloc ctx mem\n",
+                       vring->size);
+               vring->va = NULL;
+               return -ENOMEM;
+       }
+       /*
+        * vring->va should be aligned on its size rounded up to a power of 2.
+        * This is guaranteed by dma_alloc_coherent().
+        */
+       vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
+       if (!vring->va) {
+               wil_err(wil, "vring_alloc [%d] failed to alloc DMA mem\n",
+                       vring->size);
+               kfree(vring->ctx);
+               vring->ctx = NULL;
+               return -ENOMEM;
+       }
+       /* Initially, all descriptors are SW owned.
+        * For Tx and Rx the ownership bit is at the same location,
+        * so either descriptor layout can be used here.
+        */
+       for (i = 0; i < vring->size; i++) {
+               volatile struct vring_tx_desc *d = &(vring->va[i].tx);
+               d->dma.status = TX_DMA_STATUS_DU;
+       }
+
+       wil_dbg(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size,
+               vring->va, (unsigned long long)vring->pa, vring->ctx);
+
+       return 0;
+}
+
+static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
+                          int tx)
+{
+       struct device *dev = wil_to_dev(wil);
+       size_t sz = vring->size * sizeof(vring->va[0]);
+
+       while (!wil_vring_is_empty(vring)) {
+               if (tx) {
+                       volatile struct vring_tx_desc *d =
+                                       &vring->va[vring->swtail].tx;
+                       dma_addr_t pa = d->dma.addr_low |
+                                       ((u64)d->dma.addr_high << 32);
+                       struct sk_buff *skb = vring->ctx[vring->swtail];
+                       if (skb) {
+                               dma_unmap_single(dev, pa, d->dma.length,
+                                                DMA_TO_DEVICE);
+                               dev_kfree_skb_any(skb);
+                               vring->ctx[vring->swtail] = NULL;
+                       } else {
+                               dma_unmap_page(dev, pa, d->dma.length,
+                                              DMA_TO_DEVICE);
+                       }
+                       vring->swtail = wil_vring_next_tail(vring);
+               } else { /* rx */
+                       volatile struct vring_rx_desc *d =
+                                       &vring->va[vring->swtail].rx;
+                       dma_addr_t pa = d->dma.addr_low |
+                                       ((u64)d->dma.addr_high << 32);
+                       struct sk_buff *skb = vring->ctx[vring->swhead];
+                       dma_unmap_single(dev, pa, d->dma.length,
+                                        DMA_FROM_DEVICE);
+                       kfree_skb(skb);
+                       wil_vring_advance_head(vring, 1);
+               }
+       }
+       dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
+       kfree(vring->ctx);
+       vring->pa = 0;
+       vring->va = NULL;
+       vring->ctx = NULL;
+}
+
+/**
+ * Allocate one skb for Rx VRING
+ *
+ * Safe to call from IRQ
+ */
+static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
+                              u32 i, int headroom)
+{
+       struct device *dev = wil_to_dev(wil);
+       unsigned int sz = RX_BUF_LEN;
+       volatile struct vring_rx_desc *d = &(vring->va[i].rx);
+       dma_addr_t pa;
+
+       /* TODO align */
+       struct sk_buff *skb = dev_alloc_skb(sz + headroom);
+       if (unlikely(!skb))
+               return -ENOMEM;
+
+       skb_reserve(skb, headroom);
+       skb_put(skb, sz);
+
+       pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(dev, pa))) {
+               kfree_skb(skb);
+               return -ENOMEM;
+       }
+
+       d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
+       d->dma.addr_low = lower_32_bits(pa);
+       d->dma.addr_high = (u16)upper_32_bits(pa);
+       /* ip_length don't care */
+       /* b11 don't care */
+       /* error don't care */
+       d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
+       d->dma.length = sz;
+       vring->ctx[i] = skb;
+
+       return 0;
+}
+
+/**
+ * Add a radiotap header
+ *
+ * Any error is indicated as "Bad FCS"
+ *
+ * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
+ *  - Rx descriptor: 32 bytes
+ *  - PHY info
+ */
+static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
+                                      struct sk_buff *skb,
+                                      volatile struct vring_rx_desc *d)
+{
+       struct wireless_dev *wdev = wil->wdev;
+       struct wil6210_rtap {
+               struct ieee80211_radiotap_header rthdr;
+               /* fields should be in the order of bits in rthdr.it_present */
+               /* flags */
+               u8 flags;
+               /* channel */
+               __le16 chnl_freq __aligned(2);
+               __le16 chnl_flags;
+               /* MCS */
+               u8 mcs_present;
+               u8 mcs_flags;
+               u8 mcs_index;
+       } __packed;
+       struct wil6210_rtap_vendor {
+               struct wil6210_rtap rtap;
+               /* vendor */
+               u8 vendor_oui[3] __aligned(2);
+               u8 vendor_ns;
+               __le16 vendor_skip;
+               u8 vendor_data[0];
+       } __packed;
+       struct wil6210_rtap_vendor *rtap_vendor;
+       int rtap_len = sizeof(struct wil6210_rtap);
+       int phy_length = 0; /* phy info header size, bytes */
+       static char phy_data[128];
+       struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+
+       if (rtap_include_phy_info) {
+               rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
+               /* calculate additional length */
+               if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
+                       /* PHY info starts at an 8-byte boundary and consists
+                        * of 8-byte lines. The last line may be partially
+                        * written (HW bug), so the FW configures one excessive
+                        * line; the driver skips this last line.
+                        */
+                       int len = min_t(int, 8 + sizeof(phy_data),
+                                       wil_rxdesc_phy_length(d));
+                       if (len > 8) {
+                               void *p = skb_tail_pointer(skb);
+                               void *pa = PTR_ALIGN(p, 8);
+                               if (skb_tailroom(skb) >= len + (pa - p)) {
+                                       phy_length = len - 8;
+                                       memcpy(phy_data, pa, phy_length);
+                               }
+                       }
+               }
+               rtap_len += phy_length;
+       }
+
+       if (skb_headroom(skb) < rtap_len &&
+           pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
+               wil_err(wil, "Unable to expand headroom to %d\n", rtap_len);
+               return;
+       }
+
+       rtap_vendor = (void *)skb_push(skb, rtap_len);
+       memset(rtap_vendor, 0, rtap_len);
+
+       rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
+       rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
+       rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
+                       (1 << IEEE80211_RADIOTAP_FLAGS) |
+                       (1 << IEEE80211_RADIOTAP_CHANNEL) |
+                       (1 << IEEE80211_RADIOTAP_MCS));
+       if (d->dma.status & RX_DMA_STATUS_ERROR)
+               rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;
+
+       rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
+       rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);
+
+       rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
+       rtap_vendor->rtap.mcs_flags = 0;
+       rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);
+
+       if (rtap_include_phy_info) {
+               rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
+                               IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
+               /* OUI for Wilocity 04:ce:14 */
+               rtap_vendor->vendor_oui[0] = 0x04;
+               rtap_vendor->vendor_oui[1] = 0xce;
+               rtap_vendor->vendor_oui[2] = 0x14;
+               rtap_vendor->vendor_ns = 1;
+               /* Rx descriptor + PHY data  */
+               rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
+                                                      phy_length);
+               memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
+               memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
+                      phy_length);
+       }
+}
+
+/*
+ * Fast in-place swap of two 16-bit values (XOR swap)
+ */
+static void wil_swap_u16(u16 *a, u16 *b)
+{
+       *a ^= *b;
+       *b ^= *a;
+       *a ^= *b;
+}
+
+static void wil_swap_ethaddr(void *data)
+{
+       struct ethhdr *eth = data;
+       u16 *s = (u16 *)eth->h_source;
+       u16 *d = (u16 *)eth->h_dest;
+
+       wil_swap_u16(s++, d++);
+       wil_swap_u16(s++, d++);
+       wil_swap_u16(s, d);
+}
+
+/**
+ * reap 1 frame from @swhead
+ *
+ * Safe to call from IRQ
+ */
+static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
+                                        struct vring *vring)
+{
+       struct device *dev = wil_to_dev(wil);
+       struct net_device *ndev = wil_to_ndev(wil);
+       volatile struct vring_rx_desc *d;
+       struct sk_buff *skb;
+       dma_addr_t pa;
+       unsigned int sz = RX_BUF_LEN;
+       u8 ftype;
+       u8 ds_bits;
+
+       if (wil_vring_is_empty(vring))
+               return NULL;
+
+       d = &(vring->va[vring->swhead].rx);
+       if (!(d->dma.status & RX_DMA_STATUS_DU)) {
+               /* it is not error, we just reached end of Rx done area */
+               return NULL;
+       }
+
+       pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
+       skb = vring->ctx[vring->swhead];
+       dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
+       skb_trim(skb, d->dma.length);
+
+       wil->stats.last_mcs_rx = wil_rxdesc_mcs(d);
+
+       /* use radiotap header only if required */
+       if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
+               wil_rx_add_radiotap_header(wil, skb, d);
+
+       wil_dbg_TXRX(wil, "Rx[%3d] : %d bytes\n", vring->swhead, d->dma.length);
+       wil_hex_dump_TXRX("Rx ", DUMP_PREFIX_NONE, 32, 4,
+                         (const void *)d, sizeof(*d), false);
+
+       wil_vring_advance_head(vring, 1);
+
+       /* no extra checks if in sniffer mode */
+       if (ndev->type != ARPHRD_ETHER)
+               return skb;
+       /*
+        * Non-data frames may be delivered through the Rx DMA channel (e.g. BAR).
+        * The driver recognizes them by the frame type found in the Rx
+        * descriptor. If the type is not data, it is a raw 802.11 frame.
+        */
+       ftype = wil_rxdesc_ftype(d) << 2;
+       if (ftype != IEEE80211_FTYPE_DATA) {
+               wil_dbg_TXRX(wil, "Non-data frame ftype 0x%08x\n", ftype);
+               /* TODO: process it */
+               kfree_skb(skb);
+               return NULL;
+       }
+
+       if (skb->len < ETH_HLEN) {
+               wil_err(wil, "Short frame, len = %d\n", skb->len);
+               /* TODO: process it (i.e. BAR) */
+               kfree_skb(skb);
+               return NULL;
+       }
+
+       ds_bits = wil_rxdesc_ds_bits(d);
+       if (ds_bits == 1) {
+               /*
+                * HW bug - in ToDS mode, i.e. Rx on AP side,
+                * addresses get swapped
+                */
+               wil_swap_ethaddr(skb->data);
+       }
+
+       return skb;
+}
+
+/**
+ * allocate and fill up to @count buffers in rx ring
+ * buffers posted at @swtail
+ */
+static int wil_rx_refill(struct wil6210_priv *wil, int count)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct vring *v = &wil->vring_rx;
+       u32 next_tail;
+       int rc = 0;
+       int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
+                       WIL6210_RTAP_SIZE : 0;
+
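+       /* refill until the ring is full (next_tail would hit swhead) or count is exhausted */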
+       for (; next_tail = wil_vring_next_tail(v),
+                       (next_tail != v->swhead) && (count-- > 0);
+                       v->swtail = next_tail) {
+               rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
+               if (rc) {
+                       wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
+                               rc, v->swtail);
+                       break;
+               }
+       }
+       iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));
+
+       return rc;
+}
+
+/*
+ * Pass Rx packet to the netif. Update statistics.
+ */
+static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
+{
+       int rc;
+       unsigned int len = skb->len;
+
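+       /* netif_rx_ni() must not be called from interrupt context, hence the check */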
+       if (in_interrupt())
+               rc = netif_rx(skb);
+       else
+               rc = netif_rx_ni(skb);
+
+       if (likely(rc == NET_RX_SUCCESS)) {
+               ndev->stats.rx_packets++;
+               ndev->stats.rx_bytes += len;
+
+       } else {
+               ndev->stats.rx_dropped++;
+       }
+}
+
+/**
+ * Process all completed skb's from the Rx VRING
+ *
+ * Safe to call from IRQ
+ */
+void wil_rx_handle(struct wil6210_priv *wil)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct vring *v = &wil->vring_rx;
+       struct sk_buff *skb;
+
+       if (!v->va) {
+               wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
+               return;
+       }
+       wil_dbg_TXRX(wil, "%s()\n", __func__);
+       while (NULL != (skb = wil_vring_reap_rx(wil, v))) {
+               wil_hex_dump_TXRX("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+                                 skb->data, skb_headlen(skb), false);
+
+               skb_orphan(skb);
+
+               if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+                       skb->dev = ndev;
+                       skb_reset_mac_header(skb);
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       skb->pkt_type = PACKET_OTHERHOST;
+                       skb->protocol = htons(ETH_P_802_2);
+
+               } else {
+                       skb->protocol = eth_type_trans(skb, ndev);
+               }
+
+               wil_netif_rx_any(skb, ndev);
+       }
+       wil_rx_refill(wil, v->size);
+}
+
+int wil_rx_init(struct wil6210_priv *wil)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct wireless_dev *wdev = wil->wdev;
+       struct vring *vring = &wil->vring_rx;
+       int rc;
+       struct wmi_cfg_rx_chain_cmd cmd = {
+               .action = WMI_RX_CHAIN_ADD,
+               .rx_sw_ring = {
+                       .max_mpdu_size = cpu_to_le16(RX_BUF_LEN),
+               },
+               .mid = 0, /* TODO - what is it? */
+               .decap_trans_type = WMI_DECAP_TYPE_802_3,
+       };
+       struct {
+               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_cfg_rx_chain_done_event evt;
+       } __packed evt;
+
+       vring->size = WIL6210_RX_RING_SIZE;
+       rc = wil_vring_alloc(wil, vring);
+       if (rc)
+               return rc;
+
+       cmd.rx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+       cmd.rx_sw_ring.ring_size = cpu_to_le16(vring->size);
+       if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
+               struct ieee80211_channel *ch = wdev->preset_chandef.chan;
+
+               cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
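+               /* FW channel index appears to be zero-based, hence hw_value - 1 */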
+               if (ch)
+                       cmd.sniffer_cfg.channel = ch->hw_value - 1;
+               cmd.sniffer_cfg.phy_info_mode =
+                       cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP);
+               cmd.sniffer_cfg.phy_support =
+                       cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
+                                   ? WMI_SNIFFER_CP : WMI_SNIFFER_DP);
+       }
+       /* typical time for secure PCP is 840ms */
+       rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
+                     WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000);
+       if (rc)
+               goto err_free;
+
+       vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr);
+
+       wil_dbg(wil, "Rx init: status %d tail 0x%08x\n",
+               le32_to_cpu(evt.evt.status), vring->hwtail);
+
+       rc = wil_rx_refill(wil, vring->size);
+       if (rc)
+               goto err_free;
+
+       return 0;
+ err_free:
+       wil_vring_free(wil, vring, 0);
+
+       return rc;
+}
+
+void wil_rx_fini(struct wil6210_priv *wil)
+{
+       struct vring *vring = &wil->vring_rx;
+
+       if (vring->va) {
+               int rc;
+               struct wmi_cfg_rx_chain_cmd cmd = {
+                       .action = cpu_to_le32(WMI_RX_CHAIN_DEL),
+                       .rx_sw_ring = {
+                               .max_mpdu_size = cpu_to_le16(RX_BUF_LEN),
+                       },
+               };
+               struct {
+                       struct wil6210_mbox_hdr_wmi wmi;
+                       struct wmi_cfg_rx_chain_done_event cfg;
+               } __packed wmi_rx_cfg_reply;
+
+               rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
+                             WMI_CFG_RX_CHAIN_DONE_EVENTID,
+                             &wmi_rx_cfg_reply, sizeof(wmi_rx_cfg_reply),
+                             100);
+               wil_vring_free(wil, vring, 0);
+       }
+}
+
+int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
+                     int cid, int tid)
+{
+       int rc;
+       struct wmi_vring_cfg_cmd cmd = {
+               .action = cpu_to_le32(WMI_VRING_CMD_ADD),
+               .vring_cfg = {
+                       .tx_sw_ring = {
+                               .max_mpdu_size = cpu_to_le16(TX_BUF_LEN),
+                       },
+                       .ringid = id,
+                       .cidxtid = (cid & 0xf) | ((tid & 0xf) << 4),
+                       .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
+                       .mac_ctrl = 0,
+                       .to_resolution = 0,
+                       .agg_max_wsize = 16,
+                       .schd_params = {
+                               .priority = cpu_to_le16(0),
+                               .timeslot_us = cpu_to_le16(0xfff),
+                       },
+               },
+       };
+       struct {
+               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_vring_cfg_done_event cmd;
+       } __packed reply;
+       struct vring *vring = &wil->vring_tx[id];
+
+       if (vring->va) {
+               wil_err(wil, "Tx ring [%d] already allocated\n", id);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       vring->size = size;
+       rc = wil_vring_alloc(wil, vring);
+       if (rc)
+               goto out;
+
+       cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
+       cmd.vring_cfg.tx_sw_ring.ring_size = cpu_to_le16(vring->size);
+
+       rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
+                     WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
+       if (rc)
+               goto out_free;
+
+       if (reply.cmd.status != WMI_VRING_CFG_SUCCESS) {
+               wil_err(wil, "Tx config failed, status 0x%02x\n",
+                       reply.cmd.status);
+               goto out_free;
+       }
+       vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
+
+       return 0;
+ out_free:
+       wil_vring_free(wil, vring, 1);
+ out:
+
+       return rc;
+}
+
+void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
+{
+       struct vring *vring = &wil->vring_tx[id];
+
+       if (!vring->va)
+               return;
+
+       wil_vring_free(wil, vring, 1);
+}
+
+static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
+                                      struct sk_buff *skb)
+{
+       struct vring *v = &wil->vring_tx[0];
+
+       if (v->va)
+               return v;
+
+       return NULL;
+}
+
+static int wil_tx_desc_map(volatile struct vring_tx_desc *d,
+                          dma_addr_t pa, u32 len)
+{
+       d->dma.addr_low = lower_32_bits(pa);
+       d->dma.addr_high = (u16)upper_32_bits(pa);
+       d->dma.ip_length = 0;
+       /* 0..6: mac_length; 7:ip_version 0-IP6 1-IP4*/
+       d->dma.b11 = 0/*14 | BIT(7)*/;
+       d->dma.error = 0;
+       d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
+       d->dma.length = len;
+       d->dma.d0 = 0;
+       d->mac.d[0] = 0;
+       d->mac.d[1] = 0;
+       d->mac.d[2] = 0;
+       d->mac.ucode_cmd = 0;
+       /* use dst index 0 */
+       d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS) |
+                      (0 << MAC_CFG_DESC_TX_1_DST_INDEX_POS);
+       /* translation type:  0 - bypass; 1 - 802.3; 2 - native wifi */
+       d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
+                     (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);
+
+       return 0;
+}
+
+static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
+                       struct sk_buff *skb)
+{
+       struct device *dev = wil_to_dev(wil);
+       volatile struct vring_tx_desc *d;
+       u32 swhead = vring->swhead;
+       int avail = wil_vring_avail_tx(vring);
+       int nr_frags = skb_shinfo(skb)->nr_frags;
+       uint f;
+       int vring_index = vring - wil->vring_tx;
+       uint i = swhead;
+       dma_addr_t pa;
+
+       wil_dbg_TXRX(wil, "%s()\n", __func__);
+
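+       /* stop netif queues when less than 1/8 of the ring remains free */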
+       if (avail < vring->size/8)
+               netif_tx_stop_all_queues(wil_to_ndev(wil));
+       if (avail < 1 + nr_frags) {
+               wil_err(wil, "Tx ring full. No space for %d fragments\n",
+                       1 + nr_frags);
+               return -ENOMEM;
+       }
+       d = &(vring->va[i].tx);
+
+       /* FIXME FW can accept only unicast frames for the peer */
+       memcpy(skb->data, wil->dst_addr[vring_index], ETH_ALEN);
+
+       pa = dma_map_single(dev, skb->data,
+                       skb_headlen(skb), DMA_TO_DEVICE);
+
+       wil_dbg_TXRX(wil, "Tx skb %d bytes %p -> %#08llx\n", skb_headlen(skb),
+                    skb->data, (unsigned long long)pa);
+       wil_hex_dump_TXRX("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
+                         skb->data, skb_headlen(skb), false);
+
+       if (unlikely(dma_mapping_error(dev, pa)))
+               return -EINVAL;
+       /* 1-st segment */
+       wil_tx_desc_map(d, pa, skb_headlen(skb));
+       d->mac.d[2] |= ((nr_frags + 1) <<
+                      MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+       /* middle segments */
+       for (f = 0; f < nr_frags; f++) {
+               const struct skb_frag_struct *frag =
+                               &skb_shinfo(skb)->frags[f];
+               int len = skb_frag_size(frag);
+               i = (swhead + f + 1) % vring->size;
+               d = &(vring->va[i].tx);
+               pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
+                               DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(dev, pa)))
+                       goto dma_error;
+               wil_tx_desc_map(d, pa, len);
+               vring->ctx[i] = NULL;
+       }
+       /* for the last seg only */
+       d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
+       d->dma.d0 |= BIT(9); /* BUG: undocumented bit */
+       d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
+       d->dma.d0 |= (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
+
+       wil_hex_dump_TXRX("Tx ", DUMP_PREFIX_NONE, 32, 4,
+                         (const void *)d, sizeof(*d), false);
+
+       /* advance swhead */
+       wil_vring_advance_head(vring, nr_frags + 1);
+       wil_dbg_TXRX(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
+       iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
+       /* hold reference to skb
+        * to prevent skb release before accounting
+        * in case of immediate "tx done"
+        */
+       vring->ctx[i] = skb_get(skb);
+
+       return 0;
+ dma_error:
+       /* unmap what we have mapped */
+       /* Note: increment @f to operate with positive index */
+       for (f++; f > 0; f--) {
+               i = (swhead + f) % vring->size;
+               d = &(vring->va[i].tx);
+               d->dma.status = TX_DMA_STATUS_DU;
+               pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
+               if (vring->ctx[i])
+                       dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE);
+               else
+                       dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE);
+       }
+
+       return -EINVAL;
+}
+
+netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+       struct wil6210_priv *wil = ndev_to_wil(ndev);
+       struct vring *vring;
+       int rc;
+
+       wil_dbg_TXRX(wil, "%s()\n", __func__);
+       if (!test_bit(wil_status_fwready, &wil->status)) {
+               wil_err(wil, "FW not ready\n");
+               goto drop;
+       }
+       if (!test_bit(wil_status_fwconnected, &wil->status)) {
+               wil_err(wil, "FW not connected\n");
+               goto drop;
+       }
+       if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
+               wil_err(wil, "Xmit in monitor mode not supported\n");
+               goto drop;
+       }
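+       /* EAPOL frames go out via the WMI mailbox; other data via the Tx VRING */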
+       if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
+               rc = wmi_tx_eapol(wil, skb);
+       } else {
+               /* find vring */
+               vring = wil_find_tx_vring(wil, skb);
+               if (!vring) {
+                       wil_err(wil, "No Tx VRING available\n");
+                       goto drop;
+               }
+               /* set up vring entry */
+               rc = wil_tx_vring(wil, vring, skb);
+       }
+       switch (rc) {
+       case 0:
+               ndev->stats.tx_packets++;
+               ndev->stats.tx_bytes += skb->len;
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
+       case -ENOMEM:
+               return NETDEV_TX_BUSY;
+       default:
+               ; /* goto drop; */
+               break;
+       }
+ drop:
+       netif_tx_stop_all_queues(ndev);
+       ndev->stats.tx_dropped++;
+       dev_kfree_skb_any(skb);
+
+       return NET_XMIT_DROP;
+}
+
+/**
+ * Clean up transmitted skb's from the Tx VRING
+ *
+ * Safe to call from IRQ
+ */
+void wil_tx_complete(struct wil6210_priv *wil, int ringid)
+{
+       struct device *dev = wil_to_dev(wil);
+       struct vring *vring = &wil->vring_tx[ringid];
+
+       if (!vring->va) {
+               wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
+               return;
+       }
+
+       wil_dbg_TXRX(wil, "%s(%d)\n", __func__, ringid);
+
+       while (!wil_vring_is_empty(vring)) {
+               volatile struct vring_tx_desc *d = &vring->va[vring->swtail].tx;
+               dma_addr_t pa;
+               struct sk_buff *skb;
+               if (!(d->dma.status & TX_DMA_STATUS_DU))
+                       break;
+
+               wil_dbg_TXRX(wil,
+                            "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
+                            vring->swtail, d->dma.length, d->dma.status,
+                            d->dma.error);
+               wil_hex_dump_TXRX("TxC ", DUMP_PREFIX_NONE, 32, 4,
+                                 (const void *)d, sizeof(*d), false);
+
+               pa = d->dma.addr_low | ((u64)d->dma.addr_high << 32);
+               skb = vring->ctx[vring->swtail];
+               if (skb) {
+                       dma_unmap_single(dev, pa, d->dma.length, DMA_TO_DEVICE);
+                       dev_kfree_skb_any(skb);
+                       vring->ctx[vring->swtail] = NULL;
+               } else {
+                       dma_unmap_page(dev, pa, d->dma.length, DMA_TO_DEVICE);
+               }
+               d->dma.addr_low = 0;
+               d->dma.addr_high = 0;
+               d->dma.length = 0;
+               d->dma.status = TX_DMA_STATUS_DU;
+               vring->swtail = wil_vring_next_tail(vring);
+       }
+       if (wil_vring_avail_tx(vring) > vring->size/4)
+               netif_tx_wake_all_queues(wil_to_ndev(wil));
+}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
new file mode 100644 (file)
index 0000000..45a61f5
--- /dev/null
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef WIL6210_TXRX_H
+#define WIL6210_TXRX_H
+
+#define BUF_SW_OWNED    (1)
+#define BUF_HW_OWNED    (0)
+
+/* size of max. Rx packet */
+#define RX_BUF_LEN      (2048)
+#define TX_BUF_LEN      (2048)
+/* how many bytes to reserve for rtap header? */
+#define WIL6210_RTAP_SIZE (128)
+
+/* Tx/Rx path */
+/*
+ * Tx descriptor - MAC part
+ * [dword 0]
+ * bit  0.. 9 : lifetime_expiry_value:10
+ * bit     10 : interrup_en:1
+ * bit     11 : status_en:1
+ * bit 12..13 : txss_override:2
+ * bit     14 : timestamp_insertion:1
+ * bit     15 : duration_preserve:1
+ * bit 16..21 : reserved0:6
+ * bit 22..26 : mcs_index:5
+ * bit     27 : mcs_en:1
+ * bit 28..29 : reserved1:2
+ * bit     30 : reserved2:1
+ * bit     31 : sn_preserved:1
+ * [dword 1]
+ * bit  0.. 3 : pkt_mode:4
+ * bit      4 : pkt_mode_en:1
+ * bit  5.. 7 : reserved0:3
+ * bit  8..13 : reserved1:6
+ * bit     14 : reserved2:1
+ * bit     15 : ack_policy_en:1
+ * bit 16..19 : dst_index:4
+ * bit     20 : dst_index_en:1
+ * bit 21..22 : ack_policy:2
+ * bit     23 : lifetime_en:1
+ * bit 24..30 : max_retry:7
+ * bit     31 : max_retry_en:1
+ * [dword 2]
+ * bit  0.. 7 : num_of_descriptors:8
+ * bit  8..17 : reserved:10
+ * bit 18..19 : l2_translation_type:2
+ * bit     20 : snap_hdr_insertion_en:1
+ * bit     21 : vlan_removal_en:1
+ * bit 22..31 : reserved0:10
+ * [dword 3]
+ * bit  0.. 31: ucode_cmd:32
+ */
+struct vring_tx_mac {
+       u32 d[3];
+       u32 ucode_cmd;
+} __packed;
+
+/* TX MAC Dword 0 */
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_POS 0
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_LEN 10
+#define MAC_CFG_DESC_TX_0_LIFETIME_EXPIRY_VALUE_MSK 0x3FF
+
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_POS 10
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_INTERRUP_EN_MSK 0x400
+
+#define MAC_CFG_DESC_TX_0_STATUS_EN_POS 11
+#define MAC_CFG_DESC_TX_0_STATUS_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_STATUS_EN_MSK 0x800
+
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_POS 12
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_LEN 2
+#define MAC_CFG_DESC_TX_0_TXSS_OVERRIDE_MSK 0x3000
+
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_POS 14
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_LEN 1
+#define MAC_CFG_DESC_TX_0_TIMESTAMP_INSERTION_MSK 0x4000
+
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_POS 15
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_LEN 1
+#define MAC_CFG_DESC_TX_0_DURATION_PRESERVE_MSK 0x8000
+
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_POS 22
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_LEN 5
+#define MAC_CFG_DESC_TX_0_MCS_INDEX_MSK 0x7C00000
+
+#define MAC_CFG_DESC_TX_0_MCS_EN_POS 27
+#define MAC_CFG_DESC_TX_0_MCS_EN_LEN 1
+#define MAC_CFG_DESC_TX_0_MCS_EN_MSK 0x8000000
+
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_POS 31
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_LEN 1
+#define MAC_CFG_DESC_TX_0_SN_PRESERVED_MSK 0x80000000
+
+/* TX MAC Dword 1 */
+#define MAC_CFG_DESC_TX_1_PKT_MODE_POS 0
+#define MAC_CFG_DESC_TX_1_PKT_MODE_LEN 4
+#define MAC_CFG_DESC_TX_1_PKT_MODE_MSK 0xF
+
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS 4
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_PKT_MODE_EN_MSK 0x10
+
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_POS 15
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_EN_MSK 0x8000
+
+#define MAC_CFG_DESC_TX_1_DST_INDEX_POS 16
+#define MAC_CFG_DESC_TX_1_DST_INDEX_LEN 4
+#define MAC_CFG_DESC_TX_1_DST_INDEX_MSK 0xF0000
+
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS 20
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_DST_INDEX_EN_MSK 0x100000
+
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_POS 21
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_LEN 2
+#define MAC_CFG_DESC_TX_1_ACK_POLICY_MSK 0x600000
+
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_POS 23
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_LIFETIME_EN_MSK 0x800000
+
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_POS 24
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_LEN 7
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_MSK 0x7F000000
+
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_POS 31
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_LEN 1
+#define MAC_CFG_DESC_TX_1_MAX_RETRY_EN_MSK 0x80000000
+
+/* TX MAC Dword 2 */
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS 0
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_LEN 8
+#define MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_MSK 0xFF
+
+#define MAC_CFG_DESC_TX_2_RESERVED_POS 8
+#define MAC_CFG_DESC_TX_2_RESERVED_LEN 10
+#define MAC_CFG_DESC_TX_2_RESERVED_MSK 0x3FF00
+
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS 18
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_LEN 2
+#define MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_MSK 0xC0000
+
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS 20
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_LEN 1
+#define MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_MSK 0x100000
+
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_POS 21
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_LEN 1
+#define MAC_CFG_DESC_TX_2_VLAN_REMOVAL_EN_MSK 0x200000
+
+/* TX MAC Dword 3 */
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_POS 0
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_LEN 32
+#define MAC_CFG_DESC_TX_3_UCODE_CMD_MSK 0xFFFFFFFF
+
+/* TX DMA Dword 0 */
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_POS 0
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_LEN 8
+#define DMA_CFG_DESC_TX_0_L4_LENGTH_MSK 0xFF
+
+#define DMA_CFG_DESC_TX_0_CMD_EOP_POS 8
+#define DMA_CFG_DESC_TX_0_CMD_EOP_LEN 1
+#define DMA_CFG_DESC_TX_0_CMD_EOP_MSK 0x100
+
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS 10
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_LEN 1
+#define DMA_CFG_DESC_TX_0_CMD_DMA_IT_MSK 0x400
+
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS 11
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_LEN 2
+#define DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_MSK 0x1800
+
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS 13
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_TCP_SEG_EN_MSK 0x2000
+
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS 14
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_MSK 0x4000
+
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS 15
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_MSK 0x8000
+
+#define DMA_CFG_DESC_TX_0_QID_POS 16
+#define DMA_CFG_DESC_TX_0_QID_LEN 5
+#define DMA_CFG_DESC_TX_0_QID_MSK 0x1F0000
+
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS 21
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_LEN 1
+#define DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_MSK 0x200000
+
+#define DMA_CFG_DESC_TX_0_L4_TYPE_POS 30
+#define DMA_CFG_DESC_TX_0_L4_TYPE_LEN 2
+#define DMA_CFG_DESC_TX_0_L4_TYPE_MSK 0xC0000000
+
+
+#define TX_DMA_STATUS_DU         BIT(0)
+
+struct vring_tx_dma {
+       u32 d0;
+       u32 addr_low;
+       u16 addr_high;
+       u8  ip_length;
+       u8  b11;       /* 0..6: mac_length; 7:ip_version */
+       u8  error;     /* 0..2: err; 3..7: reserved; */
+       u8  status;    /* 0: used; 1..7; reserved */
+       u16 length;
+} __packed;
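The _POS/_LEN/_MSK triples above give each field's bit offset, width and in-place mask within its dword. A minimal sketch of setting and reading back such a field (the setter mirrors what wil_tx_vring() in txrx.c does for the QID field; the getter is shown for illustration only):

	/* place the 5-bit QID field into DMA dword 0 */
	d->dma.d0 |= (vring_index << DMA_CFG_DESC_TX_0_QID_POS);

	/* extract it again using the mask/position pair (illustrative) */
	u32 qid = (d->dma.d0 & DMA_CFG_DESC_TX_0_QID_MSK) >>
		  DMA_CFG_DESC_TX_0_QID_POS;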
+
+/*
+ * Rx descriptor - MAC part
+ * [dword 0]
+ * bit  0.. 3 : tid:4 The QoS (b3-0) TID Field
+ * bit  4.. 6 : connection_id:3 :The Source index that  was found during
+ *  Parsing the TA.  This field is used to  define the source of the packet
+ * bit      7 : reserved:1
+ * bit  8.. 9 : mac_id:2 : The MAC virtual  Ring number (always zero)
+ * bit 10..11 : frame_type:2 : The FC Control  (b3-2) -  MPDU Type
+ *              (management, data, control  and extension)
+ * bit 12..15 : frame_subtype:4 : The FC Control  (b7-4) -  Frame Subtype
+ * bit 16..27 : seq_number:12 The received Sequence number field
+ * bit 28..31 : extended:4 extended subtype
+ * [dword 1]
+ * bit  0.. 3 : reserved
+ * bit  4.. 5 : key_id:2
+ * bit      6 : decrypt_bypass:1
+ * bit      7 : security:1
+ * bit  8.. 9 : ds_bits:2
+ * bit     10 : a_msdu_present:1  from qos header
+ * bit     11 : a_msdu_type:1  from qos header
+ * bit     12 : a_mpdu:1  part of AMPDU aggregation
+ * bit     13 : broadcast:1
+ * bit     14 : multicast:1
+ * bit     15 : reserved:1
+ * bit 16..20 : rx_mac_qid:5   The Queue Identifier that the packet
+ *                             is received from
+ * bit 21..24 : mcs:4
+ * bit 25..28 : mic_icr:4
+ * bit 29..31 : reserved:3
+ * [dword 2]
+ * bit  0.. 2 : time_slot:3 The timeslot that the MPDU is received
+ * bit      3 : fc_protocol_ver:1 The FC Control  (b0) - Protocol  Version
+ * bit      4 : fc_order:1 The FC Control (b15) -Order
+ * bit  5.. 7 : qos_ack_policy:3  The QoS (b6-5) ack policy Field
+ * bit      8 : esop:1 The QoS (b4) ESOP field
+ * bit      9 : qos_rdg_more_ppdu:1 The QoS (b9) RDG  field
+ * bit 10..14 : qos_reserved:5 The QoS (b14-10) Reserved  field
+ * bit     15 : qos_ac_constraint:1
+ * bit 16..31 : pn_15_0:16 low 2 bytes of PN
+ * [dword 3]
+ * bit  0..31 : pn_47_16:32 high 4 bytes of PN
+ */
+struct vring_rx_mac {
+       u32 d0;
+       u32 d1;
+       u16 w4;
+       u16 pn_15_0;
+       u32 pn_47_16;
+} __packed;
+
+/*
+ * Rx descriptor - DMA part
+ * [dword 0]
+ * bit  0.. 7 : l4_length:8 layer 4 length
+ * bit  8.. 9 : reserved:2
+ * bit     10 : cmd_dma_it:1
+ * bit 11..15 : reserved:5
+ * bit 16..29 : phy_info_length:14
+ * bit 30..31 : l4_type:2 valid if the L4I bit is set in the status field
+ * [dword 1]
+ * bit  0..31 : addr_low:32 The payload buffer low address
+ * [dword 2]
+ * bit  0..15 : addr_high:16 The payload buffer high address
+ * bit 16..23 : ip_length:8
+ * bit 24..30 : mac_length:7
+ * bit     31 : ip_version:1
+ * [dword 3]
+ *  [byte 12] error
+ *  [byte 13] status
+ * bit      0 : du:1
+ * bit      1 : eop:1
+ * bit      2 : error:1
+ * bit      3 : mi:1
+ * bit      4 : l3_identified:1
+ * bit      5 : l4_identified:1
+ * bit      6 : phy_info_included:1
+ * bit      7 : reserved:1
+ *  [word 7] length
+ *
+ */
+
+#define RX_DMA_D0_CMD_DMA_IT     BIT(10)
+
+#define RX_DMA_STATUS_DU         BIT(0)
+#define RX_DMA_STATUS_ERROR      BIT(2)
+#define RX_DMA_STATUS_PHY_INFO   BIT(6)
+
+struct vring_rx_dma {
+       u32 d0;
+       u32 addr_low;
+       u16 addr_high;
+       u8  ip_length;
+       u8  b11;
+       u8  error;
+       u8  status;
+       u16 length;
+} __packed;
+
+struct vring_tx_desc {
+       struct vring_tx_mac mac;
+       struct vring_tx_dma dma;
+} __packed;
+
+struct vring_rx_desc {
+       struct vring_rx_mac mac;
+       struct vring_rx_dma dma;
+} __packed;
+
+union vring_desc {
+       struct vring_tx_desc tx;
+       struct vring_rx_desc rx;
+} __packed;
+
+static inline int wil_rxdesc_phy_length(volatile struct vring_rx_desc *d)
+{
+       return WIL_GET_BITS(d->dma.d0, 16, 29);
+}
+
+static inline int wil_rxdesc_mcs(volatile struct vring_rx_desc *d)
+{
+       return WIL_GET_BITS(d->mac.d1, 21, 24);
+}
+
+static inline int wil_rxdesc_ds_bits(volatile struct vring_rx_desc *d)
+{
+       return WIL_GET_BITS(d->mac.d1, 8, 9);
+}
+
+static inline int wil_rxdesc_ftype(volatile struct vring_rx_desc *d)
+{
+       return WIL_GET_BITS(d->mac.d0, 10, 11);
+}
+
+#endif /* WIL6210_TXRX_H */
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
new file mode 100644 (file)
index 0000000..9bcfffa
--- /dev/null
@@ -0,0 +1,363 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __WIL6210_H__
+#define __WIL6210_H__
+
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/cfg80211.h>
+
+#include "dbg_hexdump.h"
+
+#define WIL_NAME "wil6210"
+
+/**
+ * extract bits [@b0:@b1] (inclusive) from the value @x
+ * @b0 must be <= @b1, otherwise the result is incorrect
+ */
+static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
+{
+       return (x >> b0) & ((1 << (b1 - b0 + 1)) - 1);
+}
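For example, extracting the Rx MCS field (bits 21..24 of mac.d1, exactly what wil_rxdesc_mcs() in txrx.h does): WIL_GET_BITS(0x01400000, 21, 24) evaluates to (0x01400000 >> 21) & ((1 << 4) - 1) = 0xA.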
+
+#define WIL6210_MEM_SIZE (2*1024*1024UL)
+
+#define WIL6210_TX_QUEUES (4)
+
+#define WIL6210_RX_RING_SIZE (128)
+#define WIL6210_TX_RING_SIZE (128)
+#define WIL6210_MAX_TX_RINGS (24)
+
+/* Hardware definitions begin */
+
+/*
+ * Mapping
+ * RGF File      | Host addr    |  FW addr
+ *               |              |
+ * user_rgf      | 0x000000     | 0x880000
+ *  dma_rgf      | 0x001000     | 0x881000
+ * pcie_rgf      | 0x002000     | 0x882000
+ *               |              |
+ */
+
+/* Where various structures placed in host address space */
+#define WIL6210_FW_HOST_OFF      (0x880000UL)
+
+#define HOSTADDR(fwaddr)        (fwaddr - WIL6210_FW_HOST_OFF)
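For example, the DMA pseudo-cause register RGF_DMA_PSEUDO_CAUSE (defined below as 0x881c68) maps to HOSTADDR(0x881c68) = 0x881c68 - 0x880000 = 0x001c68, i.e. offset 0x1c68 from the start of BAR0, which lands inside the dma_rgf block (host 0x001000) of the mapping table above.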
+
+/*
+ * Interrupt control registers block
+ *
+ * each interrupt is controlled by the same bit in all registers
+ */
+struct RGF_ICR {
+       u32 ICC; /* Cause Control, RW: 0 - W1C, 1 - COR */
+       u32 ICR; /* Cause, W1C/COR depending on ICC */
+       u32 ICM; /* Cause masked (ICR & ~IMV), W1C/COR depending on ICC */
+       u32 ICS; /* Cause Set, WO */
+       u32 IMV; /* Mask, RW+S/C */
+       u32 IMS; /* Mask Set, write 1 to set */
+       u32 IMC; /* Mask Clear, write 1 to clear */
+} __packed;
+
+/* registers - FW addresses */
+#define RGF_USER_USER_SCRATCH_PAD      (0x8802bc)
+#define RGF_USER_USER_ICR              (0x880b4c) /* struct RGF_ICR */
+       #define BIT_USER_USER_ICR_SW_INT_2      BIT(18)
+#define RGF_USER_CLKS_CTL_SW_RST_MASK_0        (0x880b14)
+#define RGF_USER_MAC_CPU_0             (0x8801fc)
+#define RGF_USER_USER_CPU_0            (0x8801e0)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_0 (0x880b04)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_1 (0x880b08)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_2 (0x880b0c)
+#define RGF_USER_CLKS_CTL_SW_RST_VEC_3 (0x880b10)
+
+#define RGF_DMA_PSEUDO_CAUSE           (0x881c68)
+#define RGF_DMA_PSEUDO_CAUSE_MASK_SW   (0x881c6c)
+#define RGF_DMA_PSEUDO_CAUSE_MASK_FW   (0x881c70)
+       #define BIT_DMA_PSEUDO_CAUSE_RX         BIT(0)
+       #define BIT_DMA_PSEUDO_CAUSE_TX         BIT(1)
+       #define BIT_DMA_PSEUDO_CAUSE_MISC       BIT(2)
+
+#define RGF_DMA_EP_TX_ICR              (0x881bb4) /* struct RGF_ICR */
+       #define BIT_DMA_EP_TX_ICR_TX_DONE       BIT(0)
+       #define BIT_DMA_EP_TX_ICR_TX_DONE_N(n)  BIT(n+1) /* n = [0..23] */
+#define RGF_DMA_EP_RX_ICR              (0x881bd0) /* struct RGF_ICR */
+       #define BIT_DMA_EP_RX_ICR_RX_DONE       BIT(0)
+#define RGF_DMA_EP_MISC_ICR            (0x881bec) /* struct RGF_ICR */
+       #define BIT_DMA_EP_MISC_ICR_RX_HTRSH    BIT(0)
+       #define BIT_DMA_EP_MISC_ICR_TX_NO_ACT   BIT(1)
+       #define BIT_DMA_EP_MISC_ICR_FW_INT0     BIT(28)
+       #define BIT_DMA_EP_MISC_ICR_FW_INT1     BIT(29)
+
+/* Interrupt moderation control */
+#define RGF_DMA_ITR_CNT_TRSH           (0x881c5c)
+#define RGF_DMA_ITR_CNT_DATA           (0x881c60)
+#define RGF_DMA_ITR_CNT_CRL            (0x881C64)
+       #define BIT_DMA_ITR_CNT_CRL_EN          BIT(0)
+       #define BIT_DMA_ITR_CNT_CRL_EXT_TICK    BIT(1)
+       #define BIT_DMA_ITR_CNT_CRL_FOREVER     BIT(2)
+       #define BIT_DMA_ITR_CNT_CRL_CLR         BIT(3)
+       #define BIT_DMA_ITR_CNT_CRL_REACH_TRSH  BIT(4)
+
+/* popular locations */
+#define HOST_MBOX   HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
+#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \
+       offsetof(struct RGF_ICR, ICS))
+#define SW_INT_MBOX BIT_USER_USER_ICR_SW_INT_2
+
+/* ISR register bits */
+#define ISR_MISC_FW_READY BIT_DMA_EP_MISC_ICR_FW_INT0
+#define ISR_MISC_MBOX_EVT BIT_DMA_EP_MISC_ICR_FW_INT1
+
+/* Hardware definitions end */
+
+struct wil6210_mbox_ring {
+       u32 base;
+       u16 entry_size; /* max. size of mbox entry, incl. all headers */
+       u16 size;
+       u32 tail;
+       u32 head;
+} __packed;
+
+struct wil6210_mbox_ring_desc {
+       __le32 sync;
+       __le32 addr;
+} __packed;
+
+/* at HOST_OFF_WIL6210_MBOX_CTL */
+struct wil6210_mbox_ctl {
+       struct wil6210_mbox_ring tx;
+       struct wil6210_mbox_ring rx;
+} __packed;
+
+struct wil6210_mbox_hdr {
+       __le16 seq;
+       __le16 len; /* payload, bytes after this header */
+       __le16 type;
+       u8 flags;
+       u8 reserved;
+} __packed;
+
+#define WIL_MBOX_HDR_TYPE_WMI (0)
+
+/* max. value for wil6210_mbox_hdr.len */
+#define MAX_MBOXITEM_SIZE   (240)
+
+struct wil6210_mbox_hdr_wmi {
+       u8 reserved0[2];
+       __le16 id;
+       __le16 info1; /* bits [0..3] - device_id, rest - unused */
+       u8 reserved1[2];
+} __packed;
+
+struct pending_wmi_event {
+       struct list_head list;
+       struct {
+               struct wil6210_mbox_hdr hdr;
+               struct wil6210_mbox_hdr_wmi wmi;
+               u8 data[0];
+       } __packed event;
+};
+
+union vring_desc;
+
+struct vring {
+       dma_addr_t pa;
+       volatile union vring_desc *va; /* vring_desc[size], WriteBack by DMA */
+       u16 size; /* number of vring_desc elements */
+       u32 swtail;
+       u32 swhead;
+       u32 hwtail; /* write here to inform hw */
+       void **ctx; /* void *ctx[size] - software context */
+};
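swhead and swtail form the usual single-producer/single-consumer ring: the Tx path advances swhead when posting descriptors and wil_tx_complete() advances swtail when reclaiming them. The driver's own helpers (wil_vring_is_empty(), wil_vring_next_tail(), wil_vring_avail_tx()) are defined earlier in txrx.c; the bodies below are only a plausible sketch of the arithmetic this layout implies, not the driver's code:

	/* illustrative sketch only */
	static inline int vring_is_empty_sketch(struct vring *v)
	{
		return v->swhead == v->swtail;
	}

	static inline u32 vring_next_tail_sketch(struct vring *v)
	{
		return (v->swtail + 1) % v->size;
	}

	static inline u32 vring_used_sketch(struct vring *v)
	{
		return (v->size + v->swhead - v->swtail) % v->size;
	}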
+
+enum { /* for wil6210_priv.status */
+       wil_status_fwready = 0,
+       wil_status_fwconnected,
+       wil_status_dontscan,
+       wil_status_irqen, /* FIXME: interrupts enabled - for debug */
+};
+
+struct pci_dev;
+
+struct wil6210_stats {
+       u64 tsf;
+       u32 snr;
+       u16 last_mcs_rx;
+       u16 bf_mcs; /* last BF, used for Tx */
+       u16 my_rx_sector;
+       u16 my_tx_sector;
+       u16 peer_rx_sector;
+       u16 peer_tx_sector;
+};
+
+struct wil6210_priv {
+       struct pci_dev *pdev;
+       int n_msi;
+       struct wireless_dev *wdev;
+       void __iomem *csr;
+       ulong status;
+       /* profile */
+       u32 monitor_flags;
+       u32 secure_pcp; /* create secure PCP? */
+       int sinfo_gen;
+       /* cached ISR registers */
+       u32 isr_misc;
+       /* mailbox related */
+       struct mutex wmi_mutex;
+       struct wil6210_mbox_ctl mbox_ctl;
+       struct completion wmi_ready;
+       u16 wmi_seq;
+       u16 reply_id; /**< wait for this WMI event */
+       void *reply_buf;
+       u16 reply_size;
+       struct workqueue_struct *wmi_wq; /* for deferred calls */
+       struct work_struct wmi_event_worker;
+       struct workqueue_struct *wmi_wq_conn; /* for connect worker */
+       struct work_struct wmi_connect_worker;
+       struct work_struct disconnect_worker;
+       struct timer_list connect_timer;
+       int pending_connect_cid;
+       struct list_head pending_wmi_ev;
+       /*
+        * protect pending_wmi_ev
+        * - fill in IRQ from wil6210_irq_misc,
+        * - consumed in thread by wmi_event_worker
+        */
+       spinlock_t wmi_ev_lock;
+       /* DMA related */
+       struct vring vring_rx;
+       struct vring vring_tx[WIL6210_MAX_TX_RINGS];
+       u8 dst_addr[WIL6210_MAX_TX_RINGS][ETH_ALEN];
+       /* scan */
+       struct cfg80211_scan_request *scan_request;
+
+       struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */
+       /* statistics */
+       struct wil6210_stats stats;
+       /* debugfs */
+       struct dentry *debug;
+       struct debugfs_blob_wrapper fw_code_blob;
+       struct debugfs_blob_wrapper fw_data_blob;
+       struct debugfs_blob_wrapper fw_peri_blob;
+       struct debugfs_blob_wrapper uc_code_blob;
+       struct debugfs_blob_wrapper uc_data_blob;
+       struct debugfs_blob_wrapper rgf_blob;
+};
+
+#define wil_to_wiphy(i) (i->wdev->wiphy)
+#define wil_to_dev(i) (wiphy_dev(wil_to_wiphy(i)))
+#define wiphy_to_wil(w) (struct wil6210_priv *)(wiphy_priv(w))
+#define wil_to_wdev(i) (i->wdev)
+#define wdev_to_wil(w) (struct wil6210_priv *)(wdev_priv(w))
+#define wil_to_ndev(i) (wil_to_wdev(i)->netdev)
+#define ndev_to_wil(n) (wdev_to_wil(n->ieee80211_ptr))
+
+#define wil_dbg(wil, fmt, arg...) netdev_dbg(wil_to_ndev(wil), fmt, ##arg)
+#define wil_info(wil, fmt, arg...) netdev_info(wil_to_ndev(wil), fmt, ##arg)
+#define wil_err(wil, fmt, arg...) netdev_err(wil_to_ndev(wil), fmt, ##arg)
+
+#define wil_dbg_IRQ(wil, fmt, arg...) wil_dbg(wil, "DBG[ IRQ]" fmt, ##arg)
+#define wil_dbg_TXRX(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
+#define wil_dbg_WMI(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
+
+#define wil_hex_dump_TXRX(prefix_str, prefix_type, rowsize,    \
+                         groupsize, buf, len, ascii)           \
+                         wil_print_hex_dump_debug("DBG[TXRX]" prefix_str,\
+                                        prefix_type, rowsize,  \
+                                        groupsize, buf, len, ascii)
+
+#define wil_hex_dump_WMI(prefix_str, prefix_type, rowsize,     \
+                        groupsize, buf, len, ascii)            \
+                        wil_print_hex_dump_debug("DBG[ WMI]" prefix_str,\
+                                       prefix_type, rowsize,   \
+                                       groupsize, buf, len, ascii)
+
+void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
+                         size_t count);
+void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
+                       size_t count);
+
+void *wil_if_alloc(struct device *dev, void __iomem *csr);
+void wil_if_free(struct wil6210_priv *wil);
+int wil_if_add(struct wil6210_priv *wil);
+void wil_if_remove(struct wil6210_priv *wil);
+int wil_priv_init(struct wil6210_priv *wil);
+void wil_priv_deinit(struct wil6210_priv *wil);
+int wil_reset(struct wil6210_priv *wil);
+void wil_link_on(struct wil6210_priv *wil);
+void wil_link_off(struct wil6210_priv *wil);
+int wil_up(struct wil6210_priv *wil);
+int wil_down(struct wil6210_priv *wil);
+void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r);
+
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr);
+void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr);
+int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
+                struct wil6210_mbox_hdr *hdr);
+int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len);
+void wmi_recv_cmd(struct wil6210_priv *wil);
+int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
+            u16 reply_id, void *reply, u8 reply_size, int to_msec);
+void wmi_connect_worker(struct work_struct *work);
+void wmi_event_worker(struct work_struct *work);
+void wmi_event_flush(struct wil6210_priv *wil);
+int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid);
+int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid);
+int wmi_set_channel(struct wil6210_priv *wil, int channel);
+int wmi_get_channel(struct wil6210_priv *wil, int *channel);
+int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb);
+int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
+                      const void *mac_addr);
+int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
+                      const void *mac_addr, int key_len, const void *key);
+int wmi_echo(struct wil6210_priv *wil);
+int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
+
+int wil6210_init_irq(struct wil6210_priv *wil, int irq);
+void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
+void wil6210_disable_irq(struct wil6210_priv *wil);
+void wil6210_enable_irq(struct wil6210_priv *wil);
+
+int wil6210_debugfs_init(struct wil6210_priv *wil);
+void wil6210_debugfs_remove(struct wil6210_priv *wil);
+
+struct wireless_dev *wil_cfg80211_init(struct device *dev);
+void wil_wdev_free(struct wil6210_priv *wil);
+
+int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
+int wmi_set_bcon(struct wil6210_priv *wil, int bi, u8 wmi_nettype);
+void wil6210_disconnect(struct wil6210_priv *wil, void *bssid);
+
+int wil_rx_init(struct wil6210_priv *wil);
+void wil_rx_fini(struct wil6210_priv *wil);
+
+/* TX API */
+int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
+                     int cid, int tid);
+void wil_vring_fini_tx(struct wil6210_priv *wil, int id);
+
+netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+void wil_tx_complete(struct wil6210_priv *wil, int ringid);
+
+/* RX API */
+void wil_rx_handle(struct wil6210_priv *wil);
+
+int wil_iftype_nl2wmi(enum nl80211_iftype type);
+
+#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
new file mode 100644 (file)
index 0000000..12915f6
--- /dev/null
@@ -0,0 +1,975 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/etherdevice.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+
+/**
+ * WMI event receiving - theory of operations
+ *
+ * When the firmware is about to report a WMI event, it fills a memory area
+ * in the mailbox and raises the misc. IRQ. The threaded interrupt handler
+ * invoked for the misc IRQ calls @wmi_recv_cmd.
+ *
+ * @wmi_recv_cmd reads the event, allocates a memory chunk and attaches it to
+ * the event list @wil->pending_wmi_ev. Then, the work queue @wil->wmi_wq wakes
+ * up and handles events within @wmi_event_worker. Every event gets detached
+ * from the list, processed and deleted.
+ *
+ * The purpose of this mechanism is to release the IRQ thread; otherwise,
+ * if WMI event handling involves another WMI command flow, that second flow
+ * would never complete because the IRQ thread stays blocked.
+ */
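Schematically (wmi_recv_cmd() and wmi_event_handle() appear later in this file; wmi_event_worker() is declared in wil6210.h):

	misc IRQ thread                      "wil6210_wmi" workqueue
	---------------                      -----------------------
	wmi_recv_cmd()
	  copy event out of the mailbox
	  list_add_tail() to pending_wmi_ev
	  queue_work(wil->wmi_wq, ...)  -->   wmi_event_worker()
	                                        detach event from the list
	                                        wmi_event_handle()
	                                          awaited reply (reply_id set)?
	                                            copy to reply_buf,
	                                            complete(&wmi_ready)
	                                          else: wmi_evt_call_handler()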
+
+/**
+ * Addressing - theory of operations
+ *
+ * There are several buses present on the WIL6210 card.
+ * Same memory areas are visible at different address on
+ * the different busses. There are 3 main bus masters:
+ *  - MAC CPU (ucode)
+ *  - User CPU (firmware)
+ *  - AHB (host)
+ *
+ * On the PCI bus, there is one BAR (BAR0) of 2Mb size, exposing
+ * AHB addresses starting from 0x880000
+ *
+ * Internally, firmware uses addresses that allows faster access but
+ * are invisible from the host. To read from these addresses, alternative
+ * AHB address must be used.
+ *
+ * Memory mapping
+ * Linker address         PCI/Host address
+ *                        0x880000 .. 0xa80000  2Mb BAR0
+ * 0x800000 .. 0x807000   0x900000 .. 0x907000  28k DCCM
+ * 0x840000 .. 0x857000   0x908000 .. 0x91f000  92k PERIPH
+ */
+
+/**
+ * @fw_mapping provides memory remapping table
+ */
+static const struct {
+       u32 from; /* linker address - from, inclusive */
+       u32 to;   /* linker address - to, exclusive */
+       u32 host; /* PCI/Host address - BAR0 + 0x880000 */
+} fw_mapping[] = {
+       {0x000000, 0x040000, 0x8c0000}, /* FW code RAM 256k */
+       {0x800000, 0x808000, 0x900000}, /* FW data RAM 32k */
+       {0x840000, 0x860000, 0x908000}, /* peripheral data RAM 128k/96k used */
+       {0x880000, 0x88a000, 0x880000}, /* various RGF */
+       {0x8c0000, 0x932000, 0x8c0000}, /* trivial mapping for upper area */
+       /*
+        * 920000..930000 ucode code RAM
+        * 930000..932000 ucode data RAM
+        */
+};
+
+/**
+ * return AHB address for given firmware/ucode internal (linker) address
+ * @x - internal address
+ * If the address has no valid AHB mapping, return 0
+ */
+static u32 wmi_addr_remap(u32 x)
+{
+       uint i;
+
+       for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
+               if ((x >= fw_mapping[i].from) && (x < fw_mapping[i].to))
+                       return x + fw_mapping[i].host - fw_mapping[i].from;
+       }
+
+       return 0;
+}
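A worked example of the remap: firmware data-RAM address 0x800100 falls into the second @fw_mapping entry (0x800000..0x808000 -> host 0x900000), so wmi_addr_remap(0x800100) = 0x800100 + 0x900000 - 0x800000 = 0x900100; wmi_buffer() below then converts that to BAR0 offset HOSTADDR(0x900100) = 0x080100. An address outside every range, e.g. 0x860100, returns 0.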
+
+/**
+ * Check address validity for WMI buffer; remap if needed
+ * @ptr - internal (linker) fw/ucode address
+ *
+ * Valid buffer should be DWORD aligned
+ *
+ * return address for accessing buffer from the host;
+ * if buffer is not valid, return NULL.
+ */
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+{
+       u32 off;
+       u32 ptr = le32_to_cpu(ptr_);
+
+       if (ptr % 4)
+               return NULL;
+
+       ptr = wmi_addr_remap(ptr);
+       if (ptr < WIL6210_FW_HOST_OFF)
+               return NULL;
+
+       off = HOSTADDR(ptr);
+       if (off > WIL6210_MEM_SIZE - 4)
+               return NULL;
+
+       return wil->csr + off;
+}
+
+/**
+ * Check address validity
+ */
+void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr)
+{
+       u32 off;
+
+       if (ptr % 4)
+               return NULL;
+
+       if (ptr < WIL6210_FW_HOST_OFF)
+               return NULL;
+
+       off = HOSTADDR(ptr);
+       if (off > WIL6210_MEM_SIZE - 4)
+               return NULL;
+
+       return wil->csr + off;
+}
+
+int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
+                struct wil6210_mbox_hdr *hdr)
+{
+       void __iomem *src = wmi_buffer(wil, ptr);
+       if (!src)
+               return -EINVAL;
+
+       wil_memcpy_fromio_32(hdr, src, sizeof(*hdr));
+
+       return 0;
+}
+
+static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
+{
+       struct {
+               struct wil6210_mbox_hdr hdr;
+               struct wil6210_mbox_hdr_wmi wmi;
+       } __packed cmd = {
+               .hdr = {
+                       .type = WIL_MBOX_HDR_TYPE_WMI,
+                       .flags = 0,
+                       .len = cpu_to_le16(sizeof(cmd.wmi) + len),
+               },
+               .wmi = {
+                       .id = cpu_to_le16(cmdid),
+                       .info1 = 0,
+               },
+       };
+       struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx;
+       struct wil6210_mbox_ring_desc d_head;
+       u32 next_head;
+       void __iomem *dst;
+       void __iomem *head = wmi_addr(wil, r->head);
+       uint retry;
+
+       if (sizeof(cmd) + len > r->entry_size) {
+               wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
+                       (int)(sizeof(cmd) + len), r->entry_size);
+               return -ERANGE;
+
+       }
+
+       might_sleep();
+
+       if (!test_bit(wil_status_fwready, &wil->status)) {
+               wil_err(wil, "FW not ready\n");
+               return -EAGAIN;
+       }
+
+       if (!head) {
+               wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head);
+               return -EINVAL;
+       }
+       /* read Tx head till it is not busy */
+       for (retry = 5; retry > 0; retry--) {
+               wil_memcpy_fromio_32(&d_head, head, sizeof(d_head));
+               if (d_head.sync == 0)
+                       break;
+               msleep(20);
+       }
+       if (d_head.sync != 0) {
+               wil_err(wil, "WMI head busy\n");
+               return -EBUSY;
+       }
+       /* next head */
+       next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size);
+       wil_dbg_WMI(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
+       /* wait till FW finish with previous command */
+       for (retry = 5; retry > 0; retry--) {
+               r->tail = ioread32(wil->csr + HOST_MBOX +
+                                  offsetof(struct wil6210_mbox_ctl, tx.tail));
+               if (next_head != r->tail)
+                       break;
+               msleep(20);
+       }
+       if (next_head == r->tail) {
+               wil_err(wil, "WMI ring full\n");
+               return -EBUSY;
+       }
+       dst = wmi_buffer(wil, d_head.addr);
+       if (!dst) {
+               wil_err(wil, "invalid WMI buffer: 0x%08x\n",
+                       le32_to_cpu(d_head.addr));
+               return -EINVAL;
+       }
+       cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
+       /* set command */
+       wil_dbg_WMI(wil, "WMI command 0x%04x [%d]\n", cmdid, len);
+       wil_hex_dump_WMI("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
+                        sizeof(cmd), true);
+       wil_hex_dump_WMI("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
+                        len, true);
+       wil_memcpy_toio_32(dst, &cmd, sizeof(cmd));
+       wil_memcpy_toio_32(dst + sizeof(cmd), buf, len);
+       /* mark entry as full */
+       iowrite32(1, wil->csr + HOSTADDR(r->head) +
+                 offsetof(struct wil6210_mbox_ring_desc, sync));
+       /* advance next ptr */
+       iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
+                 offsetof(struct wil6210_mbox_ctl, tx.head));
+
+       /* interrupt to FW */
+       iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
+
+       return 0;
+}
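The head-advance arithmetic above works on byte offsets: each slot is a struct wil6210_mbox_ring_desc (8 bytes) and, as the modulo implies, r->size is the ring length in bytes. With illustrative values r->base = 0x880100, r->size = 0x40 (8 slots) and r->head = 0x880138:

	next_head = 0x880100 + ((0x880138 - 0x880100 + 8) % 0x40)
	          = 0x880100 + (0x40 % 0x40)
	          = 0x880100   /* head wraps back to the base */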
+
+int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
+{
+       int rc;
+
+       mutex_lock(&wil->wmi_mutex);
+       rc = __wmi_send(wil, cmdid, buf, len);
+       mutex_unlock(&wil->wmi_mutex);
+
+       return rc;
+}
+
+/*=== Event handlers ===*/
+static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct wireless_dev *wdev = wil->wdev;
+       struct wmi_ready_event *evt = d;
+       u32 ver = le32_to_cpu(evt->sw_version);
+
+       wil_dbg_WMI(wil, "FW ver. %d; MAC %pM\n", ver, evt->mac);
+
+       if (!is_valid_ether_addr(ndev->dev_addr)) {
+               memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
+               memcpy(ndev->perm_addr, evt->mac, ETH_ALEN);
+       }
+       snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
+                "%d", ver);
+}
+
+static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
+                            int len)
+{
+       wil_dbg_WMI(wil, "WMI: FW ready\n");
+
+       set_bit(wil_status_fwready, &wil->status);
+       /* reuse wmi_ready for the firmware ready indication */
+       complete(&wil->wmi_ready);
+}
+
+static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
+{
+       struct wmi_rx_mgmt_packet_event *data = d;
+       struct wiphy *wiphy = wil_to_wiphy(wil);
+       struct ieee80211_mgmt *rx_mgmt_frame =
+                       (struct ieee80211_mgmt *)data->payload;
+       int ch_no = data->info.channel+1;
+       u32 freq = ieee80211_channel_to_frequency(ch_no,
+                       IEEE80211_BAND_60GHZ);
+       struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq);
+       /* TODO convert LE to CPU */
+       s32 signal = 0; /* TODO */
+       __le16 fc = rx_mgmt_frame->frame_control;
+       u32 d_len = le32_to_cpu(data->info.len);
+       u16 d_status = le16_to_cpu(data->info.status);
+
+       wil_dbg_WMI(wil, "MGMT: channel %d MCS %d SNR %d\n",
+                   data->info.channel, data->info.mcs, data->info.snr);
+       wil_dbg_WMI(wil, "status 0x%04x len %d stype %04x\n", d_status, d_len,
+                   le16_to_cpu(data->info.stype));
+       wil_dbg_WMI(wil, "qid %d mid %d cid %d\n",
+                   data->info.qid, data->info.mid, data->info.cid);
+
+       if (!channel) {
+               wil_err(wil, "Frame on unsupported channel\n");
+               return;
+       }
+
+       if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)) {
+               struct cfg80211_bss *bss;
+               u64 tsf = le64_to_cpu(rx_mgmt_frame->u.beacon.timestamp);
+               u16 cap = le16_to_cpu(rx_mgmt_frame->u.beacon.capab_info);
+               u16 bi = le16_to_cpu(rx_mgmt_frame->u.beacon.beacon_int);
+               const u8 *ie_buf = rx_mgmt_frame->u.beacon.variable;
+               size_t ie_len = d_len - offsetof(struct ieee80211_mgmt,
+                                                u.beacon.variable);
+               wil_dbg_WMI(wil, "Capability info : 0x%04x\n", cap);
+
+               bss = cfg80211_inform_bss(wiphy, channel, rx_mgmt_frame->bssid,
+                                         tsf, cap, bi, ie_buf, ie_len,
+                                         signal, GFP_KERNEL);
+               if (bss) {
+                       wil_dbg_WMI(wil, "Added BSS %pM\n",
+                                   rx_mgmt_frame->bssid);
+                       cfg80211_put_bss(bss);
+               } else {
+                       wil_err(wil, "cfg80211_inform_bss() failed\n");
+               }
+       }
+}
+
+static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
+                                 void *d, int len)
+{
+       if (wil->scan_request) {
+               struct wmi_scan_complete_event *data = d;
+               bool aborted = (data->status != 0);
+
+               wil_dbg_WMI(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
+               cfg80211_scan_done(wil->scan_request, aborted);
+               wil->scan_request = NULL;
+       } else {
+               wil_err(wil, "SCAN_COMPLETE while not scanning\n");
+       }
+}
+
+static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct wireless_dev *wdev = wil->wdev;
+       struct wmi_connect_event *evt = d;
+       int ch; /* channel number */
+       struct station_info sinfo;
+       u8 *assoc_req_ie, *assoc_resp_ie;
+       size_t assoc_req_ielen, assoc_resp_ielen;
+       /* capinfo(u16) + listen_interval(u16) + IEs */
+       const size_t assoc_req_ie_offset = sizeof(u16) * 2;
+       /* capinfo(u16) + status_code(u16) + associd(u16) + IEs */
+       const size_t assoc_resp_ie_offset = sizeof(u16) * 3;
+
+       if (len < sizeof(*evt)) {
+               wil_err(wil, "Connect event too short : %d bytes\n", len);
+               return;
+       }
+       if (len != sizeof(*evt) + evt->beacon_ie_len + evt->assoc_req_len +
+                  evt->assoc_resp_len) {
+               wil_err(wil,
+                       "Connect event corrupted : %d != %d + %d + %d + %d\n",
+                       len, (int)sizeof(*evt), evt->beacon_ie_len,
+                       evt->assoc_req_len, evt->assoc_resp_len);
+               return;
+       }
+       ch = evt->channel + 1;
+       wil_dbg_WMI(wil, "Connect %pM channel [%d] cid %d\n",
+                   evt->bssid, ch, evt->cid);
+       wil_hex_dump_WMI("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1,
+                        evt->assoc_info, len - sizeof(*evt), true);
+
+       /* figure out IE's */
+       assoc_req_ie = &evt->assoc_info[evt->beacon_ie_len +
+                                       assoc_req_ie_offset];
+       assoc_req_ielen = evt->assoc_req_len - assoc_req_ie_offset;
+       if (evt->assoc_req_len <= assoc_req_ie_offset) {
+               assoc_req_ie = NULL;
+               assoc_req_ielen = 0;
+       }
+
+       assoc_resp_ie = &evt->assoc_info[evt->beacon_ie_len +
+                                        evt->assoc_req_len +
+                                        assoc_resp_ie_offset];
+       assoc_resp_ielen = evt->assoc_resp_len - assoc_resp_ie_offset;
+       if (evt->assoc_resp_len <= assoc_resp_ie_offset) {
+               assoc_resp_ie = NULL;
+               assoc_resp_ielen = 0;
+       }
+
+       if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
+           (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
+               if (wdev->sme_state != CFG80211_SME_CONNECTING) {
+                       wil_err(wil, "Not in connecting state\n");
+                       return;
+               }
+               del_timer_sync(&wil->connect_timer);
+               cfg80211_connect_result(ndev, evt->bssid,
+                                       assoc_req_ie, assoc_req_ielen,
+                                       assoc_resp_ie, assoc_resp_ielen,
+                                       WLAN_STATUS_SUCCESS, GFP_KERNEL);
+
+       } else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
+                  (wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
+               memset(&sinfo, 0, sizeof(sinfo));
+
+               sinfo.generation = wil->sinfo_gen++;
+
+               if (assoc_req_ie) {
+                       sinfo.assoc_req_ies = assoc_req_ie;
+                       sinfo.assoc_req_ies_len = assoc_req_ielen;
+                       sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
+               }
+
+               cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL);
+       }
+       set_bit(wil_status_fwconnected, &wil->status);
+
+       /* FIXME FW can transmit only ucast frames to peer */
+       /* FIXME real ring_id instead of hard coded 0 */
+       memcpy(wil->dst_addr[0], evt->bssid, ETH_ALEN);
+
+       wil->pending_connect_cid = evt->cid;
+       queue_work(wil->wmi_wq_conn, &wil->wmi_connect_worker);
+}
+
+static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
+                              void *d, int len)
+{
+       struct wmi_disconnect_event *evt = d;
+
+       wil_dbg_WMI(wil, "Disconnect %pM reason %d proto %d wmi\n",
+                   evt->bssid,
+                   evt->protocol_reason_status, evt->disconnect_reason);
+
+       wil->sinfo_gen++;
+
+       wil6210_disconnect(wil, evt->bssid);
+       clear_bit(wil_status_dontscan, &wil->status);
+}
+
+static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len)
+{
+       struct wmi_notify_req_done_event *evt = d;
+
+       if (len < sizeof(*evt)) {
+               wil_err(wil, "Short NOTIFY event\n");
+               return;
+       }
+
+       wil->stats.tsf = le64_to_cpu(evt->tsf);
+       wil->stats.snr = le32_to_cpu(evt->snr_val);
+       wil->stats.bf_mcs = le16_to_cpu(evt->bf_mcs);
+       wil->stats.my_rx_sector = le16_to_cpu(evt->my_rx_sector);
+       wil->stats.my_tx_sector = le16_to_cpu(evt->my_tx_sector);
+       wil->stats.peer_rx_sector = le16_to_cpu(evt->other_rx_sector);
+       wil->stats.peer_tx_sector = le16_to_cpu(evt->other_tx_sector);
+       wil_dbg_WMI(wil, "Link status, MCS %d TSF 0x%016llx\n"
+                   "BF status 0x%08x SNR 0x%08x\n"
+                   "Tx Tpt %d goodput %d Rx goodput %d\n"
+                   "Sectors(rx:tx) my %d:%d peer %d:%d\n",
+                   wil->stats.bf_mcs, wil->stats.tsf, evt->status,
+                   wil->stats.snr, le32_to_cpu(evt->tx_tpt),
+                   le32_to_cpu(evt->tx_goodput), le32_to_cpu(evt->rx_goodput),
+                   wil->stats.my_rx_sector, wil->stats.my_tx_sector,
+                   wil->stats.peer_rx_sector, wil->stats.peer_tx_sector);
+}
+
+/*
+ * Firmware reports the EAPOL frame using a WMI event.
+ * Reconstruct the Ethernet frame and deliver it via the normal Rx path.
+ */
+static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
+                            void *d, int len)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct wmi_eapol_rx_event *evt = d;
+       u16 eapol_len = le16_to_cpu(evt->eapol_len);
+       int sz = eapol_len + ETH_HLEN;
+       struct sk_buff *skb;
+       struct ethhdr *eth;
+
+       wil_dbg_WMI(wil, "EAPOL len %d from %pM\n", eapol_len,
+                   evt->src_mac);
+
+       if (eapol_len > 196) { /* TODO: revisit size limit */
+               wil_err(wil, "EAPOL too large\n");
+               return;
+       }
+
+       skb = alloc_skb(sz, GFP_KERNEL);
+       if (!skb) {
+               wil_err(wil, "Failed to allocate skb\n");
+               return;
+       }
+       eth = (struct ethhdr *)skb_put(skb, ETH_HLEN);
+       memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN);
+       memcpy(eth->h_source, evt->src_mac, ETH_ALEN);
+       eth->h_proto = cpu_to_be16(ETH_P_PAE);
+       memcpy(skb_put(skb, eapol_len), evt->eapol, eapol_len);
+       skb->protocol = eth_type_trans(skb, ndev);
+       if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
+               ndev->stats.rx_packets++;
+               ndev->stats.rx_bytes += skb->len;
+       } else {
+               ndev->stats.rx_dropped++;
+       }
+}
+
+static const struct {
+       int eventid;
+       void (*handler)(struct wil6210_priv *wil, int eventid,
+                       void *data, int data_len);
+} wmi_evt_handlers[] = {
+       {WMI_READY_EVENTID,             wmi_evt_ready},
+       {WMI_FW_READY_EVENTID,          wmi_evt_fw_ready},
+       {WMI_RX_MGMT_PACKET_EVENTID,    wmi_evt_rx_mgmt},
+       {WMI_SCAN_COMPLETE_EVENTID,     wmi_evt_scan_complete},
+       {WMI_CONNECT_EVENTID,           wmi_evt_connect},
+       {WMI_DISCONNECT_EVENTID,        wmi_evt_disconnect},
+       {WMI_NOTIFY_REQ_DONE_EVENTID,   wmi_evt_notify},
+       {WMI_EAPOL_RX_EVENTID,          wmi_evt_eapol_rx},
+};
+
+/*
+ * Run in IRQ context
+ * Extract WMI command from mailbox. Queue it to the @wil->pending_wmi_ev
+ * that will be eventually handled by the @wmi_event_worker in the thread
+ * context of thread "wil6210_wmi"
+ */
+void wmi_recv_cmd(struct wil6210_priv *wil)
+{
+       struct wil6210_mbox_ring_desc d_tail;
+       struct wil6210_mbox_hdr hdr;
+       struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx;
+       struct pending_wmi_event *evt;
+       u8 *cmd;
+       void __iomem *src;
+       ulong flags;
+
+       for (;;) {
+               u16 len;
+
+               r->head = ioread32(wil->csr + HOST_MBOX +
+                                  offsetof(struct wil6210_mbox_ctl, rx.head));
+               if (r->tail == r->head)
+                       return;
+
+               /* read cmd from tail */
+               wil_memcpy_fromio_32(&d_tail, wil->csr + HOSTADDR(r->tail),
+                                    sizeof(struct wil6210_mbox_ring_desc));
+               if (d_tail.sync == 0) {
+                       wil_err(wil, "Mbox evt not owned by FW?\n");
+                       return;
+               }
+
+               if (0 != wmi_read_hdr(wil, d_tail.addr, &hdr)) {
+                       wil_err(wil, "Mbox evt at 0x%08x?\n",
+                               le32_to_cpu(d_tail.addr));
+                       return;
+               }
+
+               len = le16_to_cpu(hdr.len);
+               src = wmi_buffer(wil, d_tail.addr) +
+                     sizeof(struct wil6210_mbox_hdr);
+               evt = kmalloc(ALIGN(offsetof(struct pending_wmi_event,
+                                            event.wmi) + len, 4),
+                             GFP_KERNEL);
+               if (!evt) {
+                       wil_err(wil, "kmalloc for WMI event (%d) failed\n",
+                               len);
+                       return;
+               }
+               evt->event.hdr = hdr;
+               cmd = (void *)&evt->event.wmi;
+               wil_memcpy_fromio_32(cmd, src, len);
+               /* mark entry as empty */
+               iowrite32(0, wil->csr + HOSTADDR(r->tail) +
+                         offsetof(struct wil6210_mbox_ring_desc, sync));
+               /* indicate */
+               wil_dbg_WMI(wil, "Mbox evt %04x %04x %04x %02x\n",
+                           le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type),
+                           hdr.flags);
+               if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
+                   (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
+                       wil_dbg_WMI(wil, "WMI event 0x%04x\n",
+                                   evt->event.wmi.id);
+               }
+               wil_hex_dump_WMI("evt ", DUMP_PREFIX_OFFSET, 16, 1,
+                                &evt->event.hdr, sizeof(hdr) + len, true);
+
+               /* advance tail */
+               r->tail = r->base + ((r->tail - r->base +
+                         sizeof(struct wil6210_mbox_ring_desc)) % r->size);
+               iowrite32(r->tail, wil->csr + HOST_MBOX +
+                         offsetof(struct wil6210_mbox_ctl, rx.tail));
+
+               /* add to the pending list */
+               spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+               list_add_tail(&evt->list, &wil->pending_wmi_ev);
+               spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+               {
+                       int q = queue_work(wil->wmi_wq,
+                                          &wil->wmi_event_worker);
+                       wil_dbg_WMI(wil, "queue_work -> %d\n", q);
+               }
+       }
+}
+
+int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
+            u16 reply_id, void *reply, u8 reply_size, int to_msec)
+{
+       int rc;
+       int remain;
+
+       mutex_lock(&wil->wmi_mutex);
+
+       rc = __wmi_send(wil, cmdid, buf, len);
+       if (rc)
+               goto out;
+
+       wil->reply_id = reply_id;
+       wil->reply_buf = reply;
+       wil->reply_size = reply_size;
+       remain = wait_for_completion_timeout(&wil->wmi_ready,
+                       msecs_to_jiffies(to_msec));
+       if (0 == remain) {
+               wil_err(wil, "wmi_call(0x%04x->0x%04x) timeout %d msec\n",
+                       cmdid, reply_id, to_msec);
+               rc = -ETIME;
+       } else {
+               wil_dbg_WMI(wil,
+                           "wmi_call(0x%04x->0x%04x) completed in %d msec\n",
+                           cmdid, reply_id,
+                           to_msec - jiffies_to_msecs(remain));
+       }
+       wil->reply_id = 0;
+       wil->reply_buf = NULL;
+       wil->reply_size = 0;
+ out:
+       mutex_unlock(&wil->wmi_mutex);
+
+       return rc;
+}
+
+int wmi_echo(struct wil6210_priv *wil)
+{
+       struct wmi_echo_cmd cmd = {
+               .value = cpu_to_le32(0x12345678),
+       };
+
+       return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd),
+                        WMI_ECHO_RSP_EVENTID, NULL, 0, 20);
+}
+
+int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
+{
+       struct wmi_set_mac_address_cmd cmd;
+
+       memcpy(cmd.mac, addr, ETH_ALEN);
+
+       wil_dbg_WMI(wil, "Set MAC %pM\n", addr);
+
+       return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_set_bcon(struct wil6210_priv *wil, int bi, u8 wmi_nettype)
+{
+       struct wmi_bcon_ctrl_cmd cmd = {
+               .bcon_interval = cpu_to_le16(bi),
+               .network_type = wmi_nettype,
+               .disable_sec_offload = 1,
+       };
+
+       if (!wil->secure_pcp)
+               cmd.disable_sec = 1;
+
+       return wmi_send(wil, WMI_BCON_CTRL_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid)
+{
+       struct wmi_set_ssid_cmd cmd = {
+               .ssid_len = cpu_to_le32(ssid_len),
+       };
+
+       if (ssid_len > sizeof(cmd.ssid))
+               return -EINVAL;
+
+       memcpy(cmd.ssid, ssid, ssid_len);
+
+       return wmi_send(wil, WMI_SET_SSID_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid)
+{
+       int rc;
+       struct {
+               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_set_ssid_cmd cmd;
+       } __packed reply;
+       int len; /* reply.cmd.ssid_len in CPU order */
+
+       rc = wmi_call(wil, WMI_GET_SSID_CMDID, NULL, 0, WMI_GET_SSID_EVENTID,
+                     &reply, sizeof(reply), 20);
+       if (rc)
+               return rc;
+
+       len = le32_to_cpu(reply.cmd.ssid_len);
+       if (len > sizeof(reply.cmd.ssid))
+               return -EINVAL;
+
+       *ssid_len = len;
+       memcpy(ssid, reply.cmd.ssid, len);
+
+       return 0;
+}
+
+int wmi_set_channel(struct wil6210_priv *wil, int channel)
+{
+       struct wmi_set_pcp_channel_cmd cmd = {
+               .channel = channel - 1,
+       };
+
+       return wmi_send(wil, WMI_SET_PCP_CHANNEL_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_get_channel(struct wil6210_priv *wil, int *channel)
+{
+       int rc;
+       struct {
+               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_set_pcp_channel_cmd cmd;
+       } __packed reply;
+
+       rc = wmi_call(wil, WMI_GET_PCP_CHANNEL_CMDID, NULL, 0,
+                     WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply), 20);
+       if (rc)
+               return rc;
+
+       if (reply.cmd.channel > 3)
+               return -EINVAL;
+
+       *channel = reply.cmd.channel + 1;
+
+       return 0;
+}
+
+int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb)
+{
+       struct wmi_eapol_tx_cmd *cmd;
+       struct ethhdr *eth;
+       u16 eapol_len = skb->len - ETH_HLEN;
+       void *eapol = skb->data + ETH_HLEN;
+       uint i;
+       int rc;
+
+       skb_set_mac_header(skb, 0);
+       eth = eth_hdr(skb);
+       wil_dbg_WMI(wil, "EAPOL %d bytes to %pM\n", eapol_len, eth->h_dest);
+       for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
+               if (memcmp(wil->dst_addr[i], eth->h_dest, ETH_ALEN) == 0)
+                       goto found_dest;
+       }
+
+       return -EINVAL;
+
+ found_dest:
+       /* find out eapol data & len */
+       cmd = kzalloc(sizeof(*cmd) + eapol_len, GFP_KERNEL);
+       if (!cmd)
+               return -EINVAL;
+
+       memcpy(cmd->dst_mac, eth->h_dest, ETH_ALEN);
+       cmd->eapol_len = cpu_to_le16(eapol_len);
+       memcpy(cmd->eapol, eapol, eapol_len);
+       rc = wmi_send(wil, WMI_EAPOL_TX_CMDID, cmd, sizeof(*cmd) + eapol_len);
+       kfree(cmd);
+
+       return rc;
+}
+
+int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
+                      const void *mac_addr)
+{
+       struct wmi_delete_cipher_key_cmd cmd = {
+               .key_index = key_index,
+       };
+
+       if (mac_addr)
+               memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
+
+       return wmi_send(wil, WMI_DELETE_CIPHER_KEY_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
+                      const void *mac_addr, int key_len, const void *key)
+{
+       struct wmi_add_cipher_key_cmd cmd = {
+               .key_index = key_index,
+               .key_usage = WMI_KEY_USE_PAIRWISE,
+               .key_len = key_len,
+       };
+
+       if (!key || (key_len > sizeof(cmd.key)))
+               return -EINVAL;
+
+       memcpy(cmd.key, key, key_len);
+       if (mac_addr)
+               memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
+
+       return wmi_send(wil, WMI_ADD_CIPHER_KEY_CMDID, &cmd, sizeof(cmd));
+}
+
+int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
+{
+       int rc;
+       u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
+       struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
+       if (!cmd) {
+               wil_err(wil, "kzalloc(%d) failed\n", len);
+               return -ENOMEM;
+       }
+
+       cmd->mgmt_frm_type = type;
+       /* BUG: FW API defines ieLen as u8. Will fix FW */
+       cmd->ie_len = cpu_to_le16(ie_len);
+       memcpy(cmd->ie_info, ie, ie_len);
+       rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len);
+       kfree(cmd);
+
+       return rc;
+}
+
+void wmi_event_flush(struct wil6210_priv *wil)
+{
+       struct pending_wmi_event *evt, *t;
+
+       wil_dbg_WMI(wil, "%s()\n", __func__);
+
+       list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) {
+               list_del(&evt->list);
+               kfree(evt);
+       }
+}
+
+static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id,
+                                void *d, int len)
+{
+       uint i;
+
+       for (i = 0; i < ARRAY_SIZE(wmi_evt_handlers); i++) {
+               if (wmi_evt_handlers[i].eventid == id) {
+                       wmi_evt_handlers[i].handler(wil, id, d, len);
+                       return true;
+               }
+       }
+
+       return false;
+}
+
+static void wmi_event_handle(struct wil6210_priv *wil,
+                            struct wil6210_mbox_hdr *hdr)
+{
+       u16 len = le16_to_cpu(hdr->len);
+
+       if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) &&
+           (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
+               struct wil6210_mbox_hdr_wmi *wmi = (void *)(&hdr[1]);
+               void *evt_data = (void *)(&wmi[1]);
+               u16 id = le16_to_cpu(wmi->id);
+               /* check if someone waits for this event */
+               if (wil->reply_id && wil->reply_id == id) {
+                       if (wil->reply_buf) {
+                               memcpy(wil->reply_buf, wmi,
+                                      min(len, wil->reply_size));
+                       } else {
+                               wmi_evt_call_handler(wil, id, evt_data,
+                                                    len - sizeof(*wmi));
+                       }
+                       wil_dbg_WMI(wil, "Complete WMI 0x%04x\n", id);
+                       complete(&wil->wmi_ready);
+                       return;
+               }
+               /* unsolicited event */
+               /* search for handler */
+               if (!wmi_evt_call_handler(wil, id, evt_data,
+                                         len - sizeof(*wmi))) {
+                       wil_err(wil, "Unhandled event 0x%04x\n", id);
+               }
+       } else {
+               wil_err(wil, "Unknown event type\n");
+               print_hex_dump(KERN_ERR, "evt?? ", DUMP_PREFIX_OFFSET, 16, 1,
+                              hdr, sizeof(*hdr) + len, true);
+       }
+}
+
+/*
+ * Retrieve next WMI event from the pending list
+ */
+static struct list_head *next_wmi_ev(struct wil6210_priv *wil)
+{
+       ulong flags;
+       struct list_head *ret = NULL;
+
+       spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+
+       if (!list_empty(&wil->pending_wmi_ev)) {
+               ret = wil->pending_wmi_ev.next;
+               list_del(ret);
+       }
+
+       spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+
+       return ret;
+}
+
+/*
+ * Handler for the WMI events
+ */
+void wmi_event_worker(struct work_struct *work)
+{
+       struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+                                                wmi_event_worker);
+       struct pending_wmi_event *evt;
+       struct list_head *lh;
+
+       while ((lh = next_wmi_ev(wil)) != NULL) {
+               evt = list_entry(lh, struct pending_wmi_event, list);
+               wmi_event_handle(wil, &evt->event.hdr);
+               kfree(evt);
+       }
+}
+
+void wmi_connect_worker(struct work_struct *work)
+{
+       int rc;
+       struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+                                               wmi_connect_worker);
+
+       if (wil->pending_connect_cid < 0) {
+               wil_err(wil, "No connection pending\n");
+               return;
+       }
+
+       wil_dbg_WMI(wil, "Configure for connection CID %d\n",
+                   wil->pending_connect_cid);
+
+       rc = wil_vring_init_tx(wil, 0, WIL6210_TX_RING_SIZE,
+                              wil->pending_connect_cid, 0);
+       wil->pending_connect_cid = -1;
+       if (rc == 0)
+               wil_link_on(wil);
+}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
new file mode 100644 (file)
index 0000000..3bbf875
--- /dev/null
@@ -0,0 +1,1116 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ * Copyright (c) 2006-2012 Wilocity.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file contains the definitions of the WMI protocol specified in the
+ * Wireless Module Interface (WMI) for the Wilocity
+ * MARLON 60 Gigabit wireless solution.
+ * It includes definitions of all the commands and events.
+ * Commands are messages from the host to the WM.
+ * Events are messages from the WM to the host.
+ */
+
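+/*
+ * Typical usage (see wmi.c): wmi_send() posts a command such as
+ * WMI_SET_SSID_CMDID together with its wmi_set_ssid_cmd payload, while
+ * wmi_call() additionally waits for the matching reply event, e.g.
+ * WMI_GET_SSID_CMDID paired with WMI_GET_SSID_EVENTID.
+ */
+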
+#ifndef __WILOCITY_WMI_H__
+#define __WILOCITY_WMI_H__
+
+/* General */
+
+#define WMI_MAC_LEN            (6)
+#define WMI_PROX_RANGE_NUM     (3)
+
+/* List of Commands */
+enum wmi_command_id {
+       WMI_CONNECT_CMDID               = 0x0001,
+       WMI_DISCONNECT_CMDID            = 0x0003,
+       WMI_START_SCAN_CMDID            = 0x0007,
+       WMI_SET_BSS_FILTER_CMDID        = 0x0009,
+       WMI_SET_PROBED_SSID_CMDID       = 0x000a,
+       WMI_SET_LISTEN_INT_CMDID        = 0x000b,
+       WMI_BCON_CTRL_CMDID             = 0x000f,
+       WMI_ADD_CIPHER_KEY_CMDID        = 0x0016,
+       WMI_DELETE_CIPHER_KEY_CMDID     = 0x0017,
+       WMI_SET_APPIE_CMDID             = 0x003f,
+       WMI_GET_APPIE_CMDID             = 0x0040,
+       WMI_SET_WSC_STATUS_CMDID        = 0x0041,
+       WMI_PXMT_RANGE_CFG_CMDID        = 0x0042,
+       WMI_PXMT_SNR2_RANGE_CFG_CMDID   = 0x0043,
+       WMI_FAST_MEM_ACC_MODE_CMDID     = 0x0300,
+       WMI_MEM_READ_CMDID              = 0x0800,
+       WMI_MEM_WR_CMDID                = 0x0801,
+       WMI_ECHO_CMDID                  = 0x0803,
+       WMI_DEEP_ECHO_CMDID             = 0x0804,
+       WMI_CONFIG_MAC_CMDID            = 0x0805,
+       WMI_CONFIG_PHY_DEBUG_CMDID      = 0x0806,
+       WMI_ADD_STATION_CMDID           = 0x0807,
+       WMI_ADD_DEBUG_TX_PCKT_CMDID     = 0x0808,
+       WMI_PHY_GET_STATISTICS_CMDID    = 0x0809,
+       WMI_FS_TUNE_CMDID               = 0x080a,
+       WMI_CORR_MEASURE_CMDID          = 0x080b,
+       WMI_TEMP_SENSE_CMDID            = 0x080e,
+       WMI_DC_CALIB_CMDID              = 0x080f,
+       WMI_SEND_TONE_CMDID             = 0x0810,
+       WMI_IQ_TX_CALIB_CMDID           = 0x0811,
+       WMI_IQ_RX_CALIB_CMDID           = 0x0812,
+       WMI_SET_UCODE_IDLE_CMDID        = 0x0813,
+       WMI_SET_WORK_MODE_CMDID         = 0x0815,
+       WMI_LO_LEAKAGE_CALIB_CMDID      = 0x0816,
+       WMI_MARLON_R_ACTIVATE_CMDID     = 0x0817,
+       WMI_MARLON_R_READ_CMDID         = 0x0818,
+       WMI_MARLON_R_WRITE_CMDID        = 0x0819,
+       WMI_MARLON_R_TXRX_SEL_CMDID     = 0x081a,
+       MAC_IO_STATIC_PARAMS_CMDID      = 0x081b,
+       MAC_IO_DYNAMIC_PARAMS_CMDID     = 0x081c,
+       WMI_SILENT_RSSI_CALIB_CMDID     = 0x081d,
+       WMI_CFG_RX_CHAIN_CMDID          = 0x0820,
+       WMI_VRING_CFG_CMDID             = 0x0821,
+       WMI_RX_ON_CMDID                 = 0x0822,
+       WMI_VRING_BA_EN_CMDID           = 0x0823,
+       WMI_VRING_BA_DIS_CMDID          = 0x0824,
+       WMI_RCP_ADDBA_RESP_CMDID        = 0x0825,
+       WMI_RCP_DELBA_CMDID             = 0x0826,
+       WMI_SET_SSID_CMDID              = 0x0827,
+       WMI_GET_SSID_CMDID              = 0x0828,
+       WMI_SET_PCP_CHANNEL_CMDID       = 0x0829,
+       WMI_GET_PCP_CHANNEL_CMDID       = 0x082a,
+       WMI_SW_TX_REQ_CMDID             = 0x082b,
+       WMI_RX_OFF_CMDID                = 0x082c,
+       WMI_READ_MAC_RXQ_CMDID          = 0x0830,
+       WMI_READ_MAC_TXQ_CMDID          = 0x0831,
+       WMI_WRITE_MAC_RXQ_CMDID         = 0x0832,
+       WMI_WRITE_MAC_TXQ_CMDID         = 0x0833,
+       WMI_WRITE_MAC_XQ_FIELD_CMDID    = 0x0834,
+       WMI_MLME_PUSH_CMDID             = 0x0835,
+       WMI_BEAMFORMING_MGMT_CMDID      = 0x0836,
+       WMI_BF_TXSS_MGMT_CMDID          = 0x0837,
+       WMI_BF_SM_MGMT_CMDID            = 0x0838,
+       WMI_BF_RXSS_MGMT_CMDID          = 0x0839,
+       WMI_SET_SECTORS_CMDID           = 0x0849,
+       WMI_MAINTAIN_PAUSE_CMDID        = 0x0850,
+       WMI_MAINTAIN_RESUME_CMDID       = 0x0851,
+       WMI_RS_MGMT_CMDID               = 0x0852,
+       WMI_RF_MGMT_CMDID               = 0x0853,
+       /* Performance monitoring commands */
+       WMI_BF_CTRL_CMDID               = 0x0862,
+       WMI_NOTIFY_REQ_CMDID            = 0x0863,
+       WMI_GET_STATUS_CMDID            = 0x0864,
+       WMI_UNIT_TEST_CMDID             = 0x0900,
+       WMI_HICCUP_CMDID                = 0x0901,
+       WMI_FLASH_READ_CMDID            = 0x0902,
+       WMI_FLASH_WRITE_CMDID           = 0x0903,
+       WMI_SECURITY_UNIT_TEST_CMDID    = 0x0904,
+
+       WMI_SET_MAC_ADDRESS_CMDID       = 0xf003,
+       WMI_ABORT_SCAN_CMDID            = 0xf007,
+       WMI_SET_PMK_CMDID               = 0xf028,
+
+       WMI_SET_PROMISCUOUS_MODE_CMDID  = 0xf041,
+       WMI_GET_PMK_CMDID               = 0xf048,
+       WMI_SET_PASSPHRASE_CMDID        = 0xf049,
+       WMI_SEND_ASSOC_RES_CMDID        = 0xf04a,
+       WMI_SET_ASSOC_REQ_RELAY_CMDID   = 0xf04b,
+       WMI_EAPOL_TX_CMDID              = 0xf04c,
+       WMI_MAC_ADDR_REQ_CMDID          = 0xf04d,
+       WMI_FW_VER_CMDID                = 0xf04e,
+};
+
+/*
+ * Commands data structures
+ */
+
+/*
+ * Frame Types
+ */
+enum wmi_mgmt_frame_type {
+       WMI_FRAME_BEACON        = 0,
+       WMI_FRAME_PROBE_REQ     = 1,
+       WMI_FRAME_PROBE_RESP    = 2,
+       WMI_FRAME_ASSOC_REQ     = 3,
+       WMI_FRAME_ASSOC_RESP    = 4,
+       WMI_NUM_MGMT_FRAME,
+};
+
+/*
+ * WMI_CONNECT_CMDID
+ */
+enum wmi_network_type {
+       WMI_NETTYPE_INFRA               = 0x01,
+       WMI_NETTYPE_ADHOC               = 0x02,
+       WMI_NETTYPE_ADHOC_CREATOR       = 0x04,
+       WMI_NETTYPE_AP                  = 0x10,
+       WMI_NETTYPE_P2P                 = 0x20,
+       WMI_NETTYPE_WBE                 = 0x40, /* PCIE over 60g */
+};
+
+enum wmi_dot11_auth_mode {
+       WMI_AUTH11_OPEN                 = 0x01,
+       WMI_AUTH11_SHARED               = 0x02,
+       WMI_AUTH11_LEAP                 = 0x04,
+       WMI_AUTH11_WSC                  = 0x08,
+};
+
+enum wmi_auth_mode {
+       WMI_AUTH_NONE                   = 0x01,
+       WMI_AUTH_WPA                    = 0x02,
+       WMI_AUTH_WPA2                   = 0x04,
+       WMI_AUTH_WPA_PSK                = 0x08,
+       WMI_AUTH_WPA2_PSK               = 0x10,
+       WMI_AUTH_WPA_CCKM               = 0x20,
+       WMI_AUTH_WPA2_CCKM              = 0x40,
+};
+
+enum wmi_crypto_type {
+       WMI_CRYPT_NONE                  = 0x01,
+       WMI_CRYPT_WEP                   = 0x02,
+       WMI_CRYPT_TKIP                  = 0x04,
+       WMI_CRYPT_AES                   = 0x08,
+       WMI_CRYPT_AES_GCMP              = 0x20,
+};
+
+
+enum wmi_connect_ctrl_flag_bits {
+       WMI_CONNECT_ASSOC_POLICY_USER           = 0x0001,
+       WMI_CONNECT_SEND_REASSOC                = 0x0002,
+       WMI_CONNECT_IGNORE_WPAx_GROUP_CIPHER    = 0x0004,
+       WMI_CONNECT_PROFILE_MATCH_DONE          = 0x0008,
+       WMI_CONNECT_IGNORE_AAC_BEACON           = 0x0010,
+       WMI_CONNECT_CSA_FOLLOW_BSS              = 0x0020,
+       WMI_CONNECT_DO_WPA_OFFLOAD              = 0x0040,
+       WMI_CONNECT_DO_NOT_DEAUTH               = 0x0080,
+};
+
+#define WMI_MAX_SSID_LEN    (32)
+
+struct wmi_connect_cmd {
+       u8 network_type;
+       u8 dot11_auth_mode;
+       u8 auth_mode;
+       u8 pairwise_crypto_type;
+       u8 pairwise_crypto_len;
+       u8 group_crypto_type;
+       u8 group_crypto_len;
+       u8 ssid_len;
+       u8 ssid[WMI_MAX_SSID_LEN];
+       u8 channel;
+       u8 reserved0;
+       u8 bssid[WMI_MAC_LEN];
+       __le32 ctrl_flags;
+       u8 dst_mac[WMI_MAC_LEN];
+       u8 reserved1[2];
+} __packed;
+
+
+/*
+ * WMI_RECONNECT_CMDID
+ */
+struct wmi_reconnect_cmd {
+       u8 channel;                     /* hint */
+       u8 reserved;
+       u8 bssid[WMI_MAC_LEN];          /* mandatory if set */
+} __packed;
+
+
+/*
+ * WMI_SET_PMK_CMDID
+ */
+
+#define WMI_MIN_KEY_INDEX      (0)
+#define WMI_MAX_KEY_INDEX      (3)
+#define WMI_MAX_KEY_LEN                (32)
+#define WMI_PASSPHRASE_LEN     (64)
+#define WMI_PMK_LEN            (32)
+
+struct wmi_set_pmk_cmd {
+       u8 pmk[WMI_PMK_LEN];
+} __packed;
+
+
+/*
+ * WMI_SET_PASSPHRASE_CMDID
+ */
+struct wmi_set_passphrase_cmd {
+       u8 ssid[WMI_MAX_SSID_LEN];
+       u8 passphrase[WMI_PASSPHRASE_LEN];
+       u8 ssid_len;
+       u8 passphrase_len;
+} __packed;
+
+/*
+ * WMI_ADD_CIPHER_KEY_CMDID
+ */
+enum wmi_key_usage {
+       WMI_KEY_USE_PAIRWISE    = 0,
+       WMI_KEY_USE_GROUP       = 1,
+       WMI_KEY_USE_TX          = 2,  /* default Tx Key - Static WEP only */
+};
+
+struct wmi_add_cipher_key_cmd {
+       u8 key_index;
+       u8 key_type;
+       u8 key_usage;           /* enum wmi_key_usage */
+       u8 key_len;
+       u8 key_rsc[8];          /* key replay sequence counter */
+       u8 key[WMI_MAX_KEY_LEN];
+       u8 key_op_ctrl;         /* Additional Key Control information */
+       u8 mac[WMI_MAC_LEN];
+} __packed;
+
+/*
+ * WMI_DELETE_CIPHER_KEY_CMDID
+ */
+struct wmi_delete_cipher_key_cmd {
+       u8 key_index;
+       u8 mac[WMI_MAC_LEN];
+} __packed;
+
+
+/*
+ * WMI_START_SCAN_CMDID
+ *
+ * Start L1 scan operation
+ *
+ * Returned events:
+ * - WMI_RX_MGMT_PACKET_EVENTID - for every probe resp.
+ * - WMI_SCAN_COMPLETE_EVENTID
+ */
+enum wmi_scan_type {
+       WMI_LONG_SCAN           = 0,
+       WMI_SHORT_SCAN          = 1,
+};
+
+struct wmi_start_scan_cmd {
+       u8 reserved[8];
+       __le32 home_dwell_time; /* Max duration in the home channel(ms) */
+       __le32 force_scan_interval;     /* Time interval between scans (ms)*/
+       u8 scan_type;           /* wmi_scan_type */
+       u8 num_channels;                /* how many channels follow */
+       struct {
+               u8 channel;
+               u8 reserved;
+       } channel_list[0];      /* channels ID's */
+                               /* 0 - 58320 MHz */
+                               /* 1 - 60480 MHz */
+                               /* 2 - 62640 MHz */
+} __packed;
+
+/*
+ * WMI_SET_PROBED_SSID_CMDID
+ */
+#define MAX_PROBED_SSID_INDEX   (15)
+
+enum wmi_ssid_flag {
+       WMI_SSID_FLAG_DISABLE   = 0,    /* disables entry */
+       WMI_SSID_FLAG_SPECIFIC  = 1,    /* probes specified ssid */
+       WMI_SSID_FLAG_ANY       = 2,    /* probes for any ssid */
+};
+
+struct wmi_probed_ssid_cmd {
+       u8 entry_index;                 /* 0 to MAX_PROBED_SSID_INDEX */
+       u8 flag;                        /* enum wmi_ssid_flag */
+       u8 ssid_len;
+       u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_SET_APPIE_CMDID
+ * Add Application specified IE to a management frame
+ */
+struct wmi_set_appie_cmd {
+       u8 mgmt_frm_type;       /* enum wmi_mgmt_frame_type */
+       u8 reserved;
+       __le16 ie_len;  /* Length of the IE to be added to MGMT frame */
+       u8 ie_info[0];
+} __packed;
+
+#define WMI_MAX_IE_LEN (1024)
+
+struct wmi_pxmt_range_cfg_cmd {
+       u8 dst_mac[WMI_MAC_LEN];
+       __le16 range;
+} __packed;
+
+struct wmi_pxmt_snr2_range_cfg_cmd {
+       s8 snr2range_arr[WMI_PROX_RANGE_NUM-1];
+} __packed;
+
+/*
+ * WMI_RF_MGMT_CMDID
+ */
+enum wmi_rf_mgmt_type {
+       WMI_RF_MGMT_W_DISABLE   = 0,
+       WMI_RF_MGMT_W_ENABLE    = 1,
+       WMI_RF_MGMT_GET_STATUS  = 2,
+};
+
+struct wmi_rf_mgmt_cmd {
+       __le32 rf_mgmt_type;
+} __packed;
+
+/*
+ * WMI_SET_SSID_CMDID
+ */
+struct wmi_set_ssid_cmd {
+       __le32 ssid_len;
+       u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_SET_PCP_CHANNEL_CMDID
+ */
+struct wmi_set_pcp_channel_cmd {
+       u8 channel;
+       u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_BCON_CTRL_CMDID
+ */
+struct wmi_bcon_ctrl_cmd {
+       __le16 bcon_interval;
+       __le16 frag_num;
+       __le64 ss_mask;
+       u8 network_type;
+       u8 reserved;
+       u8 disable_sec_offload;
+       u8 disable_sec;
+} __packed;
+
+/*
+ * WMI_SW_TX_REQ_CMDID
+ */
+struct wmi_sw_tx_req_cmd {
+       u8 dst_mac[WMI_MAC_LEN];
+       __le16 len;
+       u8 payload[0];
+} __packed;
+
+/*
+ * WMI_VRING_CFG_CMDID
+ */
+
+struct wmi_sw_ring_cfg {
+       __le64 ring_mem_base;
+       __le16 ring_size;
+       __le16 max_mpdu_size;
+} __packed;
+
+struct wmi_vring_cfg_schd {
+       __le16 priority;
+       __le16 timeslot_us;
+} __packed;
+
+enum wmi_vring_cfg_encap_trans_type {
+       WMI_VRING_ENC_TYPE_802_3                = 0,
+       WMI_VRING_ENC_TYPE_NATIVE_WIFI          = 1,
+};
+
+enum wmi_vring_cfg_ds_cfg {
+       WMI_VRING_DS_PBSS                       = 0,
+       WMI_VRING_DS_STATION                    = 1,
+       WMI_VRING_DS_AP                         = 2,
+       WMI_VRING_DS_ADDR4                      = 3,
+};
+
+enum wmi_vring_cfg_nwifi_ds_trans_type {
+       WMI_NWIFI_TX_TRANS_MODE_NO              = 0,
+       WMI_NWIFI_TX_TRANS_MODE_AP2PBSS         = 1,
+       WMI_NWIFI_TX_TRANS_MODE_STA2PBSS        = 2,
+};
+
+enum wmi_vring_cfg_schd_params_priority {
+       WMI_SCH_PRIO_REGULAR                    = 0,
+       WMI_SCH_PRIO_HIGH                       = 1,
+};
+
+struct wmi_vring_cfg {
+       struct wmi_sw_ring_cfg tx_sw_ring;
+       u8 ringid;                              /* 0-23 vrings */
+
+       #define CIDXTID_CID_POS (0)
+       #define CIDXTID_CID_LEN (4)
+       #define CIDXTID_CID_MSK (0xF)
+       #define CIDXTID_TID_POS (4)
+       #define CIDXTID_TID_LEN (4)
+       #define CIDXTID_TID_MSK (0xF0)
+       u8 cidxtid;
+
+       u8 encap_trans_type;
+       u8 ds_cfg;                              /* 802.3 DS cfg */
+       u8 nwifi_ds_trans_type;
+
+       #define VRING_CFG_MAC_CTRL_LIFETIME_EN_POS (0)
+       #define VRING_CFG_MAC_CTRL_LIFETIME_EN_LEN (1)
+       #define VRING_CFG_MAC_CTRL_LIFETIME_EN_MSK (0x1)
+       #define VRING_CFG_MAC_CTRL_AGGR_EN_POS (1)
+       #define VRING_CFG_MAC_CTRL_AGGR_EN_LEN (1)
+       #define VRING_CFG_MAC_CTRL_AGGR_EN_MSK (0x2)
+       u8 mac_ctrl;
+
+       #define VRING_CFG_TO_RESOLUTION_VALUE_POS (0)
+       #define VRING_CFG_TO_RESOLUTION_VALUE_LEN (6)
+       #define VRING_CFG_TO_RESOLUTION_VALUE_MSK (0x3F)
+       u8 to_resolution;
+       u8 agg_max_wsize;
+       struct wmi_vring_cfg_schd schd_params;
+} __packed;
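+
+/*
+ * Illustrative packing of the cidxtid byte used here and in the ADDBA/DELBA
+ * structures below:
+ *   cidxtid = ((cid << CIDXTID_CID_POS) & CIDXTID_CID_MSK) |
+ *             ((tid << CIDXTID_TID_POS) & CIDXTID_TID_MSK);
+ */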
+
+enum wmi_vring_cfg_cmd_action {
+       WMI_VRING_CMD_ADD                       = 0,
+       WMI_VRING_CMD_MODIFY                    = 1,
+       WMI_VRING_CMD_DELETE                    = 2,
+};
+
+struct wmi_vring_cfg_cmd {
+       __le32 action;
+       struct wmi_vring_cfg vring_cfg;
+} __packed;
+
+/*
+ * WMI_VRING_BA_EN_CMDID
+ */
+struct wmi_vring_ba_en_cmd {
+       u8 ringid;
+       u8 agg_max_wsize;
+       __le16 ba_timeout;
+} __packed;
+
+/*
+ * WMI_VRING_BA_DIS_CMDID
+ */
+struct wmi_vring_ba_dis_cmd {
+       u8 ringid;
+       u8 reserved;
+       __le16 reason;
+} __packed;
+
+/*
+ * WMI_NOTIFY_REQ_CMDID
+ */
+struct wmi_notify_req_cmd {
+       u8 cid;
+       u8 reserved[3];
+       __le32 interval_usec;
+} __packed;
+
+/*
+ * WMI_CFG_RX_CHAIN_CMDID
+ */
+enum wmi_sniffer_cfg_mode {
+       WMI_SNIFFER_OFF                         = 0,
+       WMI_SNIFFER_ON                          = 1,
+};
+
+enum wmi_sniffer_cfg_phy_info_mode {
+       WMI_SNIFFER_PHY_INFO_DISABLED           = 0,
+       WMI_SNIFFER_PHY_INFO_ENABLED            = 1,
+};
+
+enum wmi_sniffer_cfg_phy_support {
+       WMI_SNIFFER_CP                          = 0,
+       WMI_SNIFFER_DP                          = 1,
+       WMI_SNIFFER_BOTH_PHYS                   = 2,
+};
+
+struct wmi_sniffer_cfg {
+       __le32 mode;            /* enum wmi_sniffer_cfg_mode */
+       __le32 phy_info_mode;   /* enum wmi_sniffer_cfg_phy_info_mode */
+       __le32 phy_support;     /* enum wmi_sniffer_cfg_phy_support */
+       u8 channel;
+       u8 reserved[3];
+} __packed;
+
+enum wmi_cfg_rx_chain_cmd_action {
+       WMI_RX_CHAIN_ADD                        = 0,
+       WMI_RX_CHAIN_DEL                        = 1,
+};
+
+enum wmi_cfg_rx_chain_cmd_decap_trans_type {
+       WMI_DECAP_TYPE_802_3                    = 0,
+       WMI_DECAP_TYPE_NATIVE_WIFI              = 1,
+};
+
+enum wmi_cfg_rx_chain_cmd_nwifi_ds_trans_type {
+       WMI_NWIFI_RX_TRANS_MODE_NO              = 0,
+       WMI_NWIFI_RX_TRANS_MODE_PBSS2AP         = 1,
+       WMI_NWIFI_RX_TRANS_MODE_PBSS2STA        = 2,
+};
+
+struct wmi_cfg_rx_chain_cmd {
+       __le32 action;
+       struct wmi_sw_ring_cfg rx_sw_ring;
+       u8 mid;
+       u8 decap_trans_type;
+
+       #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0)
+       #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1)
+       #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1)
+       u8 l2_802_3_offload_ctrl;
+
+       #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0)
+       #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_LEN (1)
+       #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_MSK (0x1)
+       #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_POS (1)
+       #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_LEN (1)
+       #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_MSK (0x2)
+       u8 l2_nwifi_offload_ctrl;
+
+       u8 vlan_id;
+       u8 nwifi_ds_trans_type;
+
+       #define L3_L4_CTRL_IPV4_CHECKSUM_EN_POS (0)
+       #define L3_L4_CTRL_IPV4_CHECKSUM_EN_LEN (1)
+       #define L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK (0x1)
+       #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS (1)
+       #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_LEN (1)
+       #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK (0x2)
+       u8 l3_l4_ctrl;
+
+       #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_POS (0)
+       #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_LEN (1)
+       #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_MSK (0x1)
+       #define RING_CTRL_OVERRIDE_WB_THRSH_POS (1)
+       #define RING_CTRL_OVERRIDE_WB_THRSH_LEN (1)
+       #define RING_CTRL_OVERRIDE_WB_THRSH_MSK (0x2)
+       #define RING_CTRL_OVERRIDE_ITR_THRSH_POS (2)
+       #define RING_CTRL_OVERRIDE_ITR_THRSH_LEN (1)
+       #define RING_CTRL_OVERRIDE_ITR_THRSH_MSK (0x4)
+       #define RING_CTRL_OVERRIDE_HOST_THRSH_POS (3)
+       #define RING_CTRL_OVERRIDE_HOST_THRSH_LEN (1)
+       #define RING_CTRL_OVERRIDE_HOST_THRSH_MSK (0x8)
+       u8 ring_ctrl;
+
+       __le16 prefetch_thrsh;
+       __le16 wb_thrsh;
+       __le32 itr_value;
+       __le16 host_thrsh;
+       u8 reserved[2];
+       struct wmi_sniffer_cfg sniffer_cfg;
+} __packed;
+
+/*
+ * WMI_RCP_ADDBA_RESP_CMDID
+ */
+struct wmi_rcp_addba_resp_cmd {
+
+       #define CIDXTID_CID_POS (0)
+       #define CIDXTID_CID_LEN (4)
+       #define CIDXTID_CID_MSK (0xF)
+       #define CIDXTID_TID_POS (4)
+       #define CIDXTID_TID_LEN (4)
+       #define CIDXTID_TID_MSK (0xF0)
+       u8 cidxtid;
+
+       u8 dialog_token;
+       __le16 status_code;
+       __le16 ba_param_set;    /* ieee80211_ba_parameterset field to send */
+       __le16 ba_timeout;
+} __packed;
+
+/*
+ * WMI_RCP_DELBA_CMDID
+ */
+struct wmi_rcp_delba_cmd {
+
+       #define CIDXTID_CID_POS (0)
+       #define CIDXTID_CID_LEN (4)
+       #define CIDXTID_CID_MSK (0xF)
+       #define CIDXTID_TID_POS (4)
+       #define CIDXTID_TID_LEN (4)
+       #define CIDXTID_TID_MSK (0xF0)
+       u8 cidxtid;
+
+       u8 reserved;
+       __le16 reason;
+} __packed;
+
+/*
+ * WMI_RCP_ADDBA_REQ_CMDID
+ */
+struct wmi_rcp_addba_req_cmd {
+
+       #define CIDXTID_CID_POS (0)
+       #define CIDXTID_CID_LEN (4)
+       #define CIDXTID_CID_MSK (0xF)
+       #define CIDXTID_TID_POS (4)
+       #define CIDXTID_TID_LEN (4)
+       #define CIDXTID_TID_MSK (0xF0)
+       u8 cidxtid;
+
+       u8 dialog_token;
+       /* ieee80211_ba_parameterset field as it received */
+       __le16 ba_param_set;
+       __le16 ba_timeout;
+       /* ieee80211_ba_seqstrl field as it received */
+       __le16 ba_seq_ctrl;
+} __packed;
+
+/*
+ * WMI_SET_MAC_ADDRESS_CMDID
+ */
+struct wmi_set_mac_address_cmd {
+       u8 mac[WMI_MAC_LEN];
+       u8 reserved[2];
+} __packed;
+
+
+/*
+ * WMI_EAPOL_TX_CMDID
+ */
+struct wmi_eapol_tx_cmd {
+       u8 dst_mac[WMI_MAC_LEN];
+       __le16 eapol_len;
+       u8 eapol[0];
+} __packed;
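+
+/*
+ * Filled by wmi_tx_eapol() in wmi.c: dst_mac is taken from the Ethernet
+ * header, eapol_len is the little-endian payload length, and the EAPOL
+ * frame itself follows in the flexible eapol[] array.
+ */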
+
+/*
+ * WMI_ECHO_CMDID
+ *
+ * Check FW is alive
+ *
+ * WMI_DEEP_ECHO_CMDID
+ *
+ * Check FW and ucode are alive
+ *
+ * Returned event: WMI_ECHO_RSP_EVENTID
+ * same event for both commands
+ */
+struct wmi_echo_cmd {
+       __le32 value;
+} __packed;
+
+/*
+ * WMI Events
+ */
+
+/*
+ * List of Events (target to host)
+ */
+enum wmi_event_id {
+       WMI_IMM_RSP_EVENTID                     = 0x0000,
+       WMI_READY_EVENTID                       = 0x1001,
+       WMI_CONNECT_EVENTID                     = 0x1002,
+       WMI_DISCONNECT_EVENTID                  = 0x1003,
+       WMI_SCAN_COMPLETE_EVENTID               = 0x100a,
+       WMI_REPORT_STATISTICS_EVENTID           = 0x100b,
+       WMI_RD_MEM_RSP_EVENTID                  = 0x1800,
+       WMI_FW_READY_EVENTID                    = 0x1801,
+       WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID      = 0x0200,
+       WMI_ECHO_RSP_EVENTID                    = 0x1803,
+       WMI_CONFIG_MAC_DONE_EVENTID             = 0x1805,
+       WMI_CONFIG_PHY_DEBUG_DONE_EVENTID       = 0x1806,
+       WMI_ADD_STATION_DONE_EVENTID            = 0x1807,
+       WMI_ADD_DEBUG_TX_PCKT_DONE_EVENTID      = 0x1808,
+       WMI_PHY_GET_STATISTICS_EVENTID          = 0x1809,
+       WMI_FS_TUNE_DONE_EVENTID                = 0x180a,
+       WMI_CORR_MEASURE_DONE_EVENTID           = 0x180b,
+       WMI_TEMP_SENSE_DONE_EVENTID             = 0x180e,
+       WMI_DC_CALIB_DONE_EVENTID               = 0x180f,
+       WMI_IQ_TX_CALIB_DONE_EVENTID            = 0x1811,
+       WMI_IQ_RX_CALIB_DONE_EVENTID            = 0x1812,
+       WMI_SET_WORK_MODE_DONE_EVENTID          = 0x1815,
+       WMI_LO_LEAKAGE_CALIB_DONE_EVENTID       = 0x1816,
+       WMI_MARLON_R_ACTIVATE_DONE_EVENTID      = 0x1817,
+       WMI_MARLON_R_READ_DONE_EVENTID          = 0x1818,
+       WMI_MARLON_R_WRITE_DONE_EVENTID         = 0x1819,
+       WMI_MARLON_R_TXRX_SEL_DONE_EVENTID      = 0x181a,
+       WMI_SILENT_RSSI_CALIB_DONE_EVENTID      = 0x181d,
+
+       WMI_CFG_RX_CHAIN_DONE_EVENTID           = 0x1820,
+       WMI_VRING_CFG_DONE_EVENTID              = 0x1821,
+       WMI_RX_ON_DONE_EVENTID                  = 0x1822,
+       WMI_BA_STATUS_EVENTID                   = 0x1823,
+       WMI_RCP_ADDBA_REQ_EVENTID               = 0x1824,
+       WMI_ADDBA_RESP_SENT_EVENTID             = 0x1825,
+       WMI_DELBA_EVENTID                       = 0x1826,
+       WMI_GET_SSID_EVENTID                    = 0x1828,
+       WMI_GET_PCP_CHANNEL_EVENTID             = 0x182a,
+       WMI_SW_TX_COMPLETE_EVENTID              = 0x182b,
+       WMI_RX_OFF_DONE_EVENTID                 = 0x182c,
+
+       WMI_READ_MAC_RXQ_EVENTID                = 0x1830,
+       WMI_READ_MAC_TXQ_EVENTID                = 0x1831,
+       WMI_WRITE_MAC_RXQ_EVENTID               = 0x1832,
+       WMI_WRITE_MAC_TXQ_EVENTID               = 0x1833,
+       WMI_WRITE_MAC_XQ_FIELD_EVENTID          = 0x1834,
+
+       WMI_BEAFORMING_MGMT_DONE_EVENTID        = 0x1836,
+       WMI_BF_TXSS_MGMT_DONE_EVENTID           = 0x1837,
+       WMI_BF_RXSS_MGMT_DONE_EVENTID           = 0x1839,
+       WMI_RS_MGMT_DONE_EVENTID                = 0x1852,
+       WMI_RF_MGMT_STATUS_EVENTID              = 0x1853,
+       WMI_BF_SM_MGMT_DONE_EVENTID             = 0x1838,
+       WMI_RX_MGMT_PACKET_EVENTID              = 0x1840,
+
+       /* Performance monitoring events */
+       WMI_DATA_PORT_OPEN_EVENTID              = 0x1860,
+       WMI_WBE_LINKDOWN_EVENTID                = 0x1861,
+
+       WMI_BF_CTRL_DONE_EVENTID                = 0x1862,
+       WMI_NOTIFY_REQ_DONE_EVENTID             = 0x1863,
+       WMI_GET_STATUS_DONE_EVENTID             = 0x1864,
+
+       WMI_UNIT_TEST_EVENTID                   = 0x1900,
+       WMI_FLASH_READ_DONE_EVENTID             = 0x1902,
+       WMI_FLASH_WRITE_DONE_EVENTID            = 0x1903,
+
+       WMI_SET_CHANNEL_EVENTID                 = 0x9000,
+       WMI_ASSOC_REQ_EVENTID                   = 0x9001,
+       WMI_EAPOL_RX_EVENTID                    = 0x9002,
+       WMI_MAC_ADDR_RESP_EVENTID               = 0x9003,
+       WMI_FW_VER_EVENTID                      = 0x9004,
+};
+
+/*
+ * Events data structures
+ */
+
+/*
+ * WMI_RF_MGMT_STATUS_EVENTID
+ */
+enum wmi_rf_status {
+       WMI_RF_ENABLED                  = 0,
+       WMI_RF_DISABLED_HW              = 1,
+       WMI_RF_DISABLED_SW              = 2,
+       WMI_RF_DISABLED_HW_SW           = 3,
+};
+
+struct wmi_rf_mgmt_status_event {
+       __le32 rf_status;
+} __packed;
+
+/*
+ * WMI_GET_STATUS_DONE_EVENTID
+ */
+struct wmi_get_status_done_event {
+       __le32 is_associated;
+       u8 cid;
+       u8 reserved0[3];
+       u8 bssid[WMI_MAC_LEN];
+       u8 channel;
+       u8 reserved1;
+       u8 network_type;
+       u8 reserved2[3];
+       __le32 ssid_len;
+       u8 ssid[WMI_MAX_SSID_LEN];
+       __le32 rf_status;
+       __le32 is_secured;
+} __packed;
+
+/*
+ * WMI_FW_VER_EVENTID
+ */
+struct wmi_fw_ver_event {
+       u8 major;
+       u8 minor;
+       __le16 subminor;
+       __le16 build;
+} __packed;
+
+/*
+ * WMI_MAC_ADDR_RESP_EVENTID
+ */
+struct wmi_mac_addr_resp_event {
+       u8 mac[WMI_MAC_LEN];
+       u8 auth_mode;
+       u8 crypt_mode;
+       __le32 offload_mode;
+} __packed;
+
+/*
+ * WMI_EAPOL_RX_EVENTID
+ */
+struct wmi_eapol_rx_event {
+       u8 src_mac[WMI_MAC_LEN];
+       __le16 eapol_len;
+       u8 eapol[0];
+} __packed;
+
+/*
+ * WMI_READY_EVENTID
+ */
+enum wmi_phy_capability {
+       WMI_11A_CAPABILITY              = 1,
+       WMI_11G_CAPABILITY              = 2,
+       WMI_11AG_CAPABILITY             = 3,
+       WMI_11NA_CAPABILITY             = 4,
+       WMI_11NG_CAPABILITY             = 5,
+       WMI_11NAG_CAPABILITY            = 6,
+       WMI_11AD_CAPABILITY             = 7,
+       WMI_11N_CAPABILITY_OFFSET = WMI_11NA_CAPABILITY - WMI_11A_CAPABILITY,
+};
+
+struct wmi_ready_event {
+       __le32 sw_version;
+       __le32 abi_version;
+       u8 mac[WMI_MAC_LEN];
+       u8 phy_capability;              /* enum wmi_phy_capability */
+       u8 reserved;
+} __packed;
+
+/*
+ * WMI_NOTIFY_REQ_DONE_EVENTID
+ */
+struct wmi_notify_req_done_event {
+       __le32 status;
+       __le64 tsf;
+       __le32 snr_val;
+       __le32 tx_tpt;
+       __le32 tx_goodput;
+       __le32 rx_goodput;
+       __le16 bf_mcs;
+       __le16 my_rx_sector;
+       __le16 my_tx_sector;
+       __le16 other_rx_sector;
+       __le16 other_tx_sector;
+       __le16 range;
+} __packed;
+
+/*
+ * WMI_CONNECT_EVENTID
+ */
+struct wmi_connect_event {
+       u8 channel;
+       u8 reserved0;
+       u8 bssid[WMI_MAC_LEN];
+       __le16 listen_interval;
+       __le16 beacon_interval;
+       u8 network_type;
+       u8 reserved1[3];
+       u8 beacon_ie_len;
+       u8 assoc_req_len;
+       u8 assoc_resp_len;
+       u8 cid;
+       u8 reserved2[3];
+       u8 assoc_info[0];
+} __packed;
+
+/*
+ * WMI_DISCONNECT_EVENTID
+ */
+enum wmi_disconnect_reason {
+       WMI_DIS_REASON_NO_NETWORK_AVAIL         = 1,
+       WMI_DIS_REASON_LOST_LINK                = 2, /* bmiss */
+       WMI_DIS_REASON_DISCONNECT_CMD           = 3,
+       WMI_DIS_REASON_BSS_DISCONNECTED         = 4,
+       WMI_DIS_REASON_AUTH_FAILED              = 5,
+       WMI_DIS_REASON_ASSOC_FAILED             = 6,
+       WMI_DIS_REASON_NO_RESOURCES_AVAIL       = 7,
+       WMI_DIS_REASON_CSERV_DISCONNECT         = 8,
+       WMI_DIS_REASON_INVALID_PROFILE          = 10,
+       WMI_DIS_REASON_DOT11H_CHANNEL_SWITCH    = 11,
+       WMI_DIS_REASON_PROFILE_MISMATCH         = 12,
+       WMI_DIS_REASON_CONNECTION_EVICTED       = 13,
+       WMI_DIS_REASON_IBSS_MERGE               = 14,
+};
+
+struct wmi_disconnect_event {
+       __le16 protocol_reason_status;  /* reason code, see 802.11 spec. */
+       u8 bssid[WMI_MAC_LEN];          /* set if known */
+       u8 disconnect_reason;           /* see wmi_disconnect_reason_e */
+       u8 assoc_resp_len;
+       u8 assoc_info[0];
+} __packed;
+
+/*
+ * WMI_SCAN_COMPLETE_EVENTID
+ */
+struct wmi_scan_complete_event {
+       __le32 status;
+} __packed;
+
+/*
+ * WMI_BA_STATUS_EVENTID
+ */
+enum wmi_vring_ba_status {
+       WMI_BA_AGREED                   = 0,
+       WMI_BA_NON_AGREED               = 1,
+};
+
+struct wmi_vring_ba_status_event {
+       __le16 status;
+       u8 reserved[2];
+       u8 ringid;
+       u8 agg_wsize;
+       __le16 ba_timeout;
+} __packed;
+
+/*
+ * WMI_DELBA_EVENTID
+ */
+struct wmi_delba_event {
+
+       #define CIDXTID_CID_POS (0)
+       #define CIDXTID_CID_LEN (4)
+       #define CIDXTID_CID_MSK (0xF)
+       #define CIDXTID_TID_POS (4)
+       #define CIDXTID_TID_LEN (4)
+       #define CIDXTID_TID_MSK (0xF0)
+       u8 cidxtid;
+
+       u8 from_initiator;
+       __le16 reason;
+} __packed;
+
+/*
+ * WMI_VRING_CFG_DONE_EVENTID
+ */
+enum wmi_vring_cfg_done_event_status {
+       WMI_VRING_CFG_SUCCESS           = 0,
+       WMI_VRING_CFG_FAILURE           = 1,
+};
+
+struct wmi_vring_cfg_done_event {
+       u8 ringid;
+       u8 status;
+       u8 reserved[2];
+       __le32 tx_vring_tail_ptr;
+} __packed;
+
+/*
+ * WMI_ADDBA_RESP_SENT_EVENTID
+ */
+enum wmi_rcp_addba_resp_sent_event_status {
+       WMI_ADDBA_SUCCESS               = 0,
+       WMI_ADDBA_FAIL                  = 1,
+};
+
+struct wmi_rcp_addba_resp_sent_event {
+
+       #define CIDXTID_CID_POS (0)
+       #define CIDXTID_CID_LEN (4)
+       #define CIDXTID_CID_MSK (0xF)
+       #define CIDXTID_TID_POS (4)
+       #define CIDXTID_TID_LEN (4)
+       #define CIDXTID_TID_MSK (0xF0)
+       u8 cidxtid;
+
+       u8 reserved;
+       __le16 status;
+} __packed;
+
+/*
+ * WMI_RCP_ADDBA_REQ_EVENTID
+ */
+struct wmi_rcp_addba_req_event {
+
+       #define CIDXTID_CID_POS (0)
+       #define CIDXTID_CID_LEN (4)
+       #define CIDXTID_CID_MSK (0xF)
+       #define CIDXTID_TID_POS (4)
+       #define CIDXTID_TID_LEN (4)
+       #define CIDXTID_TID_MSK (0xF0)
+       u8 cidxtid;
+
+       u8 dialog_token;
+       __le16 ba_param_set;    /* ieee80211_ba_parameterset as it received */
+       __le16 ba_timeout;
+       __le16 ba_seq_ctrl;     /* ieee80211_ba_seqstrl field as it received */
+} __packed;
+
+/*
+ * WMI_CFG_RX_CHAIN_DONE_EVENTID
+ */
+enum wmi_cfg_rx_chain_done_event_status {
+       WMI_CFG_RX_CHAIN_SUCCESS        = 1,
+};
+
+struct wmi_cfg_rx_chain_done_event {
+       __le32 rx_ring_tail_ptr;        /* Rx V-Ring Tail pointer */
+       __le32 status;
+} __packed;
+
+/*
+ * WMI_WBE_LINKDOWN_EVENTID
+ */
+enum wmi_wbe_link_down_event_reason {
+       WMI_WBE_REASON_USER_REQUEST     = 0,
+       WMI_WBE_REASON_RX_DISASSOC      = 1,
+       WMI_WBE_REASON_BAD_PHY_LINK     = 2,
+};
+
+struct wmi_wbe_link_down_event {
+       u8 cid;
+       u8 reserved[3];
+       __le32 reason;
+} __packed;
+
+/*
+ * WMI_DATA_PORT_OPEN_EVENTID
+ */
+struct wmi_data_port_open_event {
+       u8 cid;
+       u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_GET_PCP_CHANNEL_EVENTID
+ */
+struct wmi_get_pcp_channel_event {
+       u8 channel;
+       u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_SW_TX_COMPLETE_EVENTID
+ */
+enum wmi_sw_tx_status {
+       WMI_TX_SW_STATUS_SUCCESS                = 0,
+       WMI_TX_SW_STATUS_FAILED_NO_RESOURCES    = 1,
+       WMI_TX_SW_STATUS_FAILED_TX              = 2,
+};
+
+struct wmi_sw_tx_complete_event {
+       u8 status;      /* enum wmi_sw_tx_status */
+       u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_GET_SSID_EVENTID
+ */
+struct wmi_get_ssid_event {
+       __le32 ssid_len;
+       u8 ssid[WMI_MAX_SSID_LEN];
+} __packed;
+
+/*
+ * WMI_RX_MGMT_PACKET_EVENTID
+ */
+struct wmi_rx_mgmt_info {
+       u8 mcs;
+       s8 snr;
+       __le16 range;
+       __le16 stype;
+       __le16 status;
+       __le32 len;
+       u8 qid;
+       u8 mid;
+       u8 cid;
+       u8 channel;     /* From Radio MNGR */
+} __packed;
+
+struct wmi_rx_mgmt_packet_event {
+       struct wmi_rx_mgmt_info info;
+       u8 payload[0];
+} __packed;
+
+/*
+ * WMI_ECHO_RSP_EVENTID
+ */
+struct wmi_echo_event {
+       __le32 echoed_value;
+} __packed;
+
+#endif /* __WILOCITY_WMI_H__ */
index b298e5d68be2f0a58cf02d45d2ccd9a1e1e464bd..10e288d470e75be03ef936048fe70574ac3ea2c2 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/hw_random.h>
 #include <linux/bcma/bcma.h>
 #include <linux/ssb/ssb.h>
+#include <linux/completion.h>
 #include <net/mac80211.h>
 
 #include "debugfs.h"
@@ -722,6 +723,10 @@ enum b43_firmware_file_type {
 struct b43_request_fw_context {
        /* The device we are requesting the fw for. */
        struct b43_wldev *dev;
+       /* a completion event structure needed if this call is asynchronous */
+       struct completion fw_load_complete;
+       /* a pointer to the firmware object */
+       const struct firmware *blob;
        /* The type of firmware to request. */
        enum b43_firmware_file_type req_type;
        /* Error messages for each firmware type. */
index 16ab280359bd9ffbb9ee68770606d949b3060e6a..806e34c19281792b4444ad1e6b8ea03724c9c773 100644 (file)
@@ -2088,11 +2088,18 @@ static void b43_print_fw_helptext(struct b43_wl *wl, bool error)
                b43warn(wl, text);
 }
 
+static void b43_fw_cb(const struct firmware *firmware, void *context)
+{
+       struct b43_request_fw_context *ctx = context;
+
+       ctx->blob = firmware;
+       complete(&ctx->fw_load_complete);
+}
+
 int b43_do_request_fw(struct b43_request_fw_context *ctx,
                      const char *name,
-                     struct b43_firmware_file *fw)
+                     struct b43_firmware_file *fw, bool async)
 {
-       const struct firmware *blob;
        struct b43_fw_header *hdr;
        u32 size;
        int err;
@@ -2131,11 +2138,31 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
                B43_WARN_ON(1);
                return -ENOSYS;
        }
-       err = request_firmware(&blob, ctx->fwname, ctx->dev->dev->dev);
+       if (async) {
+               /* do this part asynchronously */
+               init_completion(&ctx->fw_load_complete);
+               err = request_firmware_nowait(THIS_MODULE, 1, ctx->fwname,
+                                             ctx->dev->dev->dev, GFP_KERNEL,
+                                             ctx, b43_fw_cb);
+               if (err < 0) {
+                       pr_err("Unable to load firmware\n");
+                       return err;
+               }
+               /* stall here until fw ready */
+               wait_for_completion(&ctx->fw_load_complete);
+               if (ctx->blob)
+                       goto fw_ready;
+       /* On some ARM systems, the async request will fail, but the next sync
+        * request works. For this reason, we fall through here.
+        */
+       }
+       err = request_firmware(&ctx->blob, ctx->fwname,
+                              ctx->dev->dev->dev);
        if (err == -ENOENT) {
                snprintf(ctx->errors[ctx->req_type],
                         sizeof(ctx->errors[ctx->req_type]),
-                        "Firmware file \"%s\" not found\n", ctx->fwname);
+                        "Firmware file \"%s\" not found\n",
+                        ctx->fwname);
                return err;
        } else if (err) {
                snprintf(ctx->errors[ctx->req_type],
@@ -2144,14 +2171,15 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
                         ctx->fwname, err);
                return err;
        }
-       if (blob->size < sizeof(struct b43_fw_header))
+fw_ready:
+       if (ctx->blob->size < sizeof(struct b43_fw_header))
                goto err_format;
-       hdr = (struct b43_fw_header *)(blob->data);
+       hdr = (struct b43_fw_header *)(ctx->blob->data);
        switch (hdr->type) {
        case B43_FW_TYPE_UCODE:
        case B43_FW_TYPE_PCM:
                size = be32_to_cpu(hdr->size);
-               if (size != blob->size - sizeof(struct b43_fw_header))
+               if (size != ctx->blob->size - sizeof(struct b43_fw_header))
                        goto err_format;
                /* fallthrough */
        case B43_FW_TYPE_IV:
@@ -2162,7 +2190,7 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
                goto err_format;
        }
 
-       fw->data = blob;
+       fw->data = ctx->blob;
        fw->filename = name;
        fw->type = ctx->req_type;
 
@@ -2172,7 +2200,7 @@ err_format:
        snprintf(ctx->errors[ctx->req_type],
                 sizeof(ctx->errors[ctx->req_type]),
                 "Firmware file \"%s\" format error.\n", ctx->fwname);
-       release_firmware(blob);
+       release_firmware(ctx->blob);
 
        return -EPROTO;
 }
@@ -2223,7 +2251,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
                        goto err_no_ucode;
                }
        }
-       err = b43_do_request_fw(ctx, filename, &fw->ucode);
+       err = b43_do_request_fw(ctx, filename, &fw->ucode, true);
        if (err)
                goto err_load;
 
@@ -2235,7 +2263,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
        else
                goto err_no_pcm;
        fw->pcm_request_failed = false;
-       err = b43_do_request_fw(ctx, filename, &fw->pcm);
+       err = b43_do_request_fw(ctx, filename, &fw->pcm, false);
        if (err == -ENOENT) {
                /* We did not find a PCM file? Not fatal, but
                 * core rev <= 10 must do without hwcrypto then. */
@@ -2296,7 +2324,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
        default:
                goto err_no_initvals;
        }
-       err = b43_do_request_fw(ctx, filename, &fw->initvals);
+       err = b43_do_request_fw(ctx, filename, &fw->initvals, false);
        if (err)
                goto err_load;
 
@@ -2355,7 +2383,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
        default:
                goto err_no_initvals;
        }
-       err = b43_do_request_fw(ctx, filename, &fw->initvals_band);
+       err = b43_do_request_fw(ctx, filename, &fw->initvals_band, false);
        if (err)
                goto err_load;
 
index 8c684cd3352942ccaadcf843290d014cbb8599bd..abac25ee958dad54f140c550ef62ba05bf2bc3ae 100644 (file)
@@ -137,9 +137,8 @@ void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on);
 
 
 struct b43_request_fw_context;
-int b43_do_request_fw(struct b43_request_fw_context *ctx,
-                     const char *name,
-                     struct b43_firmware_file *fw);
+int b43_do_request_fw(struct b43_request_fw_context *ctx, const char *name,
+                     struct b43_firmware_file *fw, bool async);
 void b43_do_release_fw(struct b43_firmware_file *fw);
 
 #endif /* B43_MAIN_H_ */
index 1fbd8ecbe2ea293a28d6a65648fe011a2fff3fa6..e5fd20994bec256df4ec88f96182006e54ada0d6 100644 (file)
@@ -36,6 +36,7 @@
 #include "debug.h"
 
 #define N_TX_QUEUES    4 /* #tx queues on mac80211<->driver interface */
+#define BRCMS_FLUSH_TIMEOUT    500 /* msec */
 
 /* Flags we support */
 #define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
@@ -708,16 +709,29 @@ static void brcms_ops_rfkill_poll(struct ieee80211_hw *hw)
        wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, blocked);
 }
 
+static bool brcms_tx_flush_completed(struct brcms_info *wl)
+{
+       bool result;
+
+       spin_lock_bh(&wl->lock);
+       result = brcms_c_tx_flush_completed(wl->wlc);
+       spin_unlock_bh(&wl->lock);
+       return result;
+}
+
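+/*
+ * brcms_ops_flush() sleeps on tx_flush_wq (woken from brcms_dpc()) until
+ * brcms_c_tx_flush_completed() reports the tx queues empty, or until the
+ * timeout expires.
+ */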
 static void brcms_ops_flush(struct ieee80211_hw *hw, bool drop)
 {
        struct brcms_info *wl = hw->priv;
+       int ret;
 
        no_printk("%s: drop = %s\n", __func__, drop ? "true" : "false");
 
-       /* wait for packet queue and dma fifos to run empty */
-       spin_lock_bh(&wl->lock);
-       brcms_c_wait_for_tx_completion(wl->wlc, drop);
-       spin_unlock_bh(&wl->lock);
+       ret = wait_event_timeout(wl->tx_flush_wq,
+                                brcms_tx_flush_completed(wl),
+                                msecs_to_jiffies(BRCMS_FLUSH_TIMEOUT));
+
+       brcms_dbg_mac80211(wl->wlc->hw->d11core,
+                          "ret=%d\n", jiffies_to_msecs(ret));
 }
 
 static const struct ieee80211_ops brcms_ops = {
@@ -772,6 +786,7 @@ void brcms_dpc(unsigned long data)
 
  done:
        spin_unlock_bh(&wl->lock);
+       wake_up(&wl->tx_flush_wq);
 }
 
 /*
@@ -1020,6 +1035,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
 
        atomic_set(&wl->callbacks, 0);
 
+       init_waitqueue_head(&wl->tx_flush_wq);
+
        /* setup the bottom half handler */
        tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
 
@@ -1407,9 +1424,10 @@ void brcms_add_timer(struct brcms_timer *t, uint ms, int periodic)
 #endif
        t->ms = ms;
        t->periodic = (bool) periodic;
-       t->set = true;
-
-       atomic_inc(&t->wl->callbacks);
+       if (!t->set) {
+               t->set = true;
+               atomic_inc(&t->wl->callbacks);
+       }
 
        ieee80211_queue_delayed_work(hw, &t->dly_wrk, msecs_to_jiffies(ms));
 }
@@ -1608,13 +1626,3 @@ bool brcms_rfkill_set_hw_state(struct brcms_info *wl)
        spin_lock_bh(&wl->lock);
        return blocked;
 }
-
-/*
- * precondition: perimeter lock has been acquired
- */
-void brcms_msleep(struct brcms_info *wl, uint ms)
-{
-       spin_unlock_bh(&wl->lock);
-       msleep(ms);
-       spin_lock_bh(&wl->lock);
-}
index 9358bd5ebd35d016058d1238d11aca6880fdae19..947ccacf43e6f498dacfaceed5c3853c00c99d96 100644 (file)
@@ -68,6 +68,8 @@ struct brcms_info {
        spinlock_t lock;        /* per-device perimeter lock */
        spinlock_t isr_lock;    /* per-device ISR synchronization lock */
 
+       /* tx flush */
+       wait_queue_head_t tx_flush_wq;
 
        /* timer related fields */
        atomic_t callbacks;     /* # outstanding callback functions */
@@ -100,7 +102,6 @@ extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
 extern void brcms_free_timer(struct brcms_timer *timer);
 extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
 extern bool brcms_del_timer(struct brcms_timer *timer);
-extern void brcms_msleep(struct brcms_info *wl, uint ms);
 extern void brcms_dpc(unsigned long data);
 extern void brcms_timer(struct brcms_timer *t);
 extern void brcms_fatal_error(struct brcms_info *wl);
index 17594de4199ef58a30488e2c7579ec02c521d9ea..8b5839008af32a11afef6d4042db67a0d8a9c16d 100644 (file)
@@ -1027,7 +1027,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
 static bool
 brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
 {
-       bool morepending = false;
        struct bcma_device *core;
        struct tx_status txstatus, *txs;
        u32 s1, s2;
@@ -1041,23 +1040,20 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
        txs = &txstatus;
        core = wlc_hw->d11core;
        *fatal = false;
-       s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
-       while (!(*fatal)
-              && (s1 & TXS_V)) {
-               /* !give others some time to run! */
-               if (n >= max_tx_num) {
-                       morepending = true;
-                       break;
-               }
 
+       while (n < max_tx_num) {
+               s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
                if (s1 == 0xffffffff) {
                        brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
                                  __func__);
                        *fatal = true;
                        return false;
                }
-               s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
+               /* only process when valid */
+               if (!(s1 & TXS_V))
+                       break;
 
+               s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
                txs->status = s1 & TXS_STATUS_MASK;
                txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
                txs->sequence = s2 & TXS_SEQ_MASK;
@@ -1065,15 +1061,12 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
                txs->lasttxtime = 0;
 
                *fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs);
-
-               s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
+               if (*fatal)
+                       return false;
                n++;
        }
 
-       if (*fatal)
-               return false;
-
-       return morepending;
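+       /* returns true when the batch limit was hit and more may be pending */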
+       return n >= max_tx_num;
 }
 
 static void brcms_c_tbtt(struct brcms_c_info *wlc)
@@ -7518,25 +7511,16 @@ int brcms_c_get_curband(struct brcms_c_info *wlc)
        return wlc->band->bandunit;
 }
 
-void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop)
+bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc)
 {
-       int timeout = 20;
        int i;
 
        /* Kick DMA to send any pending AMPDU */
        for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++)
                if (wlc->hw->di[i])
-                       dma_txflush(wlc->hw->di[i]);
-
-       /* wait for queue and DMA fifos to run dry */
-       while (brcms_txpktpendtot(wlc) > 0) {
-               brcms_msleep(wlc->wl, 1);
-
-               if (--timeout == 0)
-                       break;
-       }
+                       dma_kick_tx(wlc->hw->di[i]);
 
-       WARN_ON_ONCE(timeout == 0);
+       return !brcms_txpktpendtot(wlc);
 }
 
 void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)
index 4fb2834f4e6483f7c9321666c75598e6f9fa21fd..b0f14b7b8616e63ed5f2ed954ff47eddab7ff0d0 100644 (file)
@@ -314,8 +314,6 @@ extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
 extern void brcms_c_scan_start(struct brcms_c_info *wlc);
 extern void brcms_c_scan_stop(struct brcms_c_info *wlc);
 extern int brcms_c_get_curband(struct brcms_c_info *wlc);
-extern void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc,
-                                          bool drop);
 extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
 extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
 extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
@@ -332,5 +330,6 @@ extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
 extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
 extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
 extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
+extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
 
 #endif                         /* _BRCM_PUB_H_ */
index d92b21a8e5977c19b528dfe9038d23aec96f708c..b3ab7b7becae1c1aae93f99b6a18be26ae7c1c7c 100644 (file)
@@ -2181,9 +2181,10 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
        mod_delayed_work(system_wq, &priv->rf_kill, round_jiffies_relative(HZ));
 }
 
-static void send_scan_event(void *data)
+static void ipw2100_scan_event(struct work_struct *work)
 {
-       struct ipw2100_priv *priv = data;
+       struct ipw2100_priv *priv = container_of(work, struct ipw2100_priv,
+                                                scan_event.work);
        union iwreq_data wrqu;
 
        wrqu.data.length = 0;
@@ -2191,18 +2192,6 @@ static void send_scan_event(void *data)
        wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
 }
 
-static void ipw2100_scan_event_later(struct work_struct *work)
-{
-       send_scan_event(container_of(work, struct ipw2100_priv,
-                                       scan_event_later.work));
-}
-
-static void ipw2100_scan_event_now(struct work_struct *work)
-{
-       send_scan_event(container_of(work, struct ipw2100_priv,
-                                       scan_event_now));
-}
-
 static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
 {
        IPW_DEBUG_SCAN("scan complete\n");
@@ -2212,13 +2201,11 @@ static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
 
        /* Only userspace-requested scan completion events go out immediately */
        if (!priv->user_requested_scan) {
-               if (!delayed_work_pending(&priv->scan_event_later))
-                       schedule_delayed_work(&priv->scan_event_later,
-                                             round_jiffies_relative(msecs_to_jiffies(4000)));
+               schedule_delayed_work(&priv->scan_event,
+                                     round_jiffies_relative(msecs_to_jiffies(4000)));
        } else {
                priv->user_requested_scan = 0;
-               cancel_delayed_work(&priv->scan_event_later);
-               schedule_work(&priv->scan_event_now);
+               mod_delayed_work(system_wq, &priv->scan_event, 0);
        }
 }
 
@@ -4459,8 +4446,7 @@ static void ipw2100_kill_works(struct ipw2100_priv *priv)
        cancel_delayed_work_sync(&priv->wx_event_work);
        cancel_delayed_work_sync(&priv->hang_check);
        cancel_delayed_work_sync(&priv->rf_kill);
-       cancel_work_sync(&priv->scan_event_now);
-       cancel_delayed_work_sync(&priv->scan_event_later);
+       cancel_delayed_work_sync(&priv->scan_event);
 }
 
 static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
@@ -6195,8 +6181,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
        INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work);
        INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check);
        INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
-       INIT_WORK(&priv->scan_event_now, ipw2100_scan_event_now);
-       INIT_DELAYED_WORK(&priv->scan_event_later, ipw2100_scan_event_later);
+       INIT_DELAYED_WORK(&priv->scan_event, ipw2100_scan_event);
 
        tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
                     ipw2100_irq_tasklet, (unsigned long)priv);
index 5fe17cbab1f313c7095595fa0d8a0d31b1435e56..c6d78790cb0d0a44ce939c07841641246f835701 100644 (file)
@@ -577,8 +577,7 @@ struct ipw2100_priv {
        struct delayed_work wx_event_work;
        struct delayed_work hang_check;
        struct delayed_work rf_kill;
-       struct work_struct scan_event_now;
-       struct delayed_work scan_event_later;
+       struct delayed_work scan_event;
 
        int user_requested_scan;
 
index 844f201b7b70080b19c4e7ae70bc31f375214a7c..2c2d6db0536c315e9af127d356740150e9ce3d84 100644 (file)
@@ -4480,18 +4480,11 @@ static void handle_scan_event(struct ipw_priv *priv)
 {
        /* Only userspace-requested scan completion events go out immediately */
        if (!priv->user_requested_scan) {
-               if (!delayed_work_pending(&priv->scan_event))
-                       schedule_delayed_work(&priv->scan_event,
-                                             round_jiffies_relative(msecs_to_jiffies(4000)));
+               schedule_delayed_work(&priv->scan_event,
+                                     round_jiffies_relative(msecs_to_jiffies(4000)));
        } else {
-               union iwreq_data wrqu;
-
                priv->user_requested_scan = 0;
-               cancel_delayed_work(&priv->scan_event);
-
-               wrqu.data.length = 0;
-               wrqu.data.flags = 0;
-               wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
+               mod_delayed_work(system_wq, &priv->scan_event, 0);
        }
 }
 
index d604b4036a764358e52bd2c2ca9d02bd24f14ce9..3726cd6fcd754812d65cb38d89782e96aace0de8 100644 (file)
@@ -3273,7 +3273,7 @@ il3945_store_measurement(struct device *d, struct device_attribute *attr,
 
        if (count) {
                char *p = buffer;
-               strncpy(buffer, buf, min(sizeof(buffer), count));
+               strlcpy(buffer, buf, sizeof(buffer));
                channel = simple_strtoul(p, NULL, 0);
                if (channel)
                        params.channel = channel;
index 7e16d10a7f140e4bc0a1414af1ec317f7284b9c6..90b8970eadf0fa7c296be194c2d18ef9266b1043 100644 (file)
@@ -3958,17 +3958,21 @@ il_connection_init_rx_config(struct il_priv *il)
 
        memset(&il->staging, 0, sizeof(il->staging));
 
-       if (!il->vif) {
+       switch (il->iw_mode) {
+       case NL80211_IFTYPE_UNSPECIFIED:
                il->staging.dev_type = RXON_DEV_TYPE_ESS;
-       } else if (il->vif->type == NL80211_IFTYPE_STATION) {
+               break;
+       case NL80211_IFTYPE_STATION:
                il->staging.dev_type = RXON_DEV_TYPE_ESS;
                il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
-       } else if (il->vif->type == NL80211_IFTYPE_ADHOC) {
+               break;
+       case NL80211_IFTYPE_ADHOC:
                il->staging.dev_type = RXON_DEV_TYPE_IBSS;
                il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
                il->staging.filter_flags =
                    RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
-       } else {
+               break;
+       default:
                IL_ERR("Unsupported interface type %d\n", il->vif->type);
                return;
        }
@@ -4550,8 +4554,7 @@ out:
 EXPORT_SYMBOL(il_mac_add_interface);
 
 static void
-il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
-                     bool mode_change)
+il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif)
 {
        lockdep_assert_held(&il->mutex);
 
@@ -4560,9 +4563,7 @@ il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
                il_force_scan_end(il);
        }
 
-       if (!mode_change)
-               il_set_mode(il);
-
+       il_set_mode(il);
 }
 
 void
@@ -4575,8 +4576,8 @@ il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 
        WARN_ON(il->vif != vif);
        il->vif = NULL;
-
-       il_teardown_interface(il, vif, false);
+       il->iw_mode = NL80211_IFTYPE_UNSPECIFIED;
+       il_teardown_interface(il, vif);
        memset(il->bssid, 0, ETH_ALEN);
 
        D_MAC80211("leave\n");
@@ -4685,18 +4686,10 @@ il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        }
 
        /* success */
-       il_teardown_interface(il, vif, true);
        vif->type = newtype;
        vif->p2p = false;
-       err = il_set_mode(il);
-       WARN_ON(err);
-       /*
-        * We've switched internally, but submitting to the
-        * device may have failed for some reason. Mask this
-        * error, because otherwise mac80211 will not switch
-        * (and set the interface type back) and we'll be
-        * out of sync with it.
-        */
+       il->iw_mode = newtype;
+       il_teardown_interface(il, vif);
        err = 0;
 
 out:
index da21328ca8ed84e9e06e3aebde44fade0973e870..279796419ea0ca036800483aad59321e881efbe5 100644 (file)
@@ -1079,6 +1079,8 @@ static void iwlagn_set_tx_status(struct iwl_priv *priv,
 {
        u16 status = le16_to_cpu(tx_resp->status.status);
 
+       info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
        info->status.rates[0].count = tx_resp->failure_frame + 1;
        info->flags |= iwl_tx_status_to_mac80211(status);
        iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
index dad4c4aad91f2a461cdebfc9ac879b51a36db827..8389cd38338ba70766561d13c35591e1073faca6 100644 (file)
@@ -1166,6 +1166,7 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
        else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
                 !trans_pcie->inta)
                iwl_enable_interrupts(trans);
+       return IRQ_HANDLED;
 
 none:
        /* re-enable interrupts here since we don't have anything to service. */
index a875499f89452d7ab4a4307b72c0475d5829f470..cdb11b3964e27dd68de20ac68ab8442772a070a1 100644 (file)
@@ -1459,7 +1459,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
        struct cfg80211_ssid req_ssid;
        int ret, auth_type = 0;
        struct cfg80211_bss *bss = NULL;
-       u8 is_scanning_required = 0, config_bands = 0;
+       u8 is_scanning_required = 0;
 
        memset(&req_ssid, 0, sizeof(struct cfg80211_ssid));
 
@@ -1478,19 +1478,6 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
        /* disconnect before try to associate */
        mwifiex_deauthenticate(priv, NULL);
 
-       if (channel) {
-               if (mode == NL80211_IFTYPE_STATION) {
-                       if (channel->band == IEEE80211_BAND_2GHZ)
-                               config_bands = BAND_B | BAND_G | BAND_GN;
-                       else
-                               config_bands = BAND_A | BAND_AN;
-
-                       if (!((config_bands | priv->adapter->fw_bands) &
-                             ~priv->adapter->fw_bands))
-                               priv->adapter->config_bands = config_bands;
-               }
-       }
-
        /* As this is new association, clear locally stored
         * keys and security related flags */
        priv->sec_info.wpa_enabled = false;
@@ -1707,9 +1694,9 @@ static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
 
                if (cfg80211_get_chandef_type(&params->chandef) !=
                                                NL80211_CHAN_NO_HT)
-                       config_bands |= BAND_GN;
+                       config_bands |= BAND_G | BAND_GN;
        } else {
-               if (cfg80211_get_chandef_type(&params->chandef) !=
+               if (cfg80211_get_chandef_type(&params->chandef) ==
                                                NL80211_CHAN_NO_HT)
                        config_bands = BAND_A;
                else
index 13fbc4eb15952fe375be1e10c55518bb2b6b3836..b879e1338a54f5347a7a5f0b955b98a3f3566b66 100644 (file)
@@ -161,7 +161,7 @@ static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
 
        if (pdev) {
                card = (struct pcie_service_card *) pci_get_drvdata(pdev);
-               if (!card || card->adapter) {
+               if (!card || !card->adapter) {
                        pr_err("Card or adapter structure is not valid\n");
                        return 0;
                }
index 9189a32b7844b713815636d03517e7532da31f94..973a9d90e9ea06542fe6b7e9169c0de6d4922531 100644 (file)
@@ -1563,7 +1563,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
                        scan_rsp->number_of_sets);
                ret = -1;
-               goto done;
+               goto check_next_scan;
        }
 
        bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
@@ -1634,7 +1634,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                if (!beacon_size || beacon_size > bytes_left) {
                        bss_info += bytes_left;
                        bytes_left = 0;
-                       return -1;
+                       ret = -1;
+                       goto check_next_scan;
                }
 
                /* Initialize the current working beacon pointer for this BSS
@@ -1690,7 +1691,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                                dev_err(priv->adapter->dev,
                                        "%s: bytes left < IE length\n",
                                        __func__);
-                               goto done;
+                               goto check_next_scan;
                        }
                        if (element_id == WLAN_EID_DS_PARAMS) {
                                channel = *(current_ptr + sizeof(struct ieee_types_header));
@@ -1753,6 +1754,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                }
        }
 
+check_next_scan:
        spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
        if (list_empty(&adapter->scan_pending_q)) {
                spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
@@ -1813,7 +1815,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                }
        }
 
-done:
        return ret;
 }
 
index 5a1c1d0e5599ab2689b8fee3d1e81d796dead8fc..f2874c3392b4d964197b9c944b93787336e31a69 100644 (file)
@@ -1752,6 +1752,8 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
 static struct mmc_host *reset_host;
 static void sdio_card_reset_worker(struct work_struct *work)
 {
+       struct mmc_host *target = reset_host;
+
        /* The actual reset operation must be run outside of driver thread.
         * This is because mmc_remove_host() will cause the device to be
         * instantly destroyed, and the driver then needs to end its thread,
@@ -1761,10 +1763,10 @@ static void sdio_card_reset_worker(struct work_struct *work)
         */
 
        pr_err("Resetting card...\n");
-       mmc_remove_host(reset_host);
+       mmc_remove_host(target);
        /* 20ms delay is based on experiment with sdhci controller */
        mdelay(20);
-       mmc_add_host(reset_host);
+       mmc_add_host(target);
 }
 static DECLARE_WORK(card_reset_work, sdio_card_reset_worker);
 
@@ -1773,9 +1775,6 @@ static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter)
 {
        struct sdio_mmc_card *card = adapter->card;
 
-       if (work_pending(&card_reset_work))
-               return;
-
        reset_host = card->func->card->host;
        schedule_work(&card_reset_work);
 }
index cb682561c43898fd6f27c03ab3aba38adbf269d8..f542bb8ccbc8d4ce6859e8fff1e45784183f8fbb 100644 (file)
@@ -56,7 +56,6 @@ int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
  */
 int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
 {
-       bool cancel_flag = false;
        int status;
        struct cmd_ctrl_node *cmd_queued;
 
@@ -70,14 +69,11 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
        atomic_inc(&adapter->cmd_pending);
 
        /* Wait for completion */
-       wait_event_interruptible(adapter->cmd_wait_q.wait,
-                                *(cmd_queued->condition));
-       if (!*(cmd_queued->condition))
-               cancel_flag = true;
-
-       if (cancel_flag) {
-               mwifiex_cancel_pending_ioctl(adapter);
-               dev_dbg(adapter->dev, "cmd cancel\n");
+       status = wait_event_interruptible(adapter->cmd_wait_q.wait,
+                                         *(cmd_queued->condition));
+       if (status) {
+               dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
+               return status;
        }
 
        status = adapter->cmd_wait_q.status;
@@ -287,6 +283,20 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
                if (ret)
                        goto done;
 
+               if (bss_desc) {
+                       u8 config_bands = 0;
+
+                       if (mwifiex_band_to_radio_type((u8) bss_desc->bss_band)
+                           == HostCmd_SCAN_RADIO_TYPE_BG)
+                               config_bands = BAND_B | BAND_G | BAND_GN;
+                       else
+                               config_bands = BAND_A | BAND_AN;
+
+                       if (!((config_bands | adapter->fw_bands) &
+                             ~adapter->fw_bands))
+                               adapter->config_bands = config_bands;
+               }
+
                ret = mwifiex_check_network_compatibility(priv, bss_desc);
                if (ret)
                        goto done;
@@ -496,8 +506,11 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
                return false;
        }
 
-       wait_event_interruptible(adapter->hs_activate_wait_q,
-                                adapter->hs_activate_wait_q_woken);
+       if (wait_event_interruptible(adapter->hs_activate_wait_q,
+                                    adapter->hs_activate_wait_q_woken)) {
+               dev_err(adapter->dev, "hs_activate_wait_q terminated\n");
+               return false;
+       }
 
        return true;
 }
index f221b95b90b3c2935c67e9c272d5f1c2a89000d4..a00a03ea4ec99dc90a71520e1b56daa462632b38 100644 (file)
@@ -318,20 +318,20 @@ struct mwl8k_sta {
 #define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv))
 
 static const struct ieee80211_channel mwl8k_channels_24[] = {
-       { .center_freq = 2412, .hw_value = 1, },
-       { .center_freq = 2417, .hw_value = 2, },
-       { .center_freq = 2422, .hw_value = 3, },
-       { .center_freq = 2427, .hw_value = 4, },
-       { .center_freq = 2432, .hw_value = 5, },
-       { .center_freq = 2437, .hw_value = 6, },
-       { .center_freq = 2442, .hw_value = 7, },
-       { .center_freq = 2447, .hw_value = 8, },
-       { .center_freq = 2452, .hw_value = 9, },
-       { .center_freq = 2457, .hw_value = 10, },
-       { .center_freq = 2462, .hw_value = 11, },
-       { .center_freq = 2467, .hw_value = 12, },
-       { .center_freq = 2472, .hw_value = 13, },
-       { .center_freq = 2484, .hw_value = 14, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417, .hw_value = 2, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422, .hw_value = 3, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427, .hw_value = 4, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432, .hw_value = 5, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437, .hw_value = 6, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442, .hw_value = 7, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447, .hw_value = 8, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452, .hw_value = 9, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457, .hw_value = 10, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462, .hw_value = 11, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467, .hw_value = 12, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472, .hw_value = 13, },
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484, .hw_value = 14, },
 };
 
 static const struct ieee80211_rate mwl8k_rates_24[] = {
@@ -352,10 +352,10 @@ static const struct ieee80211_rate mwl8k_rates_24[] = {
 };
 
 static const struct ieee80211_channel mwl8k_channels_50[] = {
-       { .center_freq = 5180, .hw_value = 36, },
-       { .center_freq = 5200, .hw_value = 40, },
-       { .center_freq = 5220, .hw_value = 44, },
-       { .center_freq = 5240, .hw_value = 48, },
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5180, .hw_value = 36, },
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, },
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, },
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, },
 };
 
 static const struct ieee80211_rate mwl8k_rates_50[] = {
@@ -4250,9 +4250,11 @@ static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
        p->amsdu_enabled = 0;
 
        rc = mwl8k_post_cmd(hw, &cmd->header);
+       if (!rc)
+               rc = p->station_id;
        kfree(cmd);
 
-       return rc ? rc : p->station_id;
+       return rc;
 }
 
 static int mwl8k_cmd_update_stadb_del(struct ieee80211_hw *hw,
index 21b1bbb93a7e41452720a6f9ede17cba3951393e..b80bc4612581857455c2e076de835f66ac5be710 100644 (file)
@@ -57,12 +57,12 @@ config RTL8192CU
 
 config RTLWIFI
        tristate
-       depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE
+       depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE || RTL8723AE
        default m
 
 config RTLWIFI_DEBUG
        bool "Additional debugging output"
-       depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE
+       depends on RTL8192CE || RTL8192CU || RTL8192SE || RTL8192DE || RTL8723AE
        default y
 
 config RTL8192C_COMMON
index 4494d130b37cb0d0ad585eea5142e0c312c86b26..0f8b05185edaeacb6ef686e5e5a1ca0151866916 100644 (file)
@@ -1004,7 +1004,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
                                         is_tx ? "Tx" : "Rx");
 
                                if (is_tx) {
-                                       rtl_lps_leave(hw);
+                                       schedule_work(&rtlpriv->
+                                                     works.lps_leave_work);
                                        ppsc->last_delaylps_stamp_jiffies =
                                            jiffies;
                                }
@@ -1014,7 +1015,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
                }
        } else if (ETH_P_ARP == ether_type) {
                if (is_tx) {
-                       rtl_lps_leave(hw);
+                       schedule_work(&rtlpriv->works.lps_leave_work);
                        ppsc->last_delaylps_stamp_jiffies = jiffies;
                }
 
@@ -1024,7 +1025,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
                         "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx");
 
                if (is_tx) {
-                       rtl_lps_leave(hw);
+                       schedule_work(&rtlpriv->works.lps_leave_work);
                        ppsc->last_delaylps_stamp_jiffies = jiffies;
                }
 
index 1d5d3604e3e03875f5e9ec4453ad2291c5de3c33..246e5352f2e15a850dbc7971c08bdbbcffbc7172 100644 (file)
@@ -692,7 +692,7 @@ u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
        if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
                rtl92c_phy_sw_chnl_callback(hw);
                RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
-                        "sw_chnl_inprogress false schdule workitem\n");
+                        "sw_chnl_inprogress false schedule workitem\n");
                rtlphy->sw_chnl_inprogress = false;
        } else {
                RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
index 39cc7938eedfe44da35ea7a37134a705cfc4db5e..3d8536bb0d2bf5055c181d5d8f4b67d25153488b 100644 (file)
@@ -1106,7 +1106,7 @@ u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw)
        if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
                rtl8723ae_phy_sw_chnl_callback(hw);
                RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
-                        "sw_chnl_inprogress false schdule workitem\n");
+                        "sw_chnl_inprogress false schedule workitem\n");
                rtlphy->sw_chnl_inprogress = false;
        } else {
                RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
index f2ecdeb3a90d441ee809af029a1db1c8dab80f48..1535efda3d525a0dfafe54bb4442aacec4b00446 100644 (file)
@@ -542,8 +542,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
        WARN_ON(skb_queue_empty(&rx_queue));
        while (!skb_queue_empty(&rx_queue)) {
                _skb = skb_dequeue(&rx_queue);
-               _rtl_usb_rx_process_agg(hw, skb);
-               ieee80211_rx_irqsafe(hw, skb);
+               _rtl_usb_rx_process_agg(hw, _skb);
+               ieee80211_rx_irqsafe(hw, _skb);
        }
 }
 
index db719f7d26928c4012a1020a050edbd3a9560980..b9e27b98bbc928ac2c7eb7dd9e48053c87ef60bf 100644 (file)
@@ -68,8 +68,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
        unsigned long timeout, start;
        u32 elp_reg;
 
-       if (delayed_work_pending(&wl->elp_work))
-               cancel_delayed_work(&wl->elp_work);
+       cancel_delayed_work(&wl->elp_work);
 
        if (!wl->elp)
                return 0;
index 94b79c3338c4260200306f180ff053b87006faa6..9d7f1723dd8f750126337d7792a0614fea8ca601 100644 (file)
@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
 /* Notify xenvif that ring now has space to send an skb to the frontend */
 void xenvif_notify_tx_completion(struct xenvif *vif);
 
+/* Prevent the device from generating any further traffic. */
+void xenvif_carrier_off(struct xenvif *vif);
+
 /* Returns number of ring slots required to send an skb to the frontend */
 unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
 
index b7d41f8c338a8372cea8bfe6649386c6a0d8b090..b8c5193bd420030a363ba30c14e8b485a8d6c5cc 100644 (file)
@@ -343,17 +343,22 @@ err:
        return err;
 }
 
-void xenvif_disconnect(struct xenvif *vif)
+void xenvif_carrier_off(struct xenvif *vif)
 {
        struct net_device *dev = vif->dev;
-       if (netif_carrier_ok(dev)) {
-               rtnl_lock();
-               netif_carrier_off(dev); /* discard queued packets */
-               if (netif_running(dev))
-                       xenvif_down(vif);
-               rtnl_unlock();
-               xenvif_put(vif);
-       }
+
+       rtnl_lock();
+       netif_carrier_off(dev); /* discard queued packets */
+       if (netif_running(dev))
+               xenvif_down(vif);
+       rtnl_unlock();
+       xenvif_put(vif);
+}
+
+void xenvif_disconnect(struct xenvif *vif)
+{
+       if (netif_carrier_ok(vif->dev))
+               xenvif_carrier_off(vif);
 
        atomic_dec(&vif->refcnt);
        wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
index f2d6b78d901d92d3965fe3bd849751f04fb7ccf3..2b9520c46e97a8de263b10f602bdb8cc35af0277 100644 (file)
@@ -147,7 +147,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
        atomic_dec(&netbk->netfront_count);
 }
 
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+                                 u8 status);
 static void make_tx_response(struct xenvif *vif,
                             struct xen_netif_tx_request *txp,
                             s8       st);
@@ -879,7 +880,7 @@ static void netbk_tx_err(struct xenvif *vif,
 
        do {
                make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-               if (cons >= end)
+               if (cons == end)
                        break;
                txp = RING_GET_REQUEST(&vif->tx, cons++);
        } while (1);
@@ -888,6 +889,13 @@ static void netbk_tx_err(struct xenvif *vif,
        xenvif_put(vif);
 }
 
+static void netbk_fatal_tx_err(struct xenvif *vif)
+{
+       netdev_err(vif->dev, "fatal error; disabling device\n");
+       xenvif_carrier_off(vif);
+       xenvif_put(vif);
+}
+
 static int netbk_count_requests(struct xenvif *vif,
                                struct xen_netif_tx_request *first,
                                struct xen_netif_tx_request *txp,
@@ -901,19 +909,22 @@ static int netbk_count_requests(struct xenvif *vif,
 
        do {
                if (frags >= work_to_do) {
-                       netdev_dbg(vif->dev, "Need more frags\n");
+                       netdev_err(vif->dev, "Need more frags\n");
+                       netbk_fatal_tx_err(vif);
                        return -frags;
                }
 
                if (unlikely(frags >= MAX_SKB_FRAGS)) {
-                       netdev_dbg(vif->dev, "Too many frags\n");
+                       netdev_err(vif->dev, "Too many frags\n");
+                       netbk_fatal_tx_err(vif);
                        return -frags;
                }
 
                memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
                       sizeof(*txp));
                if (txp->size > first->size) {
-                       netdev_dbg(vif->dev, "Frags galore\n");
+                       netdev_err(vif->dev, "Frag is bigger than frame.\n");
+                       netbk_fatal_tx_err(vif);
                        return -frags;
                }
 
@@ -921,8 +932,9 @@ static int netbk_count_requests(struct xenvif *vif,
                frags++;
 
                if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
-                       netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
+                       netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
                                 txp->offset, txp->size);
+                       netbk_fatal_tx_err(vif);
                        return -frags;
                }
        } while ((txp++)->flags & XEN_NETTXF_more_data);
@@ -966,7 +978,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
                pending_idx = netbk->pending_ring[index];
                page = xen_netbk_alloc_page(netbk, skb, pending_idx);
                if (!page)
-                       return NULL;
+                       goto err;
 
                gop->source.u.ref = txp->gref;
                gop->source.domid = vif->domid;
@@ -988,6 +1000,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
        }
 
        return gop;
+err:
+       /* Unwind, freeing all pages and sending error responses. */
+       while (i-- > start) {
+               xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
+                                     XEN_NETIF_RSP_ERROR);
+       }
+       /* The head too, if necessary. */
+       if (start)
+               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+       return NULL;
 }
 
 static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
@@ -996,30 +1019,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 {
        struct gnttab_copy *gop = *gopp;
        u16 pending_idx = *((u16 *)skb->data);
-       struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
-       struct xenvif *vif = pending_tx_info[pending_idx].vif;
-       struct xen_netif_tx_request *txp;
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int nr_frags = shinfo->nr_frags;
        int i, err, start;
 
        /* Check status of header. */
        err = gop->status;
-       if (unlikely(err)) {
-               pending_ring_idx_t index;
-               index = pending_index(netbk->pending_prod++);
-               txp = &pending_tx_info[pending_idx].req;
-               make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-               netbk->pending_ring[index] = pending_idx;
-               xenvif_put(vif);
-       }
+       if (unlikely(err))
+               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
 
        /* Skip first skb fragment if it is on same page as header fragment. */
        start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
        for (i = start; i < nr_frags; i++) {
                int j, newerr;
-               pending_ring_idx_t index;
 
                pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
 
@@ -1028,16 +1041,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
                if (likely(!newerr)) {
                        /* Had a previous error? Invalidate this fragment. */
                        if (unlikely(err))
-                               xen_netbk_idx_release(netbk, pending_idx);
+                               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
                        continue;
                }
 
                /* Error on this fragment: respond to client with an error. */
-               txp = &netbk->pending_tx_info[pending_idx].req;
-               make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
-               index = pending_index(netbk->pending_prod++);
-               netbk->pending_ring[index] = pending_idx;
-               xenvif_put(vif);
+               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
 
                /* Not the first error? Preceding frags already invalidated. */
                if (err)
@@ -1045,10 +1054,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 
                /* First error: invalidate header and preceding fragments. */
                pending_idx = *((u16 *)skb->data);
-               xen_netbk_idx_release(netbk, pending_idx);
+               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
                for (j = start; j < i; j++) {
                        pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-                       xen_netbk_idx_release(netbk, pending_idx);
+                       xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
                }
 
                /* Remember the error: invalidate all subsequent fragments. */
@@ -1082,7 +1091,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
 
                /* Take an extra reference to offset xen_netbk_idx_release */
                get_page(netbk->mmap_pages[pending_idx]);
-               xen_netbk_idx_release(netbk, pending_idx);
+               xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
        }
 }
 
@@ -1095,7 +1104,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 
        do {
                if (unlikely(work_to_do-- <= 0)) {
-                       netdev_dbg(vif->dev, "Missing extra info\n");
+                       netdev_err(vif->dev, "Missing extra info\n");
+                       netbk_fatal_tx_err(vif);
                        return -EBADR;
                }
 
@@ -1104,8 +1114,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
                if (unlikely(!extra.type ||
                             extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
                        vif->tx.req_cons = ++cons;
-                       netdev_dbg(vif->dev,
+                       netdev_err(vif->dev,
                                   "Invalid extra type: %d\n", extra.type);
+                       netbk_fatal_tx_err(vif);
                        return -EINVAL;
                }
 
@@ -1121,13 +1132,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
                             struct xen_netif_extra_info *gso)
 {
        if (!gso->u.gso.size) {
-               netdev_dbg(vif->dev, "GSO size must not be zero.\n");
+               netdev_err(vif->dev, "GSO size must not be zero.\n");
+               netbk_fatal_tx_err(vif);
                return -EINVAL;
        }
 
        /* Currently only TCPv4 S.O. is supported. */
        if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
-               netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+               netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+               netbk_fatal_tx_err(vif);
                return -EINVAL;
        }
 
@@ -1264,9 +1277,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
                /* Get a netif from the list with work to do. */
                vif = poll_net_schedule_list(netbk);
+               /* This can sometimes happen because the test of
+                * list_empty(net_schedule_list) at the top of the
+                * loop is unlocked.  Just go back and have another
+                * look.
+                */
                if (!vif)
                        continue;
 
+               if (vif->tx.sring->req_prod - vif->tx.req_cons >
+                   XEN_NETIF_TX_RING_SIZE) {
+                       netdev_err(vif->dev,
+                                  "Impossible number of requests. "
+                                  "req_prod %d, req_cons %d, size %ld\n",
+                                  vif->tx.sring->req_prod, vif->tx.req_cons,
+                                  XEN_NETIF_TX_RING_SIZE);
+                       netbk_fatal_tx_err(vif);
+                       continue;
+               }
+
                RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
                if (!work_to_do) {
                        xenvif_put(vif);
@@ -1294,17 +1323,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
                        work_to_do = xen_netbk_get_extras(vif, extras,
                                                          work_to_do);
                        idx = vif->tx.req_cons;
-                       if (unlikely(work_to_do < 0)) {
-                               netbk_tx_err(vif, &txreq, idx);
+                       if (unlikely(work_to_do < 0))
                                continue;
-                       }
                }
 
                ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
-               if (unlikely(ret < 0)) {
-                       netbk_tx_err(vif, &txreq, idx - ret);
+               if (unlikely(ret < 0))
                        continue;
-               }
+
                idx += ret;
 
                if (unlikely(txreq.size < ETH_HLEN)) {
@@ -1316,11 +1342,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
                /* No crossing a page as the payload mustn't fragment. */
                if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
-                       netdev_dbg(vif->dev,
+                       netdev_err(vif->dev,
                                   "txreq.offset: %x, size: %u, end: %lu\n",
                                   txreq.offset, txreq.size,
                                   (txreq.offset&~PAGE_MASK) + txreq.size);
-                       netbk_tx_err(vif, &txreq, idx);
+                       netbk_fatal_tx_err(vif);
                        continue;
                }
 
@@ -1348,8 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
                        gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
 
                        if (netbk_set_skb_gso(vif, skb, gso)) {
+                               /* Failure in netbk_set_skb_gso is fatal. */
                                kfree_skb(skb);
-                               netbk_tx_err(vif, &txreq, idx);
                                continue;
                        }
                }
@@ -1448,7 +1474,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
                        txp->size -= data_len;
                } else {
                        /* Schedule a response immediately. */
-                       xen_netbk_idx_release(netbk, pending_idx);
+                       xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
                }
 
                if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1500,7 +1526,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)
        xen_netbk_tx_submit(netbk);
 }
 
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+                                 u8 status)
 {
        struct xenvif *vif;
        struct pending_tx_info *pending_tx_info;
@@ -1514,7 +1541,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
 
        vif = pending_tx_info->vif;
 
-       make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
+       make_tx_response(vif, &pending_tx_info->req, status);
 
        index = pending_index(netbk->pending_prod++);
        netbk->pending_ring[index] = pending_idx;
index 26ffd3e3fb7463f84ce4385691fe19e028a26d0a..2c113de943233db361a41e2bdcd92202c37a32b7 100644 (file)
@@ -44,7 +44,6 @@ extern bool pciehp_poll_mode;
 extern int pciehp_poll_time;
 extern bool pciehp_debug;
 extern bool pciehp_force;
-extern struct workqueue_struct *pciehp_wq;
 
 #define dbg(format, arg...)                                            \
 do {                                                                   \
@@ -78,6 +77,7 @@ struct slot {
        struct hotplug_slot *hotplug_slot;
        struct delayed_work work;       /* work for button event */
        struct mutex lock;
+       struct workqueue_struct *wq;
 };
 
 struct event_info {
index 916bf4f53abadc16d9205ac41dc9e3edc6f839ec..939bd1d4b5b138b8f8bbe319007973579a6c39df 100644 (file)
@@ -42,7 +42,6 @@ bool pciehp_debug;
 bool pciehp_poll_mode;
 int pciehp_poll_time;
 bool pciehp_force;
-struct workqueue_struct *pciehp_wq;
 
 #define DRIVER_VERSION "0.4"
 #define DRIVER_AUTHOR  "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
@@ -340,18 +339,13 @@ static int __init pcied_init(void)
 {
        int retval = 0;
 
-       pciehp_wq = alloc_workqueue("pciehp", 0, 0);
-       if (!pciehp_wq)
-               return -ENOMEM;
-
        pciehp_firmware_init();
        retval = pcie_port_service_register(&hpdriver_portdrv);
        dbg("pcie_port_service_register = %d\n", retval);
        info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
-       if (retval) {
-               destroy_workqueue(pciehp_wq);
+       if (retval)
                dbg("Failure to register service\n");
-       }
+
        return retval;
 }
 
@@ -359,7 +353,6 @@ static void __exit pcied_cleanup(void)
 {
        dbg("unload_pciehpd()\n");
        pcie_port_service_unregister(&hpdriver_portdrv);
-       destroy_workqueue(pciehp_wq);
        info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
 }
 
index 27f44295a65738976b4be0f07f4cd33b04e2cc85..38f01867917521a6402e92dbaaf5add19b27a640 100644 (file)
@@ -49,7 +49,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
        info->p_slot = p_slot;
        INIT_WORK(&info->work, interrupt_event_handler);
 
-       queue_work(pciehp_wq, &info->work);
+       queue_work(p_slot->wq, &info->work);
 
        return 0;
 }
@@ -344,7 +344,7 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
                kfree(info);
                goto out;
        }
-       queue_work(pciehp_wq, &info->work);
+       queue_work(p_slot->wq, &info->work);
  out:
        mutex_unlock(&p_slot->lock);
 }
@@ -377,7 +377,7 @@ static void handle_button_press_event(struct slot *p_slot)
                if (ATTN_LED(ctrl))
                        pciehp_set_attention_status(p_slot, 0);
 
-               queue_delayed_work(pciehp_wq, &p_slot->work, 5*HZ);
+               queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
                break;
        case BLINKINGOFF_STATE:
        case BLINKINGON_STATE:
@@ -439,7 +439,7 @@ static void handle_surprise_event(struct slot *p_slot)
        else
                p_slot->state = POWERON_STATE;
 
-       queue_work(pciehp_wq, &info->work);
+       queue_work(p_slot->wq, &info->work);
 }
 
 static void interrupt_event_handler(struct work_struct *work)
index 13b2eaf7ba434f205896785a17eaf335842fbd66..5127f3f418211496ddf44b335ccc5ab44fbac856 100644 (file)
@@ -773,23 +773,32 @@ static void pcie_shutdown_notification(struct controller *ctrl)
 static int pcie_init_slot(struct controller *ctrl)
 {
        struct slot *slot;
+       char name[32];
 
        slot = kzalloc(sizeof(*slot), GFP_KERNEL);
        if (!slot)
                return -ENOMEM;
 
+       snprintf(name, sizeof(name), "pciehp-%u", PSN(ctrl));
+       slot->wq = alloc_workqueue(name, 0, 0);
+       if (!slot->wq)
+               goto abort;
+
        slot->ctrl = ctrl;
        mutex_init(&slot->lock);
        INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
        ctrl->slot = slot;
        return 0;
+abort:
+       kfree(slot);
+       return -ENOMEM;
 }
 
 static void pcie_cleanup_slot(struct controller *ctrl)
 {
        struct slot *slot = ctrl->slot;
        cancel_delayed_work(&slot->work);
-       flush_workqueue(pciehp_wq);
+       destroy_workqueue(slot->wq);
        kfree(slot);
 }
 
index ca64932e658b0c5e981c5bb746663045da9cc7ce..b849f995075a8cd13927ce0ee725279638b5796e 100644 (file)
@@ -46,8 +46,6 @@
 extern bool shpchp_poll_mode;
 extern int shpchp_poll_time;
 extern bool shpchp_debug;
-extern struct workqueue_struct *shpchp_wq;
-extern struct workqueue_struct *shpchp_ordered_wq;
 
 #define dbg(format, arg...)                                            \
 do {                                                                   \
@@ -91,6 +89,7 @@ struct slot {
        struct list_head        slot_list;
        struct delayed_work work;       /* work for button event */
        struct mutex lock;
+       struct workqueue_struct *wq;
        u8 hp_slot;
 };
 
index b6de307248e43cedf52a0a27eacf9211b61efe72..3100c52c837cfceccc7314646898f30846c96b39 100644 (file)
@@ -39,8 +39,6 @@
 bool shpchp_debug;
 bool shpchp_poll_mode;
 int shpchp_poll_time;
-struct workqueue_struct *shpchp_wq;
-struct workqueue_struct *shpchp_ordered_wq;
 
 #define DRIVER_VERSION "0.4"
 #define DRIVER_AUTHOR  "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
@@ -129,6 +127,14 @@ static int init_slots(struct controller *ctrl)
                slot->device = ctrl->slot_device_offset + i;
                slot->hpc_ops = ctrl->hpc_ops;
                slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i);
+
+               snprintf(name, sizeof(name), "shpchp-%d", slot->number);
+               slot->wq = alloc_workqueue(name, 0, 0);
+               if (!slot->wq) {
+                       retval = -ENOMEM;
+                       goto error_info;
+               }
+
                mutex_init(&slot->lock);
                INIT_DELAYED_WORK(&slot->work, shpchp_queue_pushbutton_work);
 
@@ -148,7 +154,7 @@ static int init_slots(struct controller *ctrl)
                if (retval) {
                        ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
                                 retval);
-                       goto error_info;
+                       goto error_slotwq;
                }
 
                get_power_status(hotplug_slot, &info->power_status);
@@ -160,6 +166,8 @@ static int init_slots(struct controller *ctrl)
        }
 
        return 0;
+error_slotwq:
+       destroy_workqueue(slot->wq);
 error_info:
        kfree(info);
 error_hpslot:
@@ -180,8 +188,7 @@ void cleanup_slots(struct controller *ctrl)
                slot = list_entry(tmp, struct slot, slot_list);
                list_del(&slot->slot_list);
                cancel_delayed_work(&slot->work);
-               flush_workqueue(shpchp_wq);
-               flush_workqueue(shpchp_ordered_wq);
+               destroy_workqueue(slot->wq);
                pci_hp_deregister(slot->hotplug_slot);
        }
 }
@@ -364,25 +371,12 @@ static struct pci_driver shpc_driver = {
 
 static int __init shpcd_init(void)
 {
-       int retval = 0;
-
-       shpchp_wq = alloc_ordered_workqueue("shpchp", 0);
-       if (!shpchp_wq)
-               return -ENOMEM;
-
-       shpchp_ordered_wq = alloc_ordered_workqueue("shpchp_ordered", 0);
-       if (!shpchp_ordered_wq) {
-               destroy_workqueue(shpchp_wq);
-               return -ENOMEM;
-       }
+       int retval;
 
        retval = pci_register_driver(&shpc_driver);
        dbg("%s: pci_register_driver = %d\n", __func__, retval);
        info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
-       if (retval) {
-               destroy_workqueue(shpchp_ordered_wq);
-               destroy_workqueue(shpchp_wq);
-       }
+
        return retval;
 }
 
@@ -390,8 +384,6 @@ static void __exit shpcd_cleanup(void)
 {
        dbg("unload_shpchpd()\n");
        pci_unregister_driver(&shpc_driver);
-       destroy_workqueue(shpchp_ordered_wq);
-       destroy_workqueue(shpchp_wq);
        info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
 }
 
index f9b5a52e4115c321367547c30644b545200928a9..58499277903a4ab4a6225982d88876de399120e2 100644 (file)
@@ -51,7 +51,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
        info->p_slot = p_slot;
        INIT_WORK(&info->work, interrupt_event_handler);
 
-       queue_work(shpchp_wq, &info->work);
+       queue_work(p_slot->wq, &info->work);
 
        return 0;
 }
@@ -453,7 +453,7 @@ void shpchp_queue_pushbutton_work(struct work_struct *work)
                kfree(info);
                goto out;
        }
-       queue_work(shpchp_ordered_wq, &info->work);
+       queue_work(p_slot->wq, &info->work);
  out:
        mutex_unlock(&p_slot->lock);
 }
@@ -501,7 +501,7 @@ static void handle_button_press_event(struct slot *p_slot)
                p_slot->hpc_ops->green_led_blink(p_slot);
                p_slot->hpc_ops->set_attention_status(p_slot, 0);
 
-               queue_delayed_work(shpchp_wq, &p_slot->work, 5*HZ);
+               queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
                break;
        case BLINKINGOFF_STATE:
        case BLINKINGON_STATE:
index bafd2bbcaf6541d1983fababa4563ae174837e15..c18e5bf444faa693d41c6a9103ef7ab8982b07da 100644 (file)
@@ -739,7 +739,7 @@ EXPORT_SYMBOL_GPL(pci_num_vf);
 /**
  * pci_sriov_set_totalvfs -- reduce the TotalVFs available
  * @dev: the PCI PF device
- * numvfs: number that should be used for TotalVFs supported
+ * @numvfs: number that should be used for TotalVFs supported
  *
  * Should be called from PF driver's probe routine with
  * device's mutex held.
index 5099636a6e5f65782ea87fbb9c25a0aeeeed282e..00cc78c7aa045e9eb5ff747afce8ac3b66a57e7d 100644 (file)
@@ -845,6 +845,32 @@ int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
 }
 EXPORT_SYMBOL(pci_enable_msi_block);
 
+int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec)
+{
+       int ret, pos, nvec;
+       u16 msgctl;
+
+       pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+       if (!pos)
+               return -EINVAL;
+
+       pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
+       ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
+
+       if (maxvec)
+               *maxvec = ret;
+
+       do {
+               nvec = ret;
+               ret = pci_enable_msi_block(dev, nvec);
+       } while (ret > 0);
+
+       if (ret < 0)
+               return ret;
+       return nvec;
+}
+EXPORT_SYMBOL(pci_enable_msi_block_auto);
+
 void pci_msi_shutdown(struct pci_dev *dev)
 {
        struct msi_desc *desc;
index 6c8bc5809787d7e2a4b82c72c590918c20f49665..fde4a32a0295082a95de76b6ea08dc1d59fdc2a9 100644 (file)
@@ -82,4 +82,4 @@ endchoice
 
 config PCIE_PME
        def_bool y
-       depends on PCIEPORTBUS && PM_RUNTIME && EXPERIMENTAL && ACPI
+       depends on PCIEPORTBUS && PM_RUNTIME && ACPI
index 421bbc5fee324b485f409e7a025338bc524eecf5..564d97f94b6cd4d248b7cdad8b3b4e138144ff97 100644 (file)
@@ -630,6 +630,7 @@ static void aer_recover_work_func(struct work_struct *work)
                        continue;
                }
                do_recovery(pdev, entry.severity);
+               pci_dev_put(pdev);
        }
 }
 #endif
index 3ea51736f18db45be7c40280626eb0d803c157b1..5ab14251839d0f06c395ac0dfc064c166cf3b75a 100644 (file)
@@ -23,6 +23,9 @@
 
 #include "aerdrv.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/ras.h>
+
 #define AER_AGENT_RECEIVER             0
 #define AER_AGENT_REQUESTER            1
 #define AER_AGENT_COMPLETER            2
@@ -121,12 +124,11 @@ static const char *aer_agent_string[] = {
        "Transmitter ID"
 };
 
-static void __aer_print_error(const char *prefix,
+static void __aer_print_error(struct pci_dev *dev,
                              struct aer_err_info *info)
 {
        int i, status;
        const char *errmsg = NULL;
-
        status = (info->status & ~info->mask);
 
        for (i = 0; i < 32; i++) {
@@ -141,26 +143,22 @@ static void __aer_print_error(const char *prefix,
                                aer_uncorrectable_error_string[i] : NULL;
 
                if (errmsg)
-                       printk("%s""   [%2d] %-22s%s\n", prefix, i, errmsg,
+                       dev_err(&dev->dev, "   [%2d] %-22s%s\n", i, errmsg,
                                info->first_error == i ? " (First)" : "");
                else
-                       printk("%s""   [%2d] Unknown Error Bit%s\n", prefix, i,
-                               info->first_error == i ? " (First)" : "");
+                       dev_err(&dev->dev, "   [%2d] Unknown Error Bit%s\n",
+                               i, info->first_error == i ? " (First)" : "");
        }
 }
 
 void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
 {
        int id = ((dev->bus->number << 8) | dev->devfn);
-       char prefix[44];
-
-       snprintf(prefix, sizeof(prefix), "%s%s %s: ",
-                (info->severity == AER_CORRECTABLE) ? KERN_WARNING : KERN_ERR,
-                dev_driver_string(&dev->dev), dev_name(&dev->dev));
 
        if (info->status == 0) {
-               printk("%s""PCIe Bus Error: severity=%s, type=Unaccessible, "
-                       "id=%04x(Unregistered Agent ID)\n", prefix,
+               dev_err(&dev->dev,
+                       "PCIe Bus Error: severity=%s, type=Unaccessible, "
+                       "id=%04x(Unregistered Agent ID)\n",
                        aer_error_severity_string[info->severity], id);
        } else {
                int layer, agent;
@@ -168,22 +166,24 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
                layer = AER_GET_LAYER_ERROR(info->severity, info->status);
                agent = AER_GET_AGENT(info->severity, info->status);
 
-               printk("%s""PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
-                       prefix, aer_error_severity_string[info->severity],
+               dev_err(&dev->dev,
+                       "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
+                       aer_error_severity_string[info->severity],
                        aer_error_layer[layer], id, aer_agent_string[agent]);
 
-               printk("%s""  device [%04x:%04x] error status/mask=%08x/%08x\n",
-                       prefix, dev->vendor, dev->device,
+               dev_err(&dev->dev,
+                       "  device [%04x:%04x] error status/mask=%08x/%08x\n",
+                       dev->vendor, dev->device,
                        info->status, info->mask);
 
-               __aer_print_error(prefix, info);
+               __aer_print_error(dev, info);
 
                if (info->tlp_header_valid) {
                        unsigned char *tlp = (unsigned char *) &info->tlp;
-                       printk("%s""  TLP Header:"
+                       dev_err(&dev->dev, "  TLP Header:"
                                " %02x%02x%02x%02x %02x%02x%02x%02x"
                                " %02x%02x%02x%02x %02x%02x%02x%02x\n",
-                               prefix, *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
+                               *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
                                *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
                                *(tlp + 11), *(tlp + 10), *(tlp + 9),
                                *(tlp + 8), *(tlp + 15), *(tlp + 14),
@@ -192,8 +192,11 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
        }
 
        if (info->id && info->error_dev_num > 1 && info->id == id)
-               printk("%s""  Error of this Agent(%04x) is reported first\n",
-                       prefix, id);
+               dev_err(&dev->dev,
+                          "  Error of this Agent(%04x) is reported first\n",
+                       id);
+       trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
+                       info->severity);
 }
 
 void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
@@ -217,7 +220,7 @@ int cper_severity_to_aer(int cper_severity)
 }
 EXPORT_SYMBOL_GPL(cper_severity_to_aer);
 
-void cper_print_aer(const char *prefix, int cper_severity,
+void cper_print_aer(const char *prefix, struct pci_dev *dev, int cper_severity,
                    struct aer_capability_regs *aer)
 {
        int aer_severity, layer, agent, status_strs_size, tlp_header_valid = 0;
@@ -239,25 +242,27 @@ void cper_print_aer(const char *prefix, int cper_severity,
        }
        layer = AER_GET_LAYER_ERROR(aer_severity, status);
        agent = AER_GET_AGENT(aer_severity, status);
-       printk("%s""aer_status: 0x%08x, aer_mask: 0x%08x\n",
-              prefix, status, mask);
+       dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n",
+              status, mask);
        cper_print_bits(prefix, status, status_strs, status_strs_size);
-       printk("%s""aer_layer=%s, aer_agent=%s\n", prefix,
+       dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n",
               aer_error_layer[layer], aer_agent_string[agent]);
        if (aer_severity != AER_CORRECTABLE)
-               printk("%s""aer_uncor_severity: 0x%08x\n",
-                      prefix, aer->uncor_severity);
+               dev_err(&dev->dev, "aer_uncor_severity: 0x%08x\n",
+                      aer->uncor_severity);
        if (tlp_header_valid) {
                const unsigned char *tlp;
                tlp = (const unsigned char *)&aer->header_log;
-               printk("%s""aer_tlp_header:"
+               dev_err(&dev->dev, "aer_tlp_header:"
                        " %02x%02x%02x%02x %02x%02x%02x%02x"
                        " %02x%02x%02x%02x %02x%02x%02x%02x\n",
-                       prefix, *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
+                       *(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
                        *(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
                        *(tlp + 11), *(tlp + 10), *(tlp + 9),
                        *(tlp + 8), *(tlp + 15), *(tlp + 14),
                        *(tlp + 13), *(tlp + 12));
        }
+       trace_aer_event(dev_name(&dev->dev), (status & ~mask),
+                       aer_severity);
 }
 #endif
index b52630b8eada26ad35f8304e436c63f3e064edd3..8474b6a4fc9bb714e7ab85cf539353fd151b8125 100644 (file)
@@ -771,6 +771,9 @@ void pcie_clear_aspm(struct pci_bus *bus)
 {
        struct pci_dev *child;
 
+       if (aspm_force)
+               return;
+
        /*
         * Clear any ASPM setup that the firmware has carried out on this bus
         */
index 7c0fd9252e6f03c1b1f17f53282be0c5a1fdb57d..84954a726a94d058ecf2e95100dcb01800cc434b 100644 (file)
@@ -19,6 +19,8 @@ static void pci_free_resources(struct pci_dev *dev)
 
 static void pci_stop_dev(struct pci_dev *dev)
 {
+       pci_pme_active(dev, false);
+
        if (dev->is_added) {
                pci_proc_detach_device(dev);
                pci_remove_sysfs_dev_files(dev);
index c31aeb01bb0002cc648b603c6c526ad83f9ab9a9..393b0ecf4ca4f21ec3731db30e9531bf7eaa4043 100644 (file)
@@ -26,6 +26,29 @@ config DEBUG_PINCTRL
        help
          Say Y here to add some extra checks and diagnostics to PINCTRL calls.
 
+config PINCTRL_ABX500
+       bool "ST-Ericsson ABx500 family Mixed Signal Circuit gpio functions"
+       depends on AB8500_CORE
+       select GENERIC_PINCONF
+       help
+         Select this to enable the ABx500 family IC GPIO driver
+
+config PINCTRL_AB8500
+       bool "AB8500 pin controller driver"
+       depends on PINCTRL_ABX500 && ARCH_U8500
+
+config PINCTRL_AB8540
+       bool "AB8540 pin controller driver"
+       depends on PINCTRL_ABX500 && ARCH_U8500
+
+config PINCTRL_AB9540
+       bool "AB9540 pin controller driver"
+       depends on PINCTRL_ABX500 && ARCH_U8500
+
+config PINCTRL_AB8505
+       bool "AB8505 pin controller driver"
+       depends on PINCTRL_ABX500 && ARCH_U8500
+
 config PINCTRL_AT91
        bool "AT91 pinctrl driver"
        depends on OF
@@ -151,6 +174,11 @@ config PINCTRL_SIRF
        depends on ARCH_SIRF
        select PINMUX
 
+config PINCTRL_SUNXI
+       bool
+       select PINMUX
+       select GENERIC_PINCONF
+
 config PINCTRL_TEGRA
        bool
        select PINMUX
@@ -164,6 +192,10 @@ config PINCTRL_TEGRA30
        bool
        select PINCTRL_TEGRA
 
+config PINCTRL_TEGRA114
+       bool
+       select PINCTRL_TEGRA
+
 config PINCTRL_U300
        bool "U300 pin controller driver"
        depends on ARCH_U300
@@ -181,12 +213,11 @@ config PINCTRL_COH901
 
 config PINCTRL_SAMSUNG
        bool
-       depends on OF && GPIOLIB
        select PINMUX
        select PINCONF
 
-config PINCTRL_EXYNOS4
-       bool "Pinctrl driver data for Exynos4 SoC"
+config PINCTRL_EXYNOS
+       bool "Pinctrl driver data for Samsung EXYNOS SoCs"
        depends on OF && GPIOLIB
        select PINCTRL_SAMSUNG
 
index fc4606f27dc793897aac033797f2e680e80f3dad..0fd5f57fcb57f7e935d960de62e2db954f23038c 100644 (file)
@@ -9,6 +9,11 @@ ifeq ($(CONFIG_OF),y)
 obj-$(CONFIG_PINCTRL)          += devicetree.o
 endif
 obj-$(CONFIG_GENERIC_PINCONF)  += pinconf-generic.o
+obj-$(CONFIG_PINCTRL_ABX500)   += pinctrl-abx500.o
+obj-$(CONFIG_PINCTRL_AB8500)   += pinctrl-ab8500.o
+obj-$(CONFIG_PINCTRL_AB8540)   += pinctrl-ab8540.o
+obj-$(CONFIG_PINCTRL_AB9540)   += pinctrl-ab9540.o
+obj-$(CONFIG_PINCTRL_AB8505)   += pinctrl-ab8505.o
 obj-$(CONFIG_PINCTRL_AT91)     += pinctrl-at91.o
 obj-$(CONFIG_PINCTRL_BCM2835)  += pinctrl-bcm2835.o
 obj-$(CONFIG_PINCTRL_IMX)      += pinctrl-imx.o
@@ -30,13 +35,15 @@ obj-$(CONFIG_PINCTRL_PXA168)        += pinctrl-pxa168.o
 obj-$(CONFIG_PINCTRL_PXA910)   += pinctrl-pxa910.o
 obj-$(CONFIG_PINCTRL_SINGLE)   += pinctrl-single.o
 obj-$(CONFIG_PINCTRL_SIRF)     += pinctrl-sirf.o
+obj-$(CONFIG_PINCTRL_SUNXI)    += pinctrl-sunxi.o
 obj-$(CONFIG_PINCTRL_TEGRA)    += pinctrl-tegra.o
 obj-$(CONFIG_PINCTRL_TEGRA20)  += pinctrl-tegra20.o
 obj-$(CONFIG_PINCTRL_TEGRA30)  += pinctrl-tegra30.o
+obj-$(CONFIG_PINCTRL_TEGRA114) += pinctrl-tegra114.o
 obj-$(CONFIG_PINCTRL_U300)     += pinctrl-u300.o
 obj-$(CONFIG_PINCTRL_COH901)   += pinctrl-coh901.o
 obj-$(CONFIG_PINCTRL_SAMSUNG)  += pinctrl-samsung.o
-obj-$(CONFIG_PINCTRL_EXYNOS4)  += pinctrl-exynos.o
+obj-$(CONFIG_PINCTRL_EXYNOS)  += pinctrl-exynos.o
 obj-$(CONFIG_PINCTRL_EXYNOS5440)       += pinctrl-exynos5440.o
 obj-$(CONFIG_PINCTRL_XWAY)     += pinctrl-xway.o
 obj-$(CONFIG_PINCTRL_LANTIQ)   += pinctrl-lantiq.o
index 59f5a965bdc40ad47e566bc1969f236ba06d8689..b0de6e7f1fdb8cb68566b24f04eee8722ab77862 100644 (file)
@@ -14,6 +14,7 @@
 #define pr_fmt(fmt) "pinctrl core: " fmt
 
 #include <linux/kernel.h>
+#include <linux/kref.h>
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/device.h>
 #include "pinmux.h"
 #include "pinconf.h"
 
-/**
- * struct pinctrl_maps - a list item containing part of the mapping table
- * @node: mapping table list node
- * @maps: array of mapping table entries
- * @num_maps: the number of entries in @maps
- */
-struct pinctrl_maps {
-       struct list_head node;
-       struct pinctrl_map const *maps;
-       unsigned num_maps;
-};
 
 static bool pinctrl_dummy_state;
 
@@ -55,13 +45,8 @@ LIST_HEAD(pinctrldev_list);
 static LIST_HEAD(pinctrl_list);
 
 /* List of pinctrl maps (struct pinctrl_maps) */
-static LIST_HEAD(pinctrl_maps);
+LIST_HEAD(pinctrl_maps);
 
-#define for_each_maps(_maps_node_, _i_, _map_) \
-       list_for_each_entry(_maps_node_, &pinctrl_maps, node) \
-               for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \
-                       _i_ < _maps_node_->num_maps; \
-                       _i_++, _map_ = &_maps_node_->maps[_i_])
 
 /**
  * pinctrl_provide_dummies() - indicate if pinctrl provides dummy state support
@@ -83,6 +68,12 @@ const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev)
 }
 EXPORT_SYMBOL_GPL(pinctrl_dev_get_name);
 
+const char *pinctrl_dev_get_devname(struct pinctrl_dev *pctldev)
+{
+       return dev_name(pctldev->dev);
+}
+EXPORT_SYMBOL_GPL(pinctrl_dev_get_devname);
+
 void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev)
 {
        return pctldev->driver_data;
@@ -609,13 +600,16 @@ static int add_setting(struct pinctrl *p, struct pinctrl_map const *map)
 
        setting->pctldev = get_pinctrl_dev_from_devname(map->ctrl_dev_name);
        if (setting->pctldev == NULL) {
-               dev_info(p->dev, "unknown pinctrl device %s in map entry, deferring probe",
-                       map->ctrl_dev_name);
                kfree(setting);
+               /* Do not defer probing of hogs (circular loop) */
+               if (!strcmp(map->ctrl_dev_name, map->dev_name))
+                       return -ENODEV;
                /*
                 * OK let us guess that the driver is not there yet, and
                 * let's defer obtaining this pinctrl handle to later...
                 */
+               dev_info(p->dev, "unknown pinctrl device %s in map entry, deferring probe",
+                       map->ctrl_dev_name);
                return -EPROBE_DEFER;
        }
 
@@ -694,11 +688,31 @@ static struct pinctrl *create_pinctrl(struct device *dev)
                        continue;
 
                ret = add_setting(p, map);
-               if (ret < 0) {
+               /*
+                * At this point the adding of a setting may:
+                *
+                * - Defer, if the pinctrl device is not yet available
+                * - Fail, if the pinctrl device is not yet available,
+                *   AND the setting is a hog. We cannot defer that, since
+                *   the hog will kick in immediately after the device
+                *   is registered.
+                *
+                * If the error returned was not -EPROBE_DEFER then we
+                * accumulate the errors to see if we end up with
+                * an -EPROBE_DEFER later, as that is the worst case.
+                */
+               if (ret == -EPROBE_DEFER) {
                        pinctrl_put_locked(p, false);
                        return ERR_PTR(ret);
                }
        }
+       if (ret < 0) {
+               /* If an error other than deferral occurred, return it here */
+               pinctrl_put_locked(p, false);
+               return ERR_PTR(ret);
+       }
+
+       kref_init(&p->users);
 
        /* Add the pinctrl handle to the global list */
        list_add_tail(&p->node, &pinctrl_list);
@@ -713,9 +727,17 @@ static struct pinctrl *pinctrl_get_locked(struct device *dev)
        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);
 
+       /*
+        * See if somebody else (such as the device core) has already
+        * obtained a handle to the pinctrl for this device. In that case,
+        * return another pointer to it.
+        */
        p = find_pinctrl(dev);
-       if (p != NULL)
-               return ERR_PTR(-EBUSY);
+       if (p != NULL) {
+               dev_dbg(dev, "obtain a copy of previously claimed pinctrl\n");
+               kref_get(&p->users);
+               return p;
+       }
 
        return create_pinctrl(dev);
 }
@@ -771,13 +793,24 @@ static void pinctrl_put_locked(struct pinctrl *p, bool inlist)
 }
 
 /**
- * pinctrl_put() - release a previously claimed pinctrl handle
+ * pinctrl_release() - release the pinctrl handle
+ * @kref: the kref in the pinctrl being released
+ */
+static void pinctrl_release(struct kref *kref)
+{
+       struct pinctrl *p = container_of(kref, struct pinctrl, users);
+
+       pinctrl_put_locked(p, true);
+}
+
+/**
+ * pinctrl_put() - decrease use count on a previously claimed pinctrl handle
  * @p: the pinctrl handle to release
  */
 void pinctrl_put(struct pinctrl *p)
 {
        mutex_lock(&pinctrl_mutex);
-       pinctrl_put_locked(p, true);
+       kref_put(&p->users, pinctrl_release);
        mutex_unlock(&pinctrl_mutex);
 }
 EXPORT_SYMBOL_GPL(pinctrl_put);
@@ -1055,6 +1088,30 @@ void pinctrl_unregister_map(struct pinctrl_map const *map)
        }
 }
 
+/**
+ * pinctrl_force_sleep() - turn a given controller device into sleep state
+ * @pctldev: pin controller device
+ */
+int pinctrl_force_sleep(struct pinctrl_dev *pctldev)
+{
+       if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_sleep))
+               return pinctrl_select_state(pctldev->p, pctldev->hog_sleep);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(pinctrl_force_sleep);
+
+/**
+ * pinctrl_force_default() - turn a given controller device into default state
+ * @pctldev: pin controller device
+ */
+int pinctrl_force_default(struct pinctrl_dev *pctldev)
+{
+       if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_default))
+               return pinctrl_select_state(pctldev->p, pctldev->hog_default);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(pinctrl_force_default);
+
 #ifdef CONFIG_DEBUG_FS
 
 static int pinctrl_pins_show(struct seq_file *s, void *what)
@@ -1500,16 +1557,23 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
 
        pctldev->p = pinctrl_get_locked(pctldev->dev);
        if (!IS_ERR(pctldev->p)) {
-               struct pinctrl_state *s =
+               pctldev->hog_default =
                        pinctrl_lookup_state_locked(pctldev->p,
                                                    PINCTRL_STATE_DEFAULT);
-               if (IS_ERR(s)) {
+               if (IS_ERR(pctldev->hog_default)) {
                        dev_dbg(dev, "failed to lookup the default state\n");
                } else {
-                       if (pinctrl_select_state_locked(pctldev->p, s))
+                       if (pinctrl_select_state_locked(pctldev->p,
+                                               pctldev->hog_default))
                                dev_err(dev,
                                        "failed to select default state\n");
                }
+
+               pctldev->hog_sleep =
+                       pinctrl_lookup_state_locked(pctldev->p,
+                                                   PINCTRL_STATE_SLEEP);
+               if (IS_ERR(pctldev->hog_sleep))
+                       dev_dbg(dev, "failed to lookup the sleep state\n");
        }
 
        mutex_unlock(&pinctrl_mutex);
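
The net effect of the core.c changes above is that the pinctrl handle becomes shareable: a second pinctrl_get() on the same device now returns the existing handle with its kref bumped instead of failing with -EBUSY, and every get must be balanced by a put. A minimal consumer-side sketch of the new semantics; the foo_* names are illustrative only and not part of this series:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

static void foo_show_shared_handle(struct device *foo_dev)
{
        struct pinctrl *p1, *p2;

        p1 = pinctrl_get(foo_dev);      /* creates the handle, users == 1 */
        if (IS_ERR(p1))
                return;

        p2 = pinctrl_get(foo_dev);      /* same handle returned, users == 2 */
        if (!IS_ERR(p2))
                pinctrl_put(p2);        /* users back to 1, handle stays alive */

        pinctrl_put(p1);                /* users == 0, pinctrl_release() frees it */
}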
index 12f5694f3d5d9f02020cd2369603d6202e488b24..ee72f1f6d86239ac91ba6e29be716356502ced5b 100644 (file)
@@ -9,6 +9,7 @@
  * License terms: GNU General Public License (GPL) version 2
  */
 
+#include <linux/kref.h>
 #include <linux/mutex.h>
 #include <linux/radix-tree.h>
 #include <linux/pinctrl/pinconf.h>
@@ -30,6 +31,8 @@ struct pinctrl_gpio_range;
  * @driver_data: driver data for drivers registering to the pin controller
  *     subsystem
  * @p: result of pinctrl_get() for this device
+ * @hog_default: default state for pins hogged by this device
+ * @hog_sleep: sleep state for pins hogged by this device
  * @device_root: debugfs root for this device
  */
 struct pinctrl_dev {
@@ -41,6 +44,8 @@ struct pinctrl_dev {
        struct module *owner;
        void *driver_data;
        struct pinctrl *p;
+       struct pinctrl_state *hog_default;
+       struct pinctrl_state *hog_sleep;
 #ifdef CONFIG_DEBUG_FS
        struct dentry *device_root;
 #endif
@@ -54,6 +59,7 @@ struct pinctrl_dev {
  * @state: the current state
  * @dt_maps: the mapping table chunks dynamically parsed from device tree for
  *     this device, if any
+ * @users: reference count
  */
 struct pinctrl {
        struct list_head node;
@@ -61,6 +67,7 @@ struct pinctrl {
        struct list_head states;
        struct pinctrl_state *state;
        struct list_head dt_maps;
+       struct kref users;
 };
 
 /**
@@ -148,6 +155,18 @@ struct pin_desc {
 #endif
 };
 
+/**
+ * struct pinctrl_maps - a list item containing part of the mapping table
+ * @node: mapping table list node
+ * @maps: array of mapping table entries
+ * @num_maps: the number of entries in @maps
+ */
+struct pinctrl_maps {
+       struct list_head node;
+       struct pinctrl_map const *maps;
+       unsigned num_maps;
+};
+
 struct pinctrl_dev *get_pinctrl_dev_from_devname(const char *dev_name);
 int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name);
 const char *pin_get_name(struct pinctrl_dev *pctldev, const unsigned pin);
@@ -164,5 +183,15 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
                         bool dup, bool locked);
 void pinctrl_unregister_map(struct pinctrl_map const *map);
 
+extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev);
+extern int pinctrl_force_default(struct pinctrl_dev *pctldev);
+
 extern struct mutex pinctrl_mutex;
 extern struct list_head pinctrldev_list;
+extern struct list_head pinctrl_maps;
+
+#define for_each_maps(_maps_node_, _i_, _map_) \
+       list_for_each_entry(_maps_node_, &pinctrl_maps, node) \
+               for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \
+                       _i_ < _maps_node_->num_maps; \
+                       _i_++, _map_ = &_maps_node_->maps[_i_])
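
pinctrl_force_sleep() and pinctrl_force_default() are only exposed through this internal core.h, so they are aimed at in-tree pin controller drivers. A hedged sketch of how such a driver might wire them into legacy platform PM callbacks to move its hogged pins between states; the foo_* names are assumptions for illustration:

#include <linux/platform_device.h>
#include "core.h"       /* internal drivers/pinctrl header declaring the helpers */

static int foo_pinctrl_suspend(struct platform_device *pdev, pm_message_t state)
{
        struct pinctrl_dev *pctldev = platform_get_drvdata(pdev);

        /* Move all pins hogged by the controller to their "sleep" state */
        return pinctrl_force_sleep(pctldev);
}

static int foo_pinctrl_resume(struct platform_device *pdev)
{
        struct pinctrl_dev *pctldev = platform_get_drvdata(pdev);

        /* Restore the hogged pins to their "default" state */
        return pinctrl_force_default(pctldev);
}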
index fe2d1af7cfa0d0bce2eeb8a8391a4567da70bdd8..fd40a11ad645a38e9a82739c9d0aaa58124db0d0 100644 (file)
@@ -141,6 +141,11 @@ static int dt_to_map_one_config(struct pinctrl *p, const char *statename,
                pctldev = find_pinctrl_by_of_node(np_pctldev);
                if (pctldev)
                        break;
+               /* Do not defer probing of hogs (circular loop) */
+               if (np_pctldev == p->dev->of_node) {
+                       of_node_put(np_pctldev);
+                       return -ENODEV;
+               }
        }
        of_node_put(np_pctldev);
 
index 69aba3697287d02e749fc5c27155c026518601b7..428ea96a94d356a3caee8b971a1df51d8c9564c3 100644 (file)
@@ -588,7 +588,7 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
 {
        const struct of_device_id *match =
                of_match_device(dove_pinctrl_of_match, &pdev->dev);
-       pdev->dev.platform_data = match->data;
+       pdev->dev.platform_data = (void *)match->data;
 
        /*
         * General MPP Configuration Register is part of pdma registers.
index f12084e180579afe19cb3914ff8d4e4db7573eed..cdd483df673eaf5d38d522ba3e8f5ef26f835c3c 100644 (file)
@@ -66,9 +66,9 @@ static struct mvebu_mpp_mode mv88f6xxx_mpp_modes[] = {
                MPP_VAR_FUNCTION(0x5, "sata0", "act",    V(0, 1, 1, 1, 1, 0)),
                MPP_VAR_FUNCTION(0xb, "lcd", "vsync",    V(0, 0, 0, 0, 1, 0))),
        MPP_MODE(6,
-               MPP_VAR_FUNCTION(0x0, "sysrst", "out",   V(1, 1, 1, 1, 1, 1)),
-               MPP_VAR_FUNCTION(0x1, "spi", "mosi",     V(1, 1, 1, 1, 1, 1)),
-               MPP_VAR_FUNCTION(0x2, "ptp", "trig",     V(1, 1, 1, 1, 0, 0))),
+               MPP_VAR_FUNCTION(0x1, "sysrst", "out",   V(1, 1, 1, 1, 1, 1)),
+               MPP_VAR_FUNCTION(0x2, "spi", "mosi",     V(1, 1, 1, 1, 1, 1)),
+               MPP_VAR_FUNCTION(0x3, "ptp", "trig",     V(1, 1, 1, 1, 0, 0))),
        MPP_MODE(7,
                MPP_VAR_FUNCTION(0x0, "gpo", NULL,       V(1, 1, 1, 1, 1, 1)),
                MPP_VAR_FUNCTION(0x1, "pex", "rsto",     V(1, 1, 1, 1, 0, 1)),
@@ -458,7 +458,7 @@ static int kirkwood_pinctrl_probe(struct platform_device *pdev)
 {
        const struct of_device_id *match =
                of_match_device(kirkwood_pinctrl_of_match, &pdev->dev);
-       pdev->dev.platform_data = match->data;
+       pdev->dev.platform_data = (void *)match->data;
        return mvebu_pinctrl_probe(pdev);
 }
 
index 833a36458157dd4c15ce0361ee84ced454cd93f4..06c304ac6f7da2794d38e380bdd51a8a91736182 100644 (file)
@@ -41,11 +41,13 @@ struct pin_config_item conf_items[] = {
        PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL),
        PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL),
        PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL),
-       PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_DISABLE, "input schmitt disabled", NULL),
+       PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL),
        PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL),
        PCONFDUMP(PIN_CONFIG_INPUT_DEBOUNCE, "input debounce", "time units"),
        PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector"),
+       PCONFDUMP(PIN_CONFIG_SLEW_RATE, "slew rate", NULL),
        PCONFDUMP(PIN_CONFIG_LOW_POWER_MODE, "pin low power", "mode"),
+       PCONFDUMP(PIN_CONFIG_OUTPUT, "pin output", "level"),
 };
 
 void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev,
index baee2cc46a17a7c5c328f98c7eaa25f8f10d8fba..ac8d382a79bbfe43de8769ea43d1c13ab2fac036 100644 (file)
@@ -574,6 +574,207 @@ static const struct file_operations pinconf_groups_ops = {
        .release        = single_release,
 };
 
+/* 32-bit read/write resources */
+#define MAX_NAME_LEN 16
+char dbg_pinname[MAX_NAME_LEN]; /* shared: name of the pin */
+char dbg_state_name[MAX_NAME_LEN]; /* shared: state of the pin */
+static u32 dbg_config; /* shared: config to be read/set for the pin & state*/
+
+static int pinconf_dbg_pinname_print(struct seq_file *s, void *d)
+{
+       if (strlen(dbg_pinname))
+               seq_printf(s, "%s\n", dbg_pinname);
+       else
+               seq_printf(s, "No pin name set\n");
+       return 0;
+}
+
+static int pinconf_dbg_pinname_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, pinconf_dbg_pinname_print, inode->i_private);
+}
+
+static int pinconf_dbg_pinname_write(struct file *file,
+       const char __user *user_buf, size_t count, loff_t *ppos)
+{
+       int err;
+
+       if (count > MAX_NAME_LEN)
+               return -EINVAL;
+
+       err = sscanf(user_buf, "%15s", dbg_pinname);
+
+       if (err != 1)
+               return -EINVAL;
+
+       return count;
+}
+
+static const struct file_operations pinconf_dbg_pinname_fops = {
+       .open = pinconf_dbg_pinname_open,
+       .write = pinconf_dbg_pinname_write,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+       .owner = THIS_MODULE,
+};
+
+static int pinconf_dbg_state_print(struct seq_file *s, void *d)
+{
+       if (strlen(dbg_state_name))
+               seq_printf(s, "%s\n", dbg_state_name);
+       else
+               seq_printf(s, "No pin state set\n");
+       return 0;
+}
+
+static int pinconf_dbg_state_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, pinconf_dbg_state_print, inode->i_private);
+}
+
+static int pinconf_dbg_state_write(struct file *file,
+       const char __user *user_buf, size_t count, loff_t *ppos)
+{
+       int err;
+
+       if (count > MAX_NAME_LEN)
+               return -EINVAL;
+
+       err = sscanf(user_buf, "%15s", dbg_state_name);
+
+       if (err != 1)
+               return -EINVAL;
+
+       return count;
+}
+
+static const struct file_operations pinconf_dbg_pinstate_fops = {
+       .open = pinconf_dbg_state_open,
+       .write = pinconf_dbg_state_write,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+       .owner = THIS_MODULE,
+};
+
+/**
+ * pinconf_dbg_config_print() - display the pinctrl config from the pinctrl
+ * map, of a pin/state pair based on pinname and state that have been
+ * selected with the debugfs entries pinconf-name and pinconf-state
+ * @s: the seq_file the config description is written to
+ * @d: not used
+ */
+static int pinconf_dbg_config_print(struct seq_file *s, void *d)
+{
+       struct pinctrl_maps *maps_node;
+       struct pinctrl_map const *map;
+       struct pinctrl_dev *pctldev = NULL;
+       struct pinconf_ops *confops = NULL;
+       int i, j;
+       bool found = false;
+
+       mutex_lock(&pinctrl_mutex);
+
+       /* Parse the pinctrl map and look for the elected pin/state */
+       for_each_maps(maps_node, i, map) {
+               if (map->type != PIN_MAP_TYPE_CONFIGS_PIN)
+                       continue;
+
+               if (strncmp(map->name, dbg_state_name, MAX_NAME_LEN) != 0)
+                       continue;
+
+               for (j = 0; j < map->data.configs.num_configs; j++) {
+                       if (0 == strncmp(map->data.configs.group_or_pin,
+                                               dbg_pinname, MAX_NAME_LEN)) {
+                               /* We found the right pin / state, read the
+                                * config and store the pctldev */
+                               dbg_config = map->data.configs.configs[j];
+                               pctldev = get_pinctrl_dev_from_devname
+                                       (map->ctrl_dev_name);
+                               found = true;
+                               break;
+                       }
+               }
+       }
+
+       mutex_unlock(&pinctrl_mutex);
+
+       if (found) {
+               seq_printf(s, "Config of %s in state %s: 0x%08X\n", dbg_pinname,
+                                dbg_state_name, dbg_config);
+
+               if (pctldev)
+                       confops = pctldev->desc->confops;
+
+               if (confops && confops->pin_config_config_dbg_show)
+                       confops->pin_config_config_dbg_show(pctldev,
+                                       s, dbg_config);
+       } else {
+               seq_printf(s, "No pin found for defined name/state\n");
+       }
+
+       return 0;
+}
+
+static int pinconf_dbg_config_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, pinconf_dbg_config_print, inode->i_private);
+}
+
+/**
+ * pinconf_dbg_config_write() - overwrite the pinctrl config in the pinctrl
+ * map, of a pin/state pair based on pinname and state that have been
+ * selected with the debugfs entries pinconf-name and pinconf-state
+ */
+static int pinconf_dbg_config_write(struct file *file,
+       const char __user *user_buf, size_t count, loff_t *ppos)
+{
+       int err;
+       unsigned long config;
+       struct pinctrl_maps *maps_node;
+       struct pinctrl_map const *map;
+       int i, j;
+
+       err = kstrtoul_from_user(user_buf, count, 0, &config);
+
+       if (err)
+               return err;
+
+       dbg_config = config;
+
+       mutex_lock(&pinctrl_mutex);
+
+       /* Parse the pinctrl map and look for the selected pin/state */
+       for_each_maps(maps_node, i, map) {
+               if (map->type != PIN_MAP_TYPE_CONFIGS_PIN)
+                       continue;
+
+               if (strncmp(map->name, dbg_state_name, MAX_NAME_LEN) != 0)
+                       continue;
+
+               /*  we found the right pin / state, so overwrite config */
+               for (j = 0; j < map->data.configs.num_configs; j++) {
+                       if (strncmp(map->data.configs.group_or_pin, dbg_pinname,
+                                               MAX_NAME_LEN) == 0)
+                               map->data.configs.configs[j] = dbg_config;
+               }
+       }
+
+       mutex_unlock(&pinctrl_mutex);
+
+       return count;
+}
+
+static const struct file_operations pinconf_dbg_pinconfig_fops = {
+       .open = pinconf_dbg_config_open,
+       .write = pinconf_dbg_config_write,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+       .owner = THIS_MODULE,
+};
+
 void pinconf_init_device_debugfs(struct dentry *devroot,
                         struct pinctrl_dev *pctldev)
 {
@@ -581,6 +782,12 @@ void pinconf_init_device_debugfs(struct dentry *devroot,
                            devroot, pctldev, &pinconf_pins_ops);
        debugfs_create_file("pinconf-groups", S_IFREG | S_IRUGO,
                            devroot, pctldev, &pinconf_groups_ops);
+       debugfs_create_file("pinconf-name", (S_IRUGO | S_IWUSR | S_IWGRP),
+                           devroot, pctldev, &pinconf_dbg_pinname_fops);
+       debugfs_create_file("pinconf-state",  (S_IRUGO | S_IWUSR | S_IWGRP),
+                           devroot, pctldev, &pinconf_dbg_pinstate_fops);
+       debugfs_create_file("pinconf-config",  (S_IRUGO | S_IWUSR | S_IWGRP),
+                           devroot, pctldev, &pinconf_dbg_pinconfig_fops);
 }
 
 #endif
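
From user space the three new debugfs files are used in sequence: pick a pin with pinconf-name, pick a map/state name with pinconf-state, then read or write pinconf-config. A small sketch of that flow; the debugfs mount point and the pinctrl directory name are assumptions, and GPIO13_W17 is just one of the AB8500 pins defined in the driver added further down:

#include <stdio.h>

/* Assumed debugfs location and pinctrl device directory */
#define PINCONF_DIR "/sys/kernel/debug/pinctrl/pinctrl-ab8500"

static int write_str(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fprintf(f, "%s\n", val);
        return fclose(f);
}

int main(void)
{
        char line[128];
        FILE *f;

        /* Select the pin and the mapping/state name to inspect */
        if (write_str(PINCONF_DIR "/pinconf-name", "GPIO13_W17") ||
            write_str(PINCONF_DIR "/pinconf-state", "default"))
                return 1;

        /* Dump the 32-bit config currently stored in the pinctrl map */
        f = fopen(PINCONF_DIR "/pinconf-config", "r");
        if (!f)
                return 1;
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);

        return 0;
}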
diff --git a/drivers/pinctrl/pinctrl-ab8500.c b/drivers/pinctrl/pinctrl-ab8500.c
new file mode 100644 (file)
index 0000000..3b471d8
--- /dev/null
@@ -0,0 +1,484 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Author: Patrice Chotard <patrice.chotard@stericsson.com> for ST-Ericsson.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include "pinctrl-abx500.h"
+
+/* All the pins that can be used for GPIO and some other functions */
+#define ABX500_GPIO(offset)            (offset)
+
+#define AB8500_PIN_T10         ABX500_GPIO(1)
+#define AB8500_PIN_T9          ABX500_GPIO(2)
+#define AB8500_PIN_U9          ABX500_GPIO(3)
+#define AB8500_PIN_W2          ABX500_GPIO(4)
+/* hole */
+#define AB8500_PIN_Y18         ABX500_GPIO(6)
+#define AB8500_PIN_AA20                ABX500_GPIO(7)
+#define AB8500_PIN_W18         ABX500_GPIO(8)
+#define AB8500_PIN_AA19                ABX500_GPIO(9)
+#define AB8500_PIN_U17         ABX500_GPIO(10)
+#define AB8500_PIN_AA18                ABX500_GPIO(11)
+#define AB8500_PIN_U16         ABX500_GPIO(12)
+#define AB8500_PIN_W17         ABX500_GPIO(13)
+#define AB8500_PIN_F14         ABX500_GPIO(14)
+#define AB8500_PIN_B17         ABX500_GPIO(15)
+#define AB8500_PIN_F15         ABX500_GPIO(16)
+#define AB8500_PIN_P5          ABX500_GPIO(17)
+#define AB8500_PIN_R5          ABX500_GPIO(18)
+#define AB8500_PIN_U5          ABX500_GPIO(19)
+#define AB8500_PIN_T5          ABX500_GPIO(20)
+#define AB8500_PIN_H19         ABX500_GPIO(21)
+#define AB8500_PIN_G20         ABX500_GPIO(22)
+#define AB8500_PIN_G19         ABX500_GPIO(23)
+#define AB8500_PIN_T14         ABX500_GPIO(24)
+#define AB8500_PIN_R16         ABX500_GPIO(25)
+#define AB8500_PIN_M16         ABX500_GPIO(26)
+#define AB8500_PIN_J6          ABX500_GPIO(27)
+#define AB8500_PIN_K6          ABX500_GPIO(28)
+#define AB8500_PIN_G6          ABX500_GPIO(29)
+#define AB8500_PIN_H6          ABX500_GPIO(30)
+#define AB8500_PIN_F5          ABX500_GPIO(31)
+#define AB8500_PIN_G5          ABX500_GPIO(32)
+/* hole */
+#define AB8500_PIN_R17         ABX500_GPIO(34)
+#define AB8500_PIN_W15         ABX500_GPIO(35)
+#define AB8500_PIN_A17         ABX500_GPIO(36)
+#define AB8500_PIN_E15         ABX500_GPIO(37)
+#define AB8500_PIN_C17         ABX500_GPIO(38)
+#define AB8500_PIN_E16         ABX500_GPIO(39)
+#define AB8500_PIN_T19         ABX500_GPIO(40)
+#define AB8500_PIN_U19         ABX500_GPIO(41)
+#define AB8500_PIN_U2          ABX500_GPIO(42)
+
+/* indicates the highest GPIO number */
+#define AB8500_GPIO_MAX_NUMBER 42
+
+/*
+ * The pins are named by GPIO number and ball name, even though they can
+ * be used for things other than GPIO; this is the first column in the
+ * data sheet table and is often used on schematics and such.
+ */
+static const struct pinctrl_pin_desc ab8500_pins[] = {
+       PINCTRL_PIN(AB8500_PIN_T10, "GPIO1_T10"),
+       PINCTRL_PIN(AB8500_PIN_T9, "GPIO2_T9"),
+       PINCTRL_PIN(AB8500_PIN_U9, "GPIO3_U9"),
+       PINCTRL_PIN(AB8500_PIN_W2, "GPIO4_W2"),
+       /* hole */
+       PINCTRL_PIN(AB8500_PIN_Y18, "GPIO6_Y18"),
+       PINCTRL_PIN(AB8500_PIN_AA20, "GPIO7_AA20"),
+       PINCTRL_PIN(AB8500_PIN_W18, "GPIO8_W18"),
+       PINCTRL_PIN(AB8500_PIN_AA19, "GPIO9_AA19"),
+       PINCTRL_PIN(AB8500_PIN_U17, "GPIO10_U17"),
+       PINCTRL_PIN(AB8500_PIN_AA18, "GPIO11_AA18"),
+       PINCTRL_PIN(AB8500_PIN_U16, "GPIO12_U16"),
+       PINCTRL_PIN(AB8500_PIN_W17, "GPIO13_W17"),
+       PINCTRL_PIN(AB8500_PIN_F14, "GPIO14_F14"),
+       PINCTRL_PIN(AB8500_PIN_B17, "GPIO15_B17"),
+       PINCTRL_PIN(AB8500_PIN_F15, "GPIO16_F15"),
+       PINCTRL_PIN(AB8500_PIN_P5, "GPIO17_P5"),
+       PINCTRL_PIN(AB8500_PIN_R5, "GPIO18_R5"),
+       PINCTRL_PIN(AB8500_PIN_U5, "GPIO19_U5"),
+       PINCTRL_PIN(AB8500_PIN_T5, "GPIO20_T5"),
+       PINCTRL_PIN(AB8500_PIN_H19, "GPIO21_H19"),
+       PINCTRL_PIN(AB8500_PIN_G20, "GPIO22_G20"),
+       PINCTRL_PIN(AB8500_PIN_G19, "GPIO23_G19"),
+       PINCTRL_PIN(AB8500_PIN_T14, "GPIO24_T14"),
+       PINCTRL_PIN(AB8500_PIN_R16, "GPIO25_R16"),
+       PINCTRL_PIN(AB8500_PIN_M16, "GPIO26_M16"),
+       PINCTRL_PIN(AB8500_PIN_J6, "GPIO27_J6"),
+       PINCTRL_PIN(AB8500_PIN_K6, "GPIO28_K6"),
+       PINCTRL_PIN(AB8500_PIN_G6, "GPIO29_G6"),
+       PINCTRL_PIN(AB8500_PIN_H6, "GPIO30_H6"),
+       PINCTRL_PIN(AB8500_PIN_F5, "GPIO31_F5"),
+       PINCTRL_PIN(AB8500_PIN_G5, "GPIO32_G5"),
+       /* hole */
+       PINCTRL_PIN(AB8500_PIN_R17, "GPIO34_R17"),
+       PINCTRL_PIN(AB8500_PIN_W15, "GPIO35_W15"),
+       PINCTRL_PIN(AB8500_PIN_A17, "GPIO36_A17"),
+       PINCTRL_PIN(AB8500_PIN_E15, "GPIO37_E15"),
+       PINCTRL_PIN(AB8500_PIN_C17, "GPIO38_C17"),
+       PINCTRL_PIN(AB8500_PIN_E16, "GPIO39_E16"),
+       PINCTRL_PIN(AB8500_PIN_T19, "GPIO40_T19"),
+       PINCTRL_PIN(AB8500_PIN_U19, "GPIO41_U19"),
+       PINCTRL_PIN(AB8500_PIN_U2, "GPIO42_U2"),
+};
+
+/*
+ * Maps local GPIO offsets to local pin numbers
+ */
+static const struct abx500_pinrange ab8500_pinranges[] = {
+       ABX500_PINRANGE(1, 4, ABX500_ALT_A),
+       ABX500_PINRANGE(6, 4, ABX500_ALT_A),
+       ABX500_PINRANGE(10, 4, ABX500_DEFAULT),
+       ABX500_PINRANGE(14, 12, ABX500_ALT_A),
+       ABX500_PINRANGE(26, 1, ABX500_DEFAULT),
+       ABX500_PINRANGE(27, 6, ABX500_ALT_A),
+       ABX500_PINRANGE(34, 1, ABX500_ALT_A),
+       ABX500_PINRANGE(35, 1, ABX500_DEFAULT),
+       ABX500_PINRANGE(36, 7, ABX500_ALT_A),
+};
+
+/*
+ * Read the pin group names like this:
+ * sysclkreq2_d_1 = first group of pins for sysclkreq2 on the default function
+ *
+ * The groups are arranged as sets per altfunction column, so we can
+ * mux in one group at a time by selecting the same altfunction for them
+ * all. When functions require pins on different altfunctions, you need
+ * to combine several groups.
+ */
+
+/* default column */
+static const unsigned sysclkreq2_d_1_pins[] = { AB8500_PIN_T10 };
+static const unsigned sysclkreq3_d_1_pins[] = { AB8500_PIN_T9 };
+static const unsigned sysclkreq4_d_1_pins[] = { AB8500_PIN_U9 };
+static const unsigned sysclkreq6_d_1_pins[] = { AB8500_PIN_W2 };
+static const unsigned ycbcr0123_d_1_pins[] = { AB8500_PIN_Y18, AB8500_PIN_AA20,
+                                       AB8500_PIN_W18, AB8500_PIN_AA19};
+static const unsigned gpio10_d_1_pins[] = { AB8500_PIN_U17 };
+static const unsigned gpio11_d_1_pins[] = { AB8500_PIN_AA18 };
+static const unsigned gpio12_d_1_pins[] = { AB8500_PIN_U16 };
+static const unsigned gpio13_d_1_pins[] = { AB8500_PIN_W17 };
+static const unsigned pwmout1_d_1_pins[] = { AB8500_PIN_F14 };
+static const unsigned pwmout2_d_1_pins[] = { AB8500_PIN_B17 };
+static const unsigned pwmout3_d_1_pins[] = { AB8500_PIN_F15 };
+
+/* audio data interface 1*/
+static const unsigned adi1_d_1_pins[] = { AB8500_PIN_P5, AB8500_PIN_R5,
+                                       AB8500_PIN_U5, AB8500_PIN_T5 };
+/* USBUICC */
+static const unsigned usbuicc_d_1_pins[] = { AB8500_PIN_H19, AB8500_PIN_G20,
+                                       AB8500_PIN_G19 };
+static const unsigned sysclkreq7_d_1_pins[] = { AB8500_PIN_T14 };
+static const unsigned sysclkreq8_d_1_pins[] = { AB8500_PIN_R16 };
+static const unsigned gpio26_d_1_pins[] = { AB8500_PIN_M16 };
+/* Digital microphone 1 and 2 */
+static const unsigned dmic12_d_1_pins[] = { AB8500_PIN_J6, AB8500_PIN_K6 };
+/* Digital microphone 3 and 4 */
+static const unsigned dmic34_d_1_pins[] = { AB8500_PIN_G6, AB8500_PIN_H6 };
+/* Digital microphone 5 and 6 */
+static const unsigned dmic56_d_1_pins[] = { AB8500_PIN_F5, AB8500_PIN_G5 };
+static const unsigned extcpena_d_1_pins[] = { AB8500_PIN_R17 };
+static const unsigned gpio35_d_1_pins[] = { AB8500_PIN_W15 };
+/* APE SPI */
+static const unsigned apespi_d_1_pins[] = { AB8500_PIN_A17, AB8500_PIN_E15,
+                                       AB8500_PIN_C17, AB8500_PIN_E16};
+/* modem SDA/SCL */
+static const unsigned modsclsda_d_1_pins[] = { AB8500_PIN_T19, AB8500_PIN_U19 };
+static const unsigned sysclkreq5_d_1_pins[] = { AB8500_PIN_U2 };
+
+/* Altfunction A column */
+static const unsigned gpio1_a_1_pins[] = { AB8500_PIN_T10 };
+static const unsigned gpio2_a_1_pins[] = { AB8500_PIN_T9 };
+static const unsigned gpio3_a_1_pins[] = { AB8500_PIN_U9 };
+static const unsigned gpio4_a_1_pins[] = { AB8500_PIN_W2 };
+static const unsigned gpio6_a_1_pins[] = { AB8500_PIN_Y18 };
+static const unsigned gpio7_a_1_pins[] = { AB8500_PIN_AA20 };
+static const unsigned gpio8_a_1_pins[] = { AB8500_PIN_W18 };
+static const unsigned gpio9_a_1_pins[] = { AB8500_PIN_AA19 };
+/* YCbCr4 YCbCr5 YCbCr6 YCbCr7*/
+static const unsigned ycbcr4567_a_1_pins[] = { AB8500_PIN_U17, AB8500_PIN_AA18,
+                                       AB8500_PIN_U16, AB8500_PIN_W17};
+static const unsigned gpio14_a_1_pins[] = { AB8500_PIN_F14 };
+static const unsigned gpio15_a_1_pins[] = { AB8500_PIN_B17 };
+static const unsigned gpio16_a_1_pins[] = { AB8500_PIN_F15 };
+static const unsigned gpio17_a_1_pins[] = { AB8500_PIN_P5 };
+static const unsigned gpio18_a_1_pins[] = { AB8500_PIN_R5 };
+static const unsigned gpio19_a_1_pins[] = { AB8500_PIN_U5 };
+static const unsigned gpio20_a_1_pins[] = { AB8500_PIN_T5 };
+static const unsigned gpio21_a_1_pins[] = { AB8500_PIN_H19 };
+static const unsigned gpio22_a_1_pins[] = { AB8500_PIN_G20 };
+static const unsigned gpio23_a_1_pins[] = { AB8500_PIN_G19 };
+static const unsigned gpio24_a_1_pins[] = { AB8500_PIN_T14 };
+static const unsigned gpio25_a_1_pins[] = { AB8500_PIN_R16 };
+static const unsigned gpio27_a_1_pins[] = { AB8500_PIN_J6 };
+static const unsigned gpio28_a_1_pins[] = { AB8500_PIN_K6 };
+static const unsigned gpio29_a_1_pins[] = { AB8500_PIN_G6 };
+static const unsigned gpio30_a_1_pins[] = { AB8500_PIN_H6 };
+static const unsigned gpio31_a_1_pins[] = { AB8500_PIN_F5 };
+static const unsigned gpio32_a_1_pins[] = { AB8500_PIN_G5 };
+static const unsigned gpio34_a_1_pins[] = { AB8500_PIN_R17 };
+static const unsigned gpio36_a_1_pins[] = { AB8500_PIN_A17 };
+static const unsigned gpio37_a_1_pins[] = { AB8500_PIN_E15 };
+static const unsigned gpio38_a_1_pins[] = { AB8500_PIN_C17 };
+static const unsigned gpio39_a_1_pins[] = { AB8500_PIN_E16 };
+static const unsigned gpio40_a_1_pins[] = { AB8500_PIN_T19 };
+static const unsigned gpio41_a_1_pins[] = { AB8500_PIN_U19 };
+static const unsigned gpio42_a_1_pins[] = { AB8500_PIN_U2 };
+
+/* Altfunction B column */
+static const unsigned hiqclkena_b_1_pins[] = { AB8500_PIN_U17 };
+static const unsigned usbuiccpd_b_1_pins[] = { AB8500_PIN_AA18 };
+static const unsigned i2ctrig1_b_1_pins[] = { AB8500_PIN_U16 };
+static const unsigned i2ctrig2_b_1_pins[] = { AB8500_PIN_W17 };
+
+/* Altfunction C column */
+static const unsigned usbvdat_c_1_pins[] = { AB8500_PIN_W17 };
+
+
+#define AB8500_PIN_GROUP(a, b) { .name = #a, .pins = a##_pins,         \
+                       .npins = ARRAY_SIZE(a##_pins), .altsetting = b }
+
+static const struct abx500_pingroup ab8500_groups[] = {
+       /* default column */
+       AB8500_PIN_GROUP(sysclkreq2_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(sysclkreq3_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(sysclkreq4_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(sysclkreq6_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(ycbcr0123_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(gpio10_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(gpio11_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(gpio12_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(gpio13_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(pwmout1_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(pwmout2_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(pwmout3_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(adi1_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(usbuicc_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(sysclkreq7_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(sysclkreq8_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(gpio26_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(dmic12_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(dmic34_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(dmic56_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(extcpena_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(gpio35_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(apespi_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(modsclsda_d_1, ABX500_DEFAULT),
+       AB8500_PIN_GROUP(sysclkreq5_d_1, ABX500_DEFAULT),
+       /* Altfunction A column */
+       AB8500_PIN_GROUP(gpio1_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio2_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio3_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio4_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio6_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio7_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio8_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio9_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(ycbcr4567_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio14_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio15_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio16_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio17_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio18_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio19_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio20_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio21_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio22_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio23_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio24_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio25_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio27_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio28_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio29_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio30_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio31_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio32_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio34_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio36_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio37_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio38_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio39_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio40_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio41_a_1, ABX500_ALT_A),
+       AB8500_PIN_GROUP(gpio42_a_1, ABX500_ALT_A),
+       /* Altfunction B column */
+       AB8500_PIN_GROUP(hiqclkena_b_1, ABX500_ALT_B),
+       AB8500_PIN_GROUP(usbuiccpd_b_1, ABX500_ALT_B),
+       AB8500_PIN_GROUP(i2ctrig1_b_1, ABX500_ALT_B),
+       AB8500_PIN_GROUP(i2ctrig2_b_1, ABX500_ALT_B),
+       /* Altfunction C column */
+       AB8500_PIN_GROUP(usbvdat_c_1, ABX500_ALT_C),
+};
+
+/* We use this macro to define the groups applicable to a function */
+#define AB8500_FUNC_GROUPS(a, b...)       \
+static const char * const a##_groups[] = { b };
+
+AB8500_FUNC_GROUPS(sysclkreq, "sysclkreq2_d_1", "sysclkreq3_d_1",
+               "sysclkreq4_d_1", "sysclkreq5_d_1", "sysclkreq6_d_1",
+               "sysclkreq7_d_1", "sysclkreq8_d_1");
+AB8500_FUNC_GROUPS(ycbcr, "ycbcr0123_d_1", "ycbcr4567_a_1");
+AB8500_FUNC_GROUPS(gpio, "gpio1_a_1", "gpio2_a_1", "gpio3_a_1", "gpio4_a_1",
+               "gpio6_a_1", "gpio7_a_1", "gpio8_a_1", "gpio9_a_1",
+               "gpio10_d_1", "gpio11_d_1", "gpio12_d_1", "gpio13_d_1",
+               "gpio14_a_1", "gpio15_a_1", "gpio16_a_1", "gpio17_a_1",
+               "gpio18_a_1", "gpio19_a_1", "gpio20_a_1", "gpio21_a_1",
+               "gpio22_a_1", "gpio23_a_1", "gpio24_a_1", "gpio25_a_1",
+               "gpio26_d_1", "gpio27_a_1", "gpio28_a_1", "gpio29_a_1",
+               "gpio30_a_1", "gpio31_a_1", "gpio32_a_1", "gpio34_a_1",
+               "gpio35_d_1", "gpio36_a_1", "gpio37_a_1", "gpio38_a_1",
+               "gpio39_a_1", "gpio40_a_1", "gpio41_a_1", "gpio42_a_1");
+AB8500_FUNC_GROUPS(pwmout, "pwmout1_d_1", "pwmout2_d_1", "pwmout3_d_1");
+AB8500_FUNC_GROUPS(adi1, "adi1_d_1");
+AB8500_FUNC_GROUPS(usbuicc, "usbuicc_d_1", "usbuiccpd_b_1");
+AB8500_FUNC_GROUPS(dmic, "dmic12_d_1", "dmic34_d_1", "dmic56_d_1");
+AB8500_FUNC_GROUPS(extcpena, "extcpena_d_1");
+AB8500_FUNC_GROUPS(apespi, "apespi_d_1");
+AB8500_FUNC_GROUPS(modsclsda, "modsclsda_d_1");
+AB8500_FUNC_GROUPS(hiqclkena, "hiqclkena_b_1");
+AB8500_FUNC_GROUPS(i2ctrig, "i2ctrig1_b_1", "i2ctrig2_b_1");
+AB8500_FUNC_GROUPS(usbvdat, "usbvdat_c_1");
+
+#define FUNCTION(fname)                                        \
+       {                                               \
+               .name = #fname,                         \
+               .groups = fname##_groups,               \
+               .ngroups = ARRAY_SIZE(fname##_groups),  \
+       }
+
+static const struct abx500_function ab8500_functions[] = {
+       FUNCTION(sysclkreq),
+       FUNCTION(ycbcr),
+       FUNCTION(gpio),
+       FUNCTION(pwmout),
+       FUNCTION(adi1),
+       FUNCTION(usbuicc),
+       FUNCTION(dmic),
+       FUNCTION(extcpena),
+       FUNCTION(apespi),
+       FUNCTION(modsclsda),
+       FUNCTION(hiqclkena),
+       FUNCTION(i2ctrig),
+       FUNCTION(usbvdat),
+};
+
+/*
+ * This table translates what is in the AB8500 specification regarding the
+ * balls' alternate functions (as for DB: default, ALT_A, ALT_B and ALT_C).
+ * ALTERNATE_FUNCTIONS(GPIO_NUMBER, GPIOSEL bit, ALTERNATFUNC bit1,
+ * ALTERNATEFUNC bit2, ALTA val, ALTB val, ALTC val),
+ *
+ * example :
+ *
+ *     ALTERNATE_FUNCTIONS(13,     4,      3,      4, 0, 1, 2),
+ *     means that pin AB8500_PIN_W17 (pin 13) supports 4 muxes (default,
+ *     ALT_A, ALT_B and ALT_C), so the GPIOSEL and ALTERNATFUNC registers
+ *     are used to select the mux. The ALTA, ALTB and ALTC values indicate
+ *     what to write into the ALTERNATFUNC register. We need to specify
+ *     these values because the SoC designers did not apply the same logic
+ *     for selecting the mux across the ABx500 family.
+ *
+ *     As this pin supports at least the ALT_B mux, the default mux is
+ *     selected by writing 1 to the GPIOSEL bit:
+ *
+ *             | GPIOSEL bit=4 | alternatfunc bit2=4 | alternatfunc bit1=3
+ *     default |       1       |          0          |          0
+ *     alt_A   |       0       |          0          |          0
+ *     alt_B   |       0       |          0          |          1
+ *     alt_C   |       0       |          1          |          0
+ *
+ *     ALTERNATE_FUNCTIONS(8,      7, UNUSED, UNUSED),
+ *     means that pin AB8500_PIN_W18 (pin 8) supports 2 muxes, so only the
+ *     GPIOSEL register is used to select the mux. As this pin does not
+ *     support the ALT_B mux, the default mux is selected by writing 0 to
+ *     the GPIOSEL bit:
+ *
+ *             | GPIOSEL bit=7 | alternatfunc bit2=  | alternatfunc bit1=
+ *     default |       0       |          0          |          0
+ *     alt_A   |       1       |          0          |          0
+ */
+
+struct alternate_functions ab8500_alternate_functions[AB8500_GPIO_MAX_NUMBER + 1] = {
+       ALTERNATE_FUNCTIONS(0, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO0 */
+       ALTERNATE_FUNCTIONS(1,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO1, altA controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(2,      1, UNUSED, UNUSED, 0, 0, 0), /* GPIO2, altA controlled by bit 1 */
+       ALTERNATE_FUNCTIONS(3,      2, UNUSED, UNUSED, 0, 0, 0), /* GPIO3, altA controlled by bit 2*/
+       ALTERNATE_FUNCTIONS(4,      3, UNUSED, UNUSED, 0, 0, 0), /* GPIO4, altA controlled by bit 3*/
+       /* bit 4 reserved */
+       ALTERNATE_FUNCTIONS(5, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO5 */
+       ALTERNATE_FUNCTIONS(6,      5, UNUSED, UNUSED, 0, 0, 0), /* GPIO6, altA controlled by bit 5*/
+       ALTERNATE_FUNCTIONS(7,      6, UNUSED, UNUSED, 0, 0, 0), /* GPIO7, altA controlled by bit 6*/
+       ALTERNATE_FUNCTIONS(8,      7, UNUSED, UNUSED, 0, 0, 0), /* GPIO8, altA controlled by bit 7*/
+
+       ALTERNATE_FUNCTIONS(9,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO9, altA controlled by bit 0*/
+       ALTERNATE_FUNCTIONS(10,     1,      0, UNUSED, 0, 1, 0), /* GPIO10, altA and altB controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(11,     2,      1, UNUSED, 0, 1, 0), /* GPIO11, altA and altB controlled by bit 1 */
+       ALTERNATE_FUNCTIONS(12,     3,      2, UNUSED, 0, 1, 0), /* GPIO12, altA and altB controlled by bit 2 */
+       ALTERNATE_FUNCTIONS(13,     4,      3,      4, 0, 1, 2), /* GPIO13, altA altB and altC controlled by bit 3 and 4 */
+       ALTERNATE_FUNCTIONS(14,     5, UNUSED, UNUSED, 0, 0, 0), /* GPIO14, altA controlled by bit 5 */
+       ALTERNATE_FUNCTIONS(15,     6, UNUSED, UNUSED, 0, 0, 0), /* GPIO15, altA controlled by bit 6 */
+       ALTERNATE_FUNCTIONS(16,     7, UNUSED, UNUSED, 0, 0, 0), /* GPIO16, altA controlled by bit 7 */
+       /*
+        * pins 17 to 20 are a special case; only bit 0 is used to select the
+        * alternate function for these 4 pins.
+        * bits 1 to 3 are reserved
+        */
+       ALTERNATE_FUNCTIONS(17,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO17, altA controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(18,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO18, altA controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(19,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO19, altA controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(20,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO20, altA controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(21,      4, UNUSED, UNUSED, 0, 0, 0), /* GPIO21, altA controlled by bit 4 */
+       ALTERNATE_FUNCTIONS(22,      5, UNUSED, UNUSED, 0, 0, 0), /* GPIO22, altA controlled by bit 5 */
+       ALTERNATE_FUNCTIONS(23,      6, UNUSED, UNUSED, 0, 0, 0), /* GPIO23, altA controlled by bit 6 */
+       ALTERNATE_FUNCTIONS(24,      7, UNUSED, UNUSED, 0, 0, 0), /* GPIO24, altA controlled by bit 7 */
+
+       ALTERNATE_FUNCTIONS(25,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO25, altA controlled by bit 0 */
+       /* pin 26 special case, no alternate function, bit 1 reserved */
+       ALTERNATE_FUNCTIONS(26, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* GPIO26 */
+       ALTERNATE_FUNCTIONS(27,      2, UNUSED, UNUSED, 0, 0, 0), /* GPIO27, altA controlled by bit 2 */
+       ALTERNATE_FUNCTIONS(28,      3, UNUSED, UNUSED, 0, 0, 0), /* GPIO28, altA controlled by bit 3 */
+       ALTERNATE_FUNCTIONS(29,      4, UNUSED, UNUSED, 0, 0, 0), /* GPIO29, altA controlled by bit 4 */
+       ALTERNATE_FUNCTIONS(30,      5, UNUSED, UNUSED, 0, 0, 0), /* GPIO30, altA controlled by bit 5 */
+       ALTERNATE_FUNCTIONS(31,      6, UNUSED, UNUSED, 0, 0, 0), /* GPIO31, altA controlled by bit 6 */
+       ALTERNATE_FUNCTIONS(32,      7, UNUSED, UNUSED, 0, 0, 0), /* GPIO32, altA controlled by bit 7 */
+
+       ALTERNATE_FUNCTIONS(33, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO33 */
+       ALTERNATE_FUNCTIONS(34,      1, UNUSED, UNUSED, 0, 0, 0), /* GPIO34, altA controlled by bit 1 */
+       /* pin 35 special case, no alternate function, bit 2 reserved */
+       ALTERNATE_FUNCTIONS(35, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* GPIO35 */
+       ALTERNATE_FUNCTIONS(36,      3, UNUSED, UNUSED, 0, 0, 0), /* GPIO36, altA controlled by bit 3 */
+       ALTERNATE_FUNCTIONS(37,      4, UNUSED, UNUSED, 0, 0, 0), /* GPIO37, altA controlled by bit 4 */
+       ALTERNATE_FUNCTIONS(38,      5, UNUSED, UNUSED, 0, 0, 0), /* GPIO38, altA controlled by bit 5 */
+       ALTERNATE_FUNCTIONS(39,      6, UNUSED, UNUSED, 0, 0, 0), /* GPIO39, altA controlled by bit 6 */
+       ALTERNATE_FUNCTIONS(40,      7, UNUSED, UNUSED, 0, 0, 0), /* GPIO40, altA controlled by bit 7 */
+
+       ALTERNATE_FUNCTIONS(41,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO41, altA controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(42,      1, UNUSED, UNUSED, 0, 0, 0), /* GPIO42, altA controlled by bit 1 */
+};
+
+/*
+ * Only some GPIOs are interrupt capable, and they are
+ * organized in discontiguous clusters:
+ *
+ *     GPIO6 to GPIO13
+ *     GPIO24 and GPIO25
+ *     GPIO36 to GPIO41
+ */
+struct abx500_gpio_irq_cluster ab8500_gpio_irq_cluster[] = {
+       GPIO_IRQ_CLUSTER(6,  13, AB8500_INT_GPIO6R),
+       GPIO_IRQ_CLUSTER(24, 25, AB8500_INT_GPIO24R),
+       GPIO_IRQ_CLUSTER(36, 41, AB8500_INT_GPIO36R),
+};
+
+static struct abx500_pinctrl_soc_data ab8500_soc = {
+       .gpio_ranges = ab8500_pinranges,
+       .gpio_num_ranges = ARRAY_SIZE(ab8500_pinranges),
+       .pins = ab8500_pins,
+       .npins = ARRAY_SIZE(ab8500_pins),
+       .functions = ab8500_functions,
+       .nfunctions = ARRAY_SIZE(ab8500_functions),
+       .groups = ab8500_groups,
+       .ngroups = ARRAY_SIZE(ab8500_groups),
+       .alternate_functions = ab8500_alternate_functions,
+       .gpio_irq_cluster = ab8500_gpio_irq_cluster,
+       .ngpio_irq_cluster = ARRAY_SIZE(ab8500_gpio_irq_cluster),
+       .irq_gpio_rising_offset = AB8500_INT_GPIO6R,
+       .irq_gpio_falling_offset = AB8500_INT_GPIO6F,
+       .irq_gpio_factor = 1,
+};
+
+void abx500_pinctrl_ab8500_init(struct abx500_pinctrl_soc_data **soc)
+{
+       *soc = &ab8500_soc;
+}
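
To see how the groups and functions defined above are meant to be consumed, here is a hedged board-file sketch that muxes the usbuicc function onto its default-column group through the generic mapping API; the consumer device name and the controller dev_name strings are assumptions for illustration only:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pinctrl/machine.h>

static const struct pinctrl_map ab8500_board_pinmap[] __initconst = {
        /* Mux group "usbuicc_d_1" to function "usbuicc" (names from the tables above) */
        PIN_MAP_MUX_GROUP_DEFAULT("ab8500-usb.0",       /* assumed consumer device */
                                  "pinctrl-ab8500",     /* assumed controller dev_name */
                                  "usbuicc_d_1", "usbuicc"),
};

static int __init board_ab8500_pinmaps_init(void)
{
        return pinctrl_register_mappings(ab8500_board_pinmap,
                                         ARRAY_SIZE(ab8500_board_pinmap));
}
arch_initcall(board_ab8500_pinmaps_init);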
diff --git a/drivers/pinctrl/pinctrl-ab8505.c b/drivers/pinctrl/pinctrl-ab8505.c
new file mode 100644 (file)
index 0000000..3a4238e
--- /dev/null
@@ -0,0 +1,380 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Author: Patrice Chotard <patrice.chotard@stericsson.com> for ST-Ericsson.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include "pinctrl-abx500.h"
+
+/* All the pins that can be used for GPIO and some other functions */
+#define ABX500_GPIO(offset)    (offset)
+
+#define AB8505_PIN_N4          ABX500_GPIO(1)
+#define AB8505_PIN_R5          ABX500_GPIO(2)
+#define AB8505_PIN_P5          ABX500_GPIO(3)
+/* hole */
+#define AB8505_PIN_B16         ABX500_GPIO(10)
+#define AB8505_PIN_B17         ABX500_GPIO(11)
+/* hole */
+#define AB8505_PIN_D17         ABX500_GPIO(13)
+#define AB8505_PIN_C16         ABX500_GPIO(14)
+/* hole */
+#define AB8505_PIN_P2          ABX500_GPIO(17)
+#define AB8505_PIN_N3          ABX500_GPIO(18)
+#define AB8505_PIN_T1          ABX500_GPIO(19)
+#define AB8505_PIN_P3          ABX500_GPIO(20)
+/* hole */
+#define AB8505_PIN_H14         ABX500_GPIO(34)
+/* hole */
+#define AB8505_PIN_J15         ABX500_GPIO(40)
+#define AB8505_PIN_J14         ABX500_GPIO(41)
+/* hole */
+#define AB8505_PIN_L4          ABX500_GPIO(50)
+/* hole */
+#define AB8505_PIN_D16         ABX500_GPIO(52)
+#define AB8505_PIN_D15         ABX500_GPIO(53)
+
+/* indicates the highest GPIO number */
+#define AB8505_GPIO_MAX_NUMBER 53
+
+/*
+ * The pins are named by GPIO number and ball name, even though they can
+ * be used for things other than GPIO; this is the first column in the
+ * data sheet table and is often used on schematics and such.
+ */
+static const struct pinctrl_pin_desc ab8505_pins[] = {
+       PINCTRL_PIN(AB8505_PIN_N4, "GPIO1_N4"),
+       PINCTRL_PIN(AB8505_PIN_R5, "GPIO2_R5"),
+       PINCTRL_PIN(AB8505_PIN_P5, "GPIO3_P5"),
+/* hole */
+       PINCTRL_PIN(AB8505_PIN_B16, "GPIO10_B16"),
+       PINCTRL_PIN(AB8505_PIN_B17, "GPIO11_B17"),
+/* hole */
+       PINCTRL_PIN(AB8505_PIN_D17, "GPIO13_D17"),
+       PINCTRL_PIN(AB8505_PIN_C16, "GPIO14_C16"),
+/* hole */
+       PINCTRL_PIN(AB8505_PIN_P2, "GPIO17_P2"),
+       PINCTRL_PIN(AB8505_PIN_N3, "GPIO18_N3"),
+       PINCTRL_PIN(AB8505_PIN_T1, "GPIO19_T1"),
+       PINCTRL_PIN(AB8505_PIN_P3, "GPIO20_P3"),
+/* hole */
+       PINCTRL_PIN(AB8505_PIN_H14, "GPIO34_H14"),
+/* hole */
+       PINCTRL_PIN(AB8505_PIN_J15, "GPIO40_J15"),
+       PINCTRL_PIN(AB8505_PIN_J14, "GPIO41_J14"),
+/* hole */
+       PINCTRL_PIN(AB8505_PIN_L4, "GPIO50_L4"),
+/* hole */
+       PINCTRL_PIN(AB8505_PIN_D16, "GPIO52_D16"),
+       PINCTRL_PIN(AB8505_PIN_D15, "GPIO53_D15"),
+};
+
+/*
+ * Maps local GPIO offsets to local pin numbers
+ */
+static const struct abx500_pinrange ab8505_pinranges[] = {
+       ABX500_PINRANGE(1, 3, ABX500_ALT_A),
+       ABX500_PINRANGE(10, 2, ABX500_DEFAULT),
+       ABX500_PINRANGE(13, 1, ABX500_DEFAULT),
+       ABX500_PINRANGE(14, 1, ABX500_ALT_A),
+       ABX500_PINRANGE(17, 4, ABX500_ALT_A),
+       ABX500_PINRANGE(34, 1, ABX500_ALT_A),
+       ABX500_PINRANGE(40, 2, ABX500_ALT_A),
+       ABX500_PINRANGE(50, 1, ABX500_DEFAULT),
+       ABX500_PINRANGE(52, 2, ABX500_ALT_A),
+};
+
+/*
+ * Read the pin group names like this:
+ * sysclkreq2_d_1 = first group of pins for sysclkreq2 on the default function
+ *
+ * The groups are arranged as sets per altfunction column, so we can
+ * mux in one group at a time by selecting the same altfunction for them
+ * all. When functions require pins on different altfunctions, you need
+ * to combine several groups.
+ */
+
+/* default column */
+static const unsigned sysclkreq2_d_1_pins[] = { AB8505_PIN_N4 };
+static const unsigned sysclkreq3_d_1_pins[] = { AB8505_PIN_R5 };
+static const unsigned sysclkreq4_d_1_pins[] = { AB8505_PIN_P5 };
+static const unsigned gpio10_d_1_pins[] = { AB8505_PIN_B16 };
+static const unsigned gpio11_d_1_pins[] = { AB8505_PIN_B17 };
+static const unsigned gpio13_d_1_pins[] = { AB8505_PIN_D17 };
+static const unsigned pwmout1_d_1_pins[] = { AB8505_PIN_C16 };
+/* audio data interface 2*/
+static const unsigned adi2_d_1_pins[] = { AB8505_PIN_P2, AB8505_PIN_N3,
+                                       AB8505_PIN_T1, AB8505_PIN_P3 };
+static const unsigned extcpena_d_1_pins[] = { AB8505_PIN_H14 };
+/* modem SDA/SCL */
+static const unsigned modsclsda_d_1_pins[] = { AB8505_PIN_J15, AB8505_PIN_J14 };
+static const unsigned gpio50_d_1_pins[] = { AB8505_PIN_L4 };
+static const unsigned resethw_d_1_pins[] = { AB8505_PIN_D16 };
+static const unsigned service_d_1_pins[] = { AB8505_PIN_D15 };
+
+/* Altfunction A column */
+static const unsigned gpio1_a_1_pins[] = { AB8505_PIN_N4 };
+static const unsigned gpio2_a_1_pins[] = { AB8505_PIN_R5 };
+static const unsigned gpio3_a_1_pins[] = { AB8505_PIN_P5 };
+static const unsigned hiqclkena_a_1_pins[] = { AB8505_PIN_B16 };
+static const unsigned pdmclk_a_1_pins[] = { AB8505_PIN_B17 };
+static const unsigned uarttxdata_a_1_pins[] = { AB8505_PIN_D17 };
+static const unsigned gpio14_a_1_pins[] = { AB8505_PIN_C16 };
+static const unsigned gpio17_a_1_pins[] = { AB8505_PIN_P2 };
+static const unsigned gpio18_a_1_pins[] = { AB8505_PIN_N3 };
+static const unsigned gpio19_a_1_pins[] = { AB8505_PIN_T1 };
+static const unsigned gpio20_a_1_pins[] = { AB8505_PIN_P3 };
+static const unsigned gpio34_a_1_pins[] = { AB8505_PIN_H14 };
+static const unsigned gpio40_a_1_pins[] = { AB8505_PIN_J15 };
+static const unsigned gpio41_a_1_pins[] = { AB8505_PIN_J14 };
+static const unsigned uartrxdata_a_1_pins[] = { AB8505_PIN_J14 };
+static const unsigned gpio50_a_1_pins[] = { AB8505_PIN_L4 };
+static const unsigned gpio52_a_1_pins[] = { AB8505_PIN_D16 };
+static const unsigned gpio53_a_1_pins[] = { AB8505_PIN_D15 };
+
+/* Altfunction B column */
+static const unsigned pdmdata_b_1_pins[] = { AB8505_PIN_B16 };
+static const unsigned extvibrapwm1_b_1_pins[] = { AB8505_PIN_D17 };
+static const unsigned extvibrapwm2_b_1_pins[] = { AB8505_PIN_L4 };
+
+/* Altfunction C column */
+static const unsigned usbvdat_c_1_pins[] = { AB8505_PIN_D17 };
+
+#define AB8505_PIN_GROUP(a, b) { .name = #a, .pins = a##_pins,         \
+                       .npins = ARRAY_SIZE(a##_pins), .altsetting = b }
+
+static const struct abx500_pingroup ab8505_groups[] = {
+       AB8505_PIN_GROUP(sysclkreq2_d_1, ABX500_DEFAULT),
+       AB8505_PIN_GROUP(sysclkreq3_d_1, ABX500_DEFAULT),
+       AB8505_PIN_GROUP(sysclkreq4_d_1, ABX500_DEFAULT),
+       AB8505_PIN_GROUP(gpio10_d_1, ABX500_DEFAULT),
+       AB8505_PIN_GROUP(gpio11_d_1, ABX500_DEFAULT),
+       AB8505_PIN_GROUP(gpio13_d_1, ABX500_DEFAULT),
+       AB8505_PIN_GROUP(pwmout1_d_1, ABX500_DEFAULT),
+       AB8505_PIN_GROUP(adi2_d_1, ABX500_DEFAULT),
+       AB8505_PIN_GROUP(extcpena_d_1, ABX500_DEFAULT),
+       AB8505_PIN_GROUP(modsclsda_d_1, ABX500_DEFAULT),
+       AB8505_PIN_GROUP(gpio50_d_1, ABX500_DEFAULT),
+       AB8505_PIN_GROUP(resethw_d_1, ABX500_DEFAULT),
+       AB8505_PIN_GROUP(service_d_1, ABX500_DEFAULT),
+       AB8505_PIN_GROUP(gpio1_a_1, ABX500_ALT_A),
+       AB8505_PIN_GROUP(gpio2_a_1, ABX500_ALT_A),
+       AB8505_PIN_GROUP(gpio3_a_1, ABX500_ALT_A),
+       AB8505_PIN_GROUP(hiqclkena_a_1, ABX500_ALT_A),
+       AB8505_PIN_GROUP(pdmclk_a_1, ABX500_ALT_A),
+       AB8505_PIN_GROUP(uarttxdata_a_1, ABX500_ALT_A),
+       AB8505_PIN_GROUP(gpio14_a_1, ABX500_ALT_A),
+       AB8505_PIN_GROUP(gpio17_a_1, ABX500_ALT_A),
+       AB8505_PIN_GROUP(gpio18_a_1, ABX500_ALT_A),
+       AB8505_PIN_GROUP(gpio19_a_1, ABX500_ALT_A),
+       AB8505_PIN_GROUP(gpio20_a_1, ABX500_ALT_A),
+       AB8505_PIN_GROUP(gpio34_a_1, ABX500_ALT_A),
+       AB8505_PIN_GROUP(gpio40_a_1, ABX500_ALT_A),
+       AB8505_PIN_GROUP(gpio41_a_1, ABX500_ALT_A),
+       AB8505_PIN_GROUP(uartrxdata_a_1, ABX500_ALT_A),
+       AB8505_PIN_GROUP(gpio52_a_1, ABX500_ALT_A),
+       AB8505_PIN_GROUP(gpio53_a_1, ABX500_ALT_A),
+       AB8505_PIN_GROUP(pdmdata_b_1, ABX500_ALT_B),
+       AB8505_PIN_GROUP(extvibrapwm1_b_1, ABX500_ALT_B),
+       AB8505_PIN_GROUP(extvibrapwm2_b_1, ABX500_ALT_B),
+       AB8505_PIN_GROUP(usbvdat_c_1, ABX500_ALT_C),
+};
+
+/* We use this macro to define the groups applicable to a function */
+#define AB8505_FUNC_GROUPS(a, b...)       \
+static const char * const a##_groups[] = { b };
+
+AB8505_FUNC_GROUPS(sysclkreq, "sysclkreq2_d_1", "sysclkreq3_d_1",
+               "sysclkreq4_d_1");
+AB8505_FUNC_GROUPS(gpio, "gpio1_a_1", "gpio2_a_1", "gpio3_a_1",
+               "gpio10_d_1", "gpio11_d_1", "gpio13_d_1", "gpio14_a_1",
+               "gpio17_a_1", "gpio18_a_1", "gpio19_a_1", "gpio20_a_1",
+               "gpio34_a_1", "gpio40_a_1", "gpio41_a_1", "gpio50_d_1",
+               "gpio52_a_1", "gpio53_a_1");
+AB8505_FUNC_GROUPS(pwmout, "pwmout1_d_1");
+AB8505_FUNC_GROUPS(adi2, "adi2_d_1");
+AB8505_FUNC_GROUPS(extcpena, "extcpena_d_1");
+AB8505_FUNC_GROUPS(modsclsda, "modsclsda_d_1");
+AB8505_FUNC_GROUPS(resethw, "resethw_d_1");
+AB8505_FUNC_GROUPS(service, "service_d_1");
+AB8505_FUNC_GROUPS(hiqclkena, "hiqclkena_a_1");
+AB8505_FUNC_GROUPS(pdm, "pdmclk_a_1", "pdmdata_b_1");
+AB8505_FUNC_GROUPS(uartdata, "uarttxdata_a_1", "uartrxdata_a_1");
+AB8505_FUNC_GROUPS(extvibra, "extvibrapwm1_b_1", "extvibrapwm2_b_1");
+AB8505_FUNC_GROUPS(usbvdat, "usbvdat_c_1");
+
+#define FUNCTION(fname)                                        \
+       {                                               \
+               .name = #fname,                         \
+               .groups = fname##_groups,               \
+               .ngroups = ARRAY_SIZE(fname##_groups),  \
+       }
+
+static const struct abx500_function ab8505_functions[] = {
+       FUNCTION(sysclkreq),
+       FUNCTION(gpio),
+       FUNCTION(pwmout),
+       FUNCTION(adi2),
+       FUNCTION(extcpena),
+       FUNCTION(modsclsda),
+       FUNCTION(resethw),
+       FUNCTION(service),
+       FUNCTION(hiqclkena),
+       FUNCTION(pdm),
+       FUNCTION(uartdata),
+       FUNCTION(extvibra),
+       FUNCTION(usbvdat),
+};
+
+/*
+ * This table translates what is in the AB8505 specification regarding the
+ * ball alternate functions (as for DB, default, ALT_A, ALT_B and ALT_C).
+ * ALTERNATE_FUNCTIONS(GPIO_NUMBER, GPIOSEL bit, ALTERNATFUNC bit1,
+ * ALTERNATFUNC bit2, ALTA val, ALTB val, ALTC val),
+ *
+ * Example:
+ *
+ *     ALTERNATE_FUNCTIONS(13,     4,      3,      4, 1, 0, 2),
+ *     means that pin AB8505_PIN_D17 (pin 13) supports 4 muxes (default,
+ *     ALT_A, ALT_B and ALT_C), so the GPIOSEL and ALTERNATFUNC registers are
+ *     used to select the mux. The ALTA, ALTB and ALTC values indicate what
+ *     to write into the ALTERNATFUNC register. We need to specify these
+ *     values because the SoC designers did not apply the same logic for
+ *     selecting the mux across the ABx500 family.
+ *
+ *     As this pin supports at least the ALT_B mux, the default mux is
+ *     selected by writing 1 to the GPIOSEL bit:
+ *
+ *             | GPIOSEL bit=4 | alternatfunc bit2=4 | alternatfunc bit1=3
+ *     default |       1       |          0          |          0
+ *     alt_A   |       0       |          0          |          1
+ *     alt_B   |       0       |          0          |          0
+ *     alt_C   |       0       |          1          |          0
+ *
+ *     ALTERNATE_FUNCTIONS(1,      0, UNUSED, UNUSED),
+ *     means that pin AB8505_PIN_N4 (pin 1) supports 2 muxes, so only the
+ *     GPIOSEL register is used to select the mux. As this pin does not
+ *     support at least the ALT_B mux, the default mux is selected by
+ *     writing 0 to the GPIOSEL bit:
+ *
+ *             | GPIOSEL bit=0 | alternatfunc bit2=  | alternatfunc bit1=
+ *     default |       0       |          0          |          0
+ *     alt_A   |       1       |          0          |          0
+ */
+
+struct alternate_functions ab8505_alternate_functions[AB8505_GPIO_MAX_NUMBER + 1] = {
+       ALTERNATE_FUNCTIONS(0, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO0 */
+       ALTERNATE_FUNCTIONS(1,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO1, altA controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(2,      1, UNUSED, UNUSED, 0, 0, 0), /* GPIO2, altA controlled by bit 1 */
+       ALTERNATE_FUNCTIONS(3,      2, UNUSED, UNUSED, 0, 0, 0), /* GPIO3, altA controlled by bit 2*/
+       ALTERNATE_FUNCTIONS(4, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO4, bit 3 reserved */
+       ALTERNATE_FUNCTIONS(5, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO5, bit 4 reserved */
+       ALTERNATE_FUNCTIONS(6, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO6, bit 5 reserved */
+       ALTERNATE_FUNCTIONS(7, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO7, bit 6 reserved */
+       ALTERNATE_FUNCTIONS(8, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO8, bit 7 reserved */
+
+       ALTERNATE_FUNCTIONS(9, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO9, bit 0 reserved */
+       ALTERNATE_FUNCTIONS(10,      1,      0, UNUSED, 1, 0, 0), /* GPIO10, altA and altB controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(11,      2, UNUSED, UNUSED, 0, 0, 0), /* GPIO11, altA controlled by bit 2 */
+       ALTERNATE_FUNCTIONS(12, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO12, bit 3 reserved */
+       ALTERNATE_FUNCTIONS(13,      4,      3,      4, 1, 0, 2), /* GPIO13, altA altB and altC controlled by bit 3 and 4 */
+       ALTERNATE_FUNCTIONS(14,      5, UNUSED, UNUSED, 0, 0, 0), /* GPIO14, altA controlled by bit 5 */
+       ALTERNATE_FUNCTIONS(15, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO15, bit 6 reserved */
+       ALTERNATE_FUNCTIONS(16, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO16, bit 7 reserved */
+       /*
+        * Pins 17 to 20 are a special case: only bit 0 is used to select
+        * the alternate function for these 4 pins.
+        * Bits 1 to 3 are reserved.
+        */
+       ALTERNATE_FUNCTIONS(17,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO17, altA controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(18,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO18, altA controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(19,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO19, altA controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(20,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO20, altA controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(21, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO21, bit 4 reserved */
+       ALTERNATE_FUNCTIONS(22, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO22, bit 5 reserved */
+       ALTERNATE_FUNCTIONS(23, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO23, bit 6 reserved */
+       ALTERNATE_FUNCTIONS(24, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO24, bit 7 reserved */
+
+       ALTERNATE_FUNCTIONS(25, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO25, bit 0 reserved */
+       ALTERNATE_FUNCTIONS(26, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO26, bit 1 reserved */
+       ALTERNATE_FUNCTIONS(27, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO27, bit 2 reserved */
+       ALTERNATE_FUNCTIONS(28, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO28, bit 3 reserved */
+       ALTERNATE_FUNCTIONS(29, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO29, bit 4 reserved */
+       ALTERNATE_FUNCTIONS(30, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO30, bit 5 reserved */
+       ALTERNATE_FUNCTIONS(31, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO31, bit 6 reserved */
+       ALTERNATE_FUNCTIONS(32, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO32, bit 7 reserved */
+
+       ALTERNATE_FUNCTIONS(33, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO33, bit 0 reserved */
+       ALTERNATE_FUNCTIONS(34,      1, UNUSED, UNUSED, 0, 0, 0), /* GPIO34, altA controlled by bit 1 */
+       ALTERNATE_FUNCTIONS(35, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO35, bit 2 reserved */
+       ALTERNATE_FUNCTIONS(36, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO36, bit 3 reserved */
+       ALTERNATE_FUNCTIONS(37, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO37, bit 4 reserved */
+       ALTERNATE_FUNCTIONS(38, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO38, bit 5 reserved */
+       ALTERNATE_FUNCTIONS(39, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO39, bit 6 reserved */
+       ALTERNATE_FUNCTIONS(40,      7, UNUSED, UNUSED, 0, 0, 0), /* GPIO40, altA controlled by bit 7*/
+
+       ALTERNATE_FUNCTIONS(41,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO41, altA controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(42, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO42, bit 1 reserved */
+       ALTERNATE_FUNCTIONS(43, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO43, bit 2 reserved */
+       ALTERNATE_FUNCTIONS(44, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO44, bit 3 reserved */
+       ALTERNATE_FUNCTIONS(45, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO45, bit 4 reserved */
+       ALTERNATE_FUNCTIONS(46, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO46, bit 5 reserved */
+       ALTERNATE_FUNCTIONS(47, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO47, bit 6 reserved */
+       ALTERNATE_FUNCTIONS(48, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO48, bit 7 reserved */
+
+       ALTERNATE_FUNCTIONS(49, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO49, bit 0 reserved */
+       ALTERNATE_FUNCTIONS(50,      1,      2, UNUSED, 1, 0, 0), /* GPIO50, altA controlled by bit 1 */
+       ALTERNATE_FUNCTIONS(51, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO51, bit 2 reserved */
+       ALTERNATE_FUNCTIONS(52,      3, UNUSED, UNUSED, 0, 0, 0), /* GPIO52, altA controlled by bit 3 */
+       ALTERNATE_FUNCTIONS(53,      4, UNUSED, UNUSED, 0, 0, 0), /* GPIO53, altA controlled by bit 4 */
+};
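As a rough illustration of the second case described in the comment above (a pin that only supports the default and ALT_A muxes), an entry of this table boils down to setting one GPIOSEL bit. The sketch below is illustrative only and not part of the patch: the alternate_functions field names are assumed to mirror the ALTERNATE_FUNCTIONS() arguments, and the 0x10 register bank plus the SEL1 register offset are taken from pinctrl-abx500.c further down in this series.

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/mfd/abx500.h>
#include "pinctrl-abx500.h"

#define SKETCH_GPIO_BANK	0x10	/* assumed GPIO register bank (see pinctrl-abx500.c) */
#define SKETCH_GPIO_SEL1	0x00	/* assumed AB8500_GPIO_SEL1_REG offset */

/*
 * Illustrative sketch (not from the driver): select ALT_A on a pin that
 * only has default/ALT_A by setting its GPIOSEL bit.  The field names
 * pin_number and gpiosel_bit are assumptions based on the table above.
 */
static int sketch_select_alt_a(struct device *dev,
			       const struct alternate_functions *af)
{
	/* GPIOSEL1 covers GPIO1..8, GPIOSEL2 covers GPIO9..16, and so on. */
	u8 sel_reg = SKETCH_GPIO_SEL1 + (af->pin_number - 1) / 8;

	return abx500_mask_and_set_register_interruptible(dev,
			SKETCH_GPIO_BANK, sel_reg,
			BIT(af->gpiosel_bit), BIT(af->gpiosel_bit));
}

Pins with ALTERNATFUNC bits (the first example in the comment) additionally need the ALTA/ALTB/ALTC value written into the ALTERNATFUNC register, which is exactly why those values are carried in the table.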
+
+/*
+ * For the AB8505, only some GPIOs are interrupt capable, and they are
+ * organized in discontiguous clusters:
+ *
+ *     GPIO10 to GPIO11
+ *     GPIO13
+ *     GPIO40 and GPIO41
+ *     GPIO50
+ *     GPIO52 to GPIO53
+ */
+struct abx500_gpio_irq_cluster ab8505_gpio_irq_cluster[] = {
+       GPIO_IRQ_CLUSTER(10, 11, AB8500_INT_GPIO10R),
+       GPIO_IRQ_CLUSTER(13, 13, AB8500_INT_GPIO13R),
+       GPIO_IRQ_CLUSTER(40, 41, AB8500_INT_GPIO40R),
+       GPIO_IRQ_CLUSTER(50, 50, AB9540_INT_GPIO50R),
+       GPIO_IRQ_CLUSTER(52, 53, AB9540_INT_GPIO52R),
+};
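The cluster table above carries everything needed to translate an interrupt-capable GPIO into its rising-edge interrupt line. Below is a minimal lookup sketch, assuming the GPIO_IRQ_CLUSTER() arguments land in start/end/to_irq members and ignoring the rising/falling factor the driver handles separately; it is not the driver's actual code.

#include <linux/errno.h>
#include "pinctrl-abx500.h"

/* Illustrative sketch: member names (start, end, to_irq) are assumptions. */
static int sketch_gpio_to_rising_irq(unsigned int gpio,
			const struct abx500_gpio_irq_cluster *clusters,
			unsigned int nclusters)
{
	unsigned int i;

	for (i = 0; i < nclusters; i++) {
		const struct abx500_gpio_irq_cluster *c = &clusters[i];

		/* Each cluster is a contiguous run of interrupt-capable GPIOs. */
		if (gpio >= c->start && gpio <= c->end)
			return c->to_irq + (gpio - c->start);
	}

	return -EINVAL;	/* this GPIO cannot generate an interrupt */
}

Under these assumptions GPIO41, for instance, would resolve to AB8500_INT_GPIO40R + 1 via the (40, 41) cluster.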
+
+static struct abx500_pinctrl_soc_data ab8505_soc = {
+       .gpio_ranges = ab8505_pinranges,
+       .gpio_num_ranges = ARRAY_SIZE(ab8505_pinranges),
+       .pins = ab8505_pins,
+       .npins = ARRAY_SIZE(ab8505_pins),
+       .functions = ab8505_functions,
+       .nfunctions = ARRAY_SIZE(ab8505_functions),
+       .groups = ab8505_groups,
+       .ngroups = ARRAY_SIZE(ab8505_groups),
+       .alternate_functions = ab8505_alternate_functions,
+       .gpio_irq_cluster = ab8505_gpio_irq_cluster,
+       .ngpio_irq_cluster = ARRAY_SIZE(ab8505_gpio_irq_cluster),
+       .irq_gpio_rising_offset = AB8500_INT_GPIO6R,
+       .irq_gpio_falling_offset = AB8500_INT_GPIO6F,
+       .irq_gpio_factor = 1,
+};
+
+void
+abx500_pinctrl_ab8505_init(struct abx500_pinctrl_soc_data **soc)
+{
+       *soc = &ab8505_soc;
+}
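For context, a board that wants to use the groups and functions defined above would typically register a pinctrl map tying a group to a function. The example below is hypothetical: the device name, the "pinctrl-ab8505" controller name and the idea of doing this from board code are placeholders, while the group and function strings come from the tables above.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pinctrl/machine.h>

/* Hypothetical board code: mux the AB8505 UART data pins (ALT_A column). */
static struct pinctrl_map ab8505_board_pinmap[] __initdata = {
	PIN_MAP_MUX_GROUP_DEFAULT("uart-example.0", "pinctrl-ab8505",
				  "uarttxdata_a_1", "uartdata"),
	PIN_MAP_MUX_GROUP_DEFAULT("uart-example.0", "pinctrl-ab8505",
				  "uartrxdata_a_1", "uartdata"),
};

static int __init ab8505_board_pinmaps_init(void)
{
	/* Register the maps once at boot; the names above are illustrative. */
	return pinctrl_register_mappings(ab8505_board_pinmap,
					 ARRAY_SIZE(ab8505_board_pinmap));
}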
diff --git a/drivers/pinctrl/pinctrl-ab8540.c b/drivers/pinctrl/pinctrl-ab8540.c
new file mode 100644 (file)
index 0000000..8ee1e8d
--- /dev/null
@@ -0,0 +1,407 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Author: Patrice Chotard <patrice.chotard@stericsson.com> for ST-Ericsson.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include "pinctrl-abx500.h"
+
+/* All the pins that can be used for GPIO and some other functions */
+#define ABX500_GPIO(offset)            (offset)
+
+#define AB8540_PIN_J16         ABX500_GPIO(1)
+#define AB8540_PIN_D17         ABX500_GPIO(2)
+#define AB8540_PIN_C12         ABX500_GPIO(3)
+#define AB8540_PIN_G12         ABX500_GPIO(4)
+/* hole */
+#define AB8540_PIN_D16         ABX500_GPIO(14)
+#define AB8540_PIN_F15         ABX500_GPIO(15)
+#define AB8540_PIN_J8          ABX500_GPIO(16)
+#define AB8540_PIN_K16         ABX500_GPIO(17)
+#define AB8540_PIN_G15         ABX500_GPIO(18)
+#define AB8540_PIN_F17         ABX500_GPIO(19)
+#define AB8540_PIN_E17         ABX500_GPIO(20)
+/* hole */
+#define AB8540_PIN_AA16                ABX500_GPIO(27)
+#define AB8540_PIN_W18         ABX500_GPIO(28)
+#define AB8540_PIN_Y15         ABX500_GPIO(29)
+#define AB8540_PIN_W16         ABX500_GPIO(30)
+#define AB8540_PIN_V15         ABX500_GPIO(31)
+#define AB8540_PIN_W17         ABX500_GPIO(32)
+/* hole */
+#define AB8540_PIN_D12         ABX500_GPIO(42)
+#define AB8540_PIN_P4          ABX500_GPIO(43)
+#define AB8540_PIN_AB1         ABX500_GPIO(44)
+#define AB8540_PIN_K7          ABX500_GPIO(45)
+#define AB8540_PIN_L7          ABX500_GPIO(46)
+#define AB8540_PIN_G10         ABX500_GPIO(47)
+#define AB8540_PIN_K12         ABX500_GPIO(48)
+/* hole */
+#define AB8540_PIN_N8          ABX500_GPIO(51)
+#define AB8540_PIN_P12         ABX500_GPIO(52)
+#define AB8540_PIN_K8          ABX500_GPIO(53)
+#define AB8540_PIN_J11         ABX500_GPIO(54)
+#define AB8540_PIN_AC2         ABX500_GPIO(55)
+#define AB8540_PIN_AB2         ABX500_GPIO(56)
+
+/* indicates the highest GPIO number */
+#define AB8540_GPIO_MAX_NUMBER 56
+
+/*
+ * The names of the pins are denoted by GPIO number and ball name, even
+ * though they can be used for other things than GPIO, this is the first
+ * column in the table of the data sheet and often used on schematics and
+ * such.
+ */
+static const struct pinctrl_pin_desc ab8540_pins[] = {
+       PINCTRL_PIN(AB8540_PIN_J16, "GPIO1_J16"),
+       PINCTRL_PIN(AB8540_PIN_D17, "GPIO2_D17"),
+       PINCTRL_PIN(AB8540_PIN_C12, "GPIO3_C12"),
+       PINCTRL_PIN(AB8540_PIN_G12, "GPIO4_G12"),
+       /* hole */
+       PINCTRL_PIN(AB8540_PIN_D16, "GPIO14_D16"),
+       PINCTRL_PIN(AB8540_PIN_F15, "GPIO15_F15"),
+       PINCTRL_PIN(AB8540_PIN_J8, "GPIO16_J8"),
+       PINCTRL_PIN(AB8540_PIN_K16, "GPIO17_K16"),
+       PINCTRL_PIN(AB8540_PIN_G15, "GPIO18_G15"),
+       PINCTRL_PIN(AB8540_PIN_F17, "GPIO19_F17"),
+       PINCTRL_PIN(AB8540_PIN_E17, "GPIO20_E17"),
+       /* hole */
+       PINCTRL_PIN(AB8540_PIN_AA16, "GPIO27_AA16"),
+       PINCTRL_PIN(AB8540_PIN_W18, "GPIO28_W18"),
+       PINCTRL_PIN(AB8540_PIN_Y15, "GPIO29_Y15"),
+       PINCTRL_PIN(AB8540_PIN_W16, "GPIO30_W16"),
+       PINCTRL_PIN(AB8540_PIN_V15, "GPIO31_V15"),
+       PINCTRL_PIN(AB8540_PIN_W17, "GPIO32_W17"),
+       /* hole */
+       PINCTRL_PIN(AB8540_PIN_D12, "GPIO42_D12"),
+       PINCTRL_PIN(AB8540_PIN_P4, "GPIO43_P4"),
+       PINCTRL_PIN(AB8540_PIN_AB1, "GPIO44_AB1"),
+       PINCTRL_PIN(AB8540_PIN_K7, "GPIO45_K7"),
+       PINCTRL_PIN(AB8540_PIN_L7, "GPIO46_L7"),
+       PINCTRL_PIN(AB8540_PIN_G10, "GPIO47_G10"),
+       PINCTRL_PIN(AB8540_PIN_K12, "GPIO48_K12"),
+       /* hole */
+       PINCTRL_PIN(AB8540_PIN_N8, "GPIO51_N8"),
+       PINCTRL_PIN(AB8540_PIN_P12, "GPIO52_P12"),
+       PINCTRL_PIN(AB8540_PIN_K8, "GPIO53_K8"),
+       PINCTRL_PIN(AB8540_PIN_J11, "GPIO54_J11"),
+       PINCTRL_PIN(AB8540_PIN_AC2, "GPIO55_AC2"),
+       PINCTRL_PIN(AB8540_PIN_AB2, "GPIO56_AB2"),
+};
+
+/*
+ * Maps local GPIO offsets to local pin numbers
+ */
+static const struct abx500_pinrange ab8540_pinranges[] = {
+       ABX500_PINRANGE(1, 4, ABX500_ALT_A),
+       ABX500_PINRANGE(14, 7, ABX500_ALT_A),
+       ABX500_PINRANGE(27, 6, ABX500_ALT_A),
+       ABX500_PINRANGE(42, 7, ABX500_ALT_A),
+       ABX500_PINRANGE(51, 6, ABX500_ALT_A),
+};
+
+/*
+ * Read the pin group names like this:
+ * sysclkreq2_d_1 = first group of pins for sysclkreq2 on the default function
+ *
+ * The groups are arranged as sets per altfunction column, so we can
+ * mux in one group at a time by selecting the same altfunction for them
+ * all. When functions require pins on different altfunctions, you need
+ * to combine several groups.
+ */
+
+/* default column */
+static const unsigned sysclkreq2_d_1_pins[] = { AB8540_PIN_J16 };
+static const unsigned sysclkreq3_d_1_pins[] = { AB8540_PIN_D17 };
+static const unsigned sysclkreq4_d_1_pins[] = { AB8540_PIN_C12 };
+static const unsigned sysclkreq6_d_1_pins[] = { AB8540_PIN_G12 };
+static const unsigned pwmout1_d_1_pins[] = { AB8540_PIN_D16 };
+static const unsigned pwmout2_d_1_pins[] = { AB8540_PIN_F15 };
+static const unsigned pwmout3_d_1_pins[] = { AB8540_PIN_J8 };
+
+/* audio data interface 1 */
+static const unsigned adi1_d_1_pins[] = { AB8540_PIN_K16, AB8540_PIN_G15,
+                                       AB8540_PIN_F17, AB8540_PIN_E17 };
+/* Digital microphone 1 and 2 */
+static const unsigned dmic12_d_1_pins[] = { AB8540_PIN_AA16, AB8540_PIN_W18 };
+/* Digital microphone 3 and 4 */
+static const unsigned dmic34_d_1_pins[] = { AB8540_PIN_Y15, AB8540_PIN_W16 };
+/* Digital microphone 5 and 6 */
+static const unsigned dmic56_d_1_pins[] = { AB8540_PIN_V15, AB8540_PIN_W17 };
+static const unsigned sysclkreq5_d_1_pins[] = { AB8540_PIN_D12 };
+static const unsigned batremn_d_1_pins[] = { AB8540_PIN_P4 };
+static const unsigned service_d_1_pins[] = { AB8540_PIN_AB1 };
+static const unsigned pwrctrl0_d_1_pins[] = { AB8540_PIN_K7 };
+static const unsigned pwrctrl1_d_1_pins[] = { AB8540_PIN_L7 };
+static const unsigned pwmextvibra1_d_1_pins[] = { AB8540_PIN_G10 };
+static const unsigned pwmextvibra2_d_1_pins[] = { AB8540_PIN_K12 };
+static const unsigned gpio1_vbat_d_1_pins[] = { AB8540_PIN_N8 };
+static const unsigned gpio2_vbat_d_1_pins[] = { AB8540_PIN_P12 };
+static const unsigned gpio3_vbat_d_1_pins[] = { AB8540_PIN_K8 };
+static const unsigned gpio4_vbat_d_1_pins[] = { AB8540_PIN_J11 };
+static const unsigned pdmclkdat_d_1_pins[] = { AB8540_PIN_AC2, AB8540_PIN_AB2 };
+
+/* Altfunction A column */
+static const unsigned gpio1_a_1_pins[] = { AB8540_PIN_J16 };
+static const unsigned gpio2_a_1_pins[] = { AB8540_PIN_D17 };
+static const unsigned gpio3_a_1_pins[] = { AB8540_PIN_C12 };
+static const unsigned gpio4_a_1_pins[] = { AB8540_PIN_G12 };
+static const unsigned gpio14_a_1_pins[] = { AB8540_PIN_D16 };
+static const unsigned gpio15_a_1_pins[] = { AB8540_PIN_F15 };
+static const unsigned gpio16_a_1_pins[] = { AB8540_PIN_J8 };
+static const unsigned gpio17_a_1_pins[] = { AB8540_PIN_K16 };
+static const unsigned gpio18_a_1_pins[] = { AB8540_PIN_G15 };
+static const unsigned gpio19_a_1_pins[] = { AB8540_PIN_F17 };
+static const unsigned gpio20_a_1_pins[] = { AB8540_PIN_E17 };
+static const unsigned gpio27_a_1_pins[] = { AB8540_PIN_AA16 };
+static const unsigned gpio28_a_1_pins[] = { AB8540_PIN_W18 };
+static const unsigned gpio29_a_1_pins[] = { AB8540_PIN_Y15 };
+static const unsigned gpio30_a_1_pins[] = { AB8540_PIN_W16 };
+static const unsigned gpio31_a_1_pins[] = { AB8540_PIN_V15 };
+static const unsigned gpio32_a_1_pins[] = { AB8540_PIN_W17 };
+static const unsigned gpio42_a_1_pins[] = { AB8540_PIN_D12 };
+static const unsigned gpio43_a_1_pins[] = { AB8540_PIN_P4 };
+static const unsigned gpio44_a_1_pins[] = { AB8540_PIN_AB1 };
+static const unsigned gpio45_a_1_pins[] = { AB8540_PIN_K7 };
+static const unsigned gpio46_a_1_pins[] = { AB8540_PIN_L7 };
+static const unsigned gpio47_a_1_pins[] = { AB8540_PIN_G10 };
+static const unsigned gpio48_a_1_pins[] = { AB8540_PIN_K12 };
+static const unsigned gpio51_a_1_pins[] = { AB8540_PIN_N8 };
+static const unsigned gpio52_a_1_pins[] = { AB8540_PIN_P12 };
+static const unsigned gpio53_a_1_pins[] = { AB8540_PIN_K8 };
+static const unsigned gpio54_a_1_pins[] = { AB8540_PIN_J11 };
+static const unsigned gpio55_a_1_pins[] = { AB8540_PIN_AC2 };
+static const unsigned gpio56_a_1_pins[] = { AB8540_PIN_AB2 };
+
+#define AB8540_PIN_GROUP(a, b) { .name = #a, .pins = a##_pins,         \
+                       .npins = ARRAY_SIZE(a##_pins), .altsetting = b }
+
+static const struct abx500_pingroup ab8540_groups[] = {
+       /* default column */
+       AB8540_PIN_GROUP(sysclkreq2_d_1, ABX500_DEFAULT),
+       AB8540_PIN_GROUP(sysclkreq3_d_1, ABX500_DEFAULT),
+       AB8540_PIN_GROUP(sysclkreq4_d_1, ABX500_DEFAULT),
+       AB8540_PIN_GROUP(sysclkreq6_d_1, ABX500_DEFAULT),
+       AB8540_PIN_GROUP(pwmout1_d_1, ABX500_DEFAULT),
+       AB8540_PIN_GROUP(pwmout2_d_1, ABX500_DEFAULT),
+       AB8540_PIN_GROUP(pwmout3_d_1, ABX500_DEFAULT),
+       AB8540_PIN_GROUP(adi1_d_1, ABX500_DEFAULT),
+       AB8540_PIN_GROUP(dmic12_d_1, ABX500_DEFAULT),
+       AB8540_PIN_GROUP(dmic34_d_1, ABX500_DEFAULT),
+       AB8540_PIN_GROUP(dmic56_d_1, ABX500_DEFAULT),
+       AB8540_PIN_GROUP(sysclkreq5_d_1, ABX500_DEFAULT),
+       AB8540_PIN_GROUP(batremn_d_1, ABX500_DEFAULT),
+       AB8540_PIN_GROUP(service_d_1, ABX500_DEFAULT),
+       AB8540_PIN_GROUP(pwrctrl0_d_1, ABX500_DEFAULT),
+       AB8540_PIN_GROUP(pwrctrl1_d_1, ABX500_DEFAULT),
+       AB8540_PIN_GROUP(pwmextvibra1_d_1, ABX500_DEFAULT),
+       AB8540_PIN_GROUP(pwmextvibra2_d_1, ABX500_DEFAULT),
+       AB8540_PIN_GROUP(gpio1_vbat_d_1, ABX500_DEFAULT),
+       AB8540_PIN_GROUP(gpio2_vbat_d_1, ABX500_DEFAULT),
+       AB8540_PIN_GROUP(gpio3_vbat_d_1, ABX500_DEFAULT),
+       AB8540_PIN_GROUP(gpio4_vbat_d_1, ABX500_DEFAULT),
+       AB8540_PIN_GROUP(pdmclkdat_d_1, ABX500_DEFAULT),
+       /* Altfunction A column */
+       AB8540_PIN_GROUP(gpio1_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio2_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio3_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio4_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio14_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio15_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio16_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio17_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio18_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio19_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio20_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio27_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio28_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio29_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio30_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio31_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio32_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio42_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio43_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio44_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio45_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio46_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio47_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio48_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio51_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio52_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio53_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio54_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio55_a_1, ABX500_ALT_A),
+       AB8540_PIN_GROUP(gpio56_a_1, ABX500_ALT_A),
+};
+
+/* We use this macro to define the groups applicable to a function */
+#define AB8540_FUNC_GROUPS(a, b...)       \
+static const char * const a##_groups[] = { b };
+
+AB8540_FUNC_GROUPS(sysclkreq, "sysclkreq2_d_1", "sysclkreq3_d_1",
+               "sysclkreq4_d_1", "sysclkreq5_d_1", "sysclkreq6_d_1");
+AB8540_FUNC_GROUPS(gpio, "gpio1_a_1", "gpio2_a_1", "gpio3_a_1", "gpio4_a_1",
+               "gpio14_a_1", "gpio15_a_1", "gpio16_a_1", "gpio17_a_1",
+               "gpio18_a_1", "gpio19_a_1", "gpio20_a_1", "gpio27_a_1",
+               "gpio28_a_1", "gpio29_a_1", "gpio30_a_1", "gpio31_a_1",
+               "gpio32_a_1", "gpio42_a_1", "gpio43_a_1", "gpio44_a_1",
+               "gpio45_a_1", "gpio46_a_1", "gpio47_a_1", "gpio48_a_1",
+               "gpio51_a_1", "gpio52_a_1", "gpio53_a_1", "gpio54_a_1",
+               "gpio55_a_1", "gpio56_a_1");
+AB8540_FUNC_GROUPS(pwmout, "pwmout1_d_1", "pwmout2_d_1", "pwmout3_d_1");
+AB8540_FUNC_GROUPS(adi1, "adi1_d_1");
+AB8540_FUNC_GROUPS(dmic, "dmic12_d_1", "dmic34_d_1", "dmic56_d_1");
+AB8540_FUNC_GROUPS(batremn, "batremn_d_1");
+AB8540_FUNC_GROUPS(service, "service_d_1");
+AB8540_FUNC_GROUPS(pwrctrl, "pwrctrl0_d_1", "pwrctrl1_d_1");
+AB8540_FUNC_GROUPS(pwmextvibra, "pwmextvibra1_d_1", "pwmextvibra2_d_1");
+AB8540_FUNC_GROUPS(gpio_vbat, "gpio1_vbat_d_1", "gpio2_vbat_d_1",
+               "gpio3_vbat_d_1", "gpio4_vbat_d_1");
+AB8540_FUNC_GROUPS(pdm, "pdmclkdat_d_1");
+
+#define FUNCTION(fname)                                        \
+       {                                               \
+               .name = #fname,                         \
+               .groups = fname##_groups,               \
+               .ngroups = ARRAY_SIZE(fname##_groups),  \
+       }
+
+static const struct abx500_function ab8540_functions[] = {
+       FUNCTION(sysclkreq),
+       FUNCTION(gpio),
+       FUNCTION(pwmout),
+       FUNCTION(adi1),
+       FUNCTION(dmic),
+       FUNCTION(batremn),
+       FUNCTION(service),
+       FUNCTION(pwrctrl),
+       FUNCTION(pwmextvibra),
+       FUNCTION(gpio_vbat),
+       FUNCTION(pdm),
+};
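To make the macro plumbing concrete, this is what the two helpers above produce for the "pdm" function; it is a plain preprocessor expansion of the code above, shown only for illustration.

/* AB8540_FUNC_GROUPS(pdm, "pdmclkdat_d_1") expands to: */
static const char * const pdm_groups[] = { "pdmclkdat_d_1" };

/*
 * FUNCTION(pdm) then expands to the following initializer inside
 * ab8540_functions[]:
 *
 *	{
 *		.name = "pdm",
 *		.groups = pdm_groups,
 *		.ngroups = ARRAY_SIZE(pdm_groups),
 *	}
 */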
+
+/*
+ * This table translates what is in the AB8540 specification regarding the
+ * ball alternate functions (as for DB, default, ALT_A, ALT_B and ALT_C).
+ * ALTERNATE_FUNCTIONS(GPIO_NUMBER, GPIOSEL bit, ALTERNATFUNC bit1,
+ * ALTERNATFUNC bit2, ALTA val, ALTB val, ALTC val),
+ * The AB8540 only supports the DEFAULT and ALT_A functions, so the
+ * ALTERNATFUNC register is not used.
+ *
+ */
+
+struct alternate_functions ab8540_alternate_functions[AB8540_GPIO_MAX_NUMBER + 1] = {
+       /* GPIOSEL1 - bit 4-7 reserved */
+       ALTERNATE_FUNCTIONS(0, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO0 */
+       ALTERNATE_FUNCTIONS(1,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO1, altA controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(2,      1, UNUSED, UNUSED, 0, 0, 0), /* GPIO2, altA controlled by bit 1 */
+       ALTERNATE_FUNCTIONS(3,      2, UNUSED, UNUSED, 0, 0, 0), /* GPIO3, altA controlled by bit 2*/
+       ALTERNATE_FUNCTIONS(4,      3, UNUSED, UNUSED, 0, 0, 0), /* GPIO4, altA controlled by bit 3*/
+       ALTERNATE_FUNCTIONS(5, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO5 */
+       ALTERNATE_FUNCTIONS(6, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO6 */
+       ALTERNATE_FUNCTIONS(7, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO7 */
+       ALTERNATE_FUNCTIONS(8, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO8 */
+       /* GPIOSEL2 - bit 0-4 reserved */
+       ALTERNATE_FUNCTIONS(9,  UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO9 */
+       ALTERNATE_FUNCTIONS(10, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO10 */
+       ALTERNATE_FUNCTIONS(11, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO11 */
+       ALTERNATE_FUNCTIONS(12, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO12 */
+       ALTERNATE_FUNCTIONS(13, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO13 */
+       ALTERNATE_FUNCTIONS(14,      5, UNUSED, UNUSED, 0, 0, 0), /* GPIO14, altA controlled by bit 5 */
+       ALTERNATE_FUNCTIONS(15,      6, UNUSED, UNUSED, 0, 0, 0), /* GPIO15, altA controlled by bit 6 */
+       ALTERNATE_FUNCTIONS(16,      7, UNUSED, UNUSED, 0, 0, 0), /* GPIO16, altA controlled by bit 7 */
+       /* GPIOSEL3 - bit 4-7 reserved */
+       ALTERNATE_FUNCTIONS(17,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO17, altA controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(18,      1, UNUSED, UNUSED, 0, 0, 0), /* GPIO18, altA controlled by bit 1 */
+       ALTERNATE_FUNCTIONS(19,      2, UNUSED, UNUSED, 0, 0, 0), /* GPIO19, altA controlled by bit 2 */
+       ALTERNATE_FUNCTIONS(20,      3, UNUSED, UNUSED, 0, 0, 0), /* GPIO20, altA controlled by bit 3 */
+       ALTERNATE_FUNCTIONS(21, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO21 */
+       ALTERNATE_FUNCTIONS(22, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO22 */
+       ALTERNATE_FUNCTIONS(23, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO23 */
+       ALTERNATE_FUNCTIONS(24, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO24 */
+       /* GPIOSEL4 - bit 0-1 reserved */
+       ALTERNATE_FUNCTIONS(25, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO25 */
+       ALTERNATE_FUNCTIONS(26, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO26 */
+       ALTERNATE_FUNCTIONS(27,      2, UNUSED, UNUSED, 0, 0, 0), /* GPIO27, altA controlled by bit 2 */
+       ALTERNATE_FUNCTIONS(28,      3, UNUSED, UNUSED, 0, 0, 0), /* GPIO28, altA controlled by bit 3 */
+       ALTERNATE_FUNCTIONS(29,      4, UNUSED, UNUSED, 0, 0, 0), /* GPIO29, altA controlled by bit 4 */
+       ALTERNATE_FUNCTIONS(30,      5, UNUSED, UNUSED, 0, 0, 0), /* GPIO30, altA controlled by bit 5 */
+       ALTERNATE_FUNCTIONS(31,      6, UNUSED, UNUSED, 0, 0, 0), /* GPIO31, altA controlled by bit 6 */
+       ALTERNATE_FUNCTIONS(32,      7, UNUSED, UNUSED, 0, 0, 0), /* GPIO32, altA controlled by bit 7 */
+       /* GPIOSEL5 - bit 0-7 reserved */
+       ALTERNATE_FUNCTIONS(33, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO33 */
+       ALTERNATE_FUNCTIONS(34, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO34 */
+       ALTERNATE_FUNCTIONS(35, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO35 */
+       ALTERNATE_FUNCTIONS(36, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO36 */
+       ALTERNATE_FUNCTIONS(37, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO37 */
+       ALTERNATE_FUNCTIONS(38, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO38 */
+       ALTERNATE_FUNCTIONS(39, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO39 */
+       ALTERNATE_FUNCTIONS(40, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO40 */
+       /* GPIOSEL6 - bit 0 reserved */
+       ALTERNATE_FUNCTIONS(41, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO41 */
+       ALTERNATE_FUNCTIONS(42,      1, UNUSED, UNUSED, 0, 0, 0), /* GPIO42, altA controlled by bit 1 */
+       ALTERNATE_FUNCTIONS(43,      2, UNUSED, UNUSED, 0, 0, 0), /* GPIO43, altA controlled by bit 2 */
+       ALTERNATE_FUNCTIONS(44,      3, UNUSED, UNUSED, 0, 0, 0), /* GPIO44, altA controlled by bit 3 */
+       ALTERNATE_FUNCTIONS(45,      4, UNUSED, UNUSED, 0, 0, 0), /* GPIO45, altA controlled by bit 4 */
+       ALTERNATE_FUNCTIONS(46,      5, UNUSED, UNUSED, 0, 0, 0), /* GPIO46, altA controlled by bit 5 */
+       ALTERNATE_FUNCTIONS(47,      6, UNUSED, UNUSED, 0, 0, 0), /* GPIO47, altA controlled by bit 6 */
+       ALTERNATE_FUNCTIONS(48,      7, UNUSED, UNUSED, 0, 0, 0), /* GPIO48, altA controlled by bit 7 */
+       /* GPIOSEL7 - bit 0-1 reserved */
+       ALTERNATE_FUNCTIONS(49, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO49 */
+       ALTERNATE_FUNCTIONS(50, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO50 */
+       ALTERNATE_FUNCTIONS(51,      2, UNUSED, UNUSED, 0, 0, 0), /* GPIO51, altA controlled by bit 2 */
+       ALTERNATE_FUNCTIONS(52,      3, UNUSED, UNUSED, 0, 0, 0), /* GPIO52, altA controlled by bit 3 */
+       ALTERNATE_FUNCTIONS(53,      4, UNUSED, UNUSED, 0, 0, 0), /* GPIO53, altA controlled by bit 4 */
+       ALTERNATE_FUNCTIONS(54,      5, UNUSED, UNUSED, 0, 0, 0), /* GPIO54, altA controlled by bit 5 */
+       ALTERNATE_FUNCTIONS(55,      6, UNUSED, UNUSED, 0, 0, 0), /* GPIO55, altA controlled by bit 6 */
+       ALTERNATE_FUNCTIONS(56,      7, UNUSED, UNUSED, 0, 0, 0), /* GPIO56, altA controlled by bit 7 */
+};
+
+static struct pullud ab8540_pullud = {
+       .first_pin = 51,        /* GPIO1_VBAT */
+       .last_pin = 54,         /* GPIO4_VBAT */
+};
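The pullud descriptor above simply marks the GPIO1_VBAT..GPIO4_VBAT window (pins 51 to 54) whose pull-up/pull-down configuration lives in the dedicated AB8540 register defined later in pinctrl-abx500.c rather than in the common PUD registers. A trivial sketch of the range test this enables (illustrative only, assuming the first_pin/last_pin fields shown above):

#include <linux/types.h>

/* Illustrative sketch: true if this AB8540 pin has the VBAT pull-up/down cell. */
static bool sketch_ab8540_pin_has_vbat_pull(unsigned int pin)
{
	return pin >= ab8540_pullud.first_pin &&
	       pin <= ab8540_pullud.last_pin;
}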
+
+/*
+ * For the AB8540, only some GPIOs are interrupt capable:
+ *     GPIO43 to GPIO44
+ *     GPIO51 to GPIO54
+ */
+struct abx500_gpio_irq_cluster ab8540_gpio_irq_cluster[] = {
+       GPIO_IRQ_CLUSTER(43, 43, AB8540_INT_GPIO43F),
+       GPIO_IRQ_CLUSTER(44, 44, AB8540_INT_GPIO44F),
+       GPIO_IRQ_CLUSTER(51, 54, AB9540_INT_GPIO51R),
+};
+
+static struct abx500_pinctrl_soc_data ab8540_soc = {
+       .gpio_ranges = ab8540_pinranges,
+       .gpio_num_ranges = ARRAY_SIZE(ab8540_pinranges),
+       .pins = ab8540_pins,
+       .npins = ARRAY_SIZE(ab8540_pins),
+       .functions = ab8540_functions,
+       .nfunctions = ARRAY_SIZE(ab8540_functions),
+       .groups = ab8540_groups,
+       .ngroups = ARRAY_SIZE(ab8540_groups),
+       .alternate_functions = ab8540_alternate_functions,
+       .pullud = &ab8540_pullud,
+       .gpio_irq_cluster = ab8540_gpio_irq_cluster,
+       .ngpio_irq_cluster = ARRAY_SIZE(ab8540_gpio_irq_cluster),
+       .irq_gpio_rising_offset = AB8540_INT_GPIO43R,
+       .irq_gpio_falling_offset = AB8540_INT_GPIO43F,
+       .irq_gpio_factor = 2,
+};
+
+void
+abx500_pinctrl_ab8540_init(struct abx500_pinctrl_soc_data **soc)
+{
+       *soc = &ab8540_soc;
+}
diff --git a/drivers/pinctrl/pinctrl-ab9540.c b/drivers/pinctrl/pinctrl-ab9540.c
new file mode 100644 (file)
index 0000000..7610bd0
--- /dev/null
@@ -0,0 +1,485 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Author: Patrice Chotard <patrice.chotard@stericsson.com> for ST-Ericsson.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include "pinctrl-abx500.h"
+
+/* All the pins that can be used for GPIO and some other functions */
+#define ABX500_GPIO(offset)            (offset)
+
+#define AB9540_PIN_R4          ABX500_GPIO(1)
+#define AB9540_PIN_V3          ABX500_GPIO(2)
+#define AB9540_PIN_T4          ABX500_GPIO(3)
+#define AB9540_PIN_T5          ABX500_GPIO(4)
+/* hole */
+#define AB9540_PIN_B18         ABX500_GPIO(10)
+#define AB9540_PIN_C18         ABX500_GPIO(11)
+/* hole */
+#define AB9540_PIN_D18         ABX500_GPIO(13)
+#define AB9540_PIN_B19         ABX500_GPIO(14)
+#define AB9540_PIN_C19         ABX500_GPIO(15)
+#define AB9540_PIN_D19         ABX500_GPIO(16)
+#define AB9540_PIN_R3          ABX500_GPIO(17)
+#define AB9540_PIN_T2          ABX500_GPIO(18)
+#define AB9540_PIN_U2          ABX500_GPIO(19)
+#define AB9540_PIN_V2          ABX500_GPIO(20)
+#define AB9540_PIN_N17         ABX500_GPIO(21)
+#define AB9540_PIN_N16         ABX500_GPIO(22)
+#define AB9540_PIN_M19         ABX500_GPIO(23)
+#define AB9540_PIN_T3          ABX500_GPIO(24)
+#define AB9540_PIN_W2          ABX500_GPIO(25)
+/* hole */
+#define AB9540_PIN_H4          ABX500_GPIO(27)
+#define AB9540_PIN_F1          ABX500_GPIO(28)
+#define AB9540_PIN_F4          ABX500_GPIO(29)
+#define AB9540_PIN_F2          ABX500_GPIO(30)
+#define AB9540_PIN_E4          ABX500_GPIO(31)
+#define AB9540_PIN_F3          ABX500_GPIO(32)
+/* hole */
+#define AB9540_PIN_J13         ABX500_GPIO(34)
+/* hole */
+#define AB9540_PIN_L17         ABX500_GPIO(40)
+#define AB9540_PIN_L16         ABX500_GPIO(41)
+#define AB9540_PIN_W3          ABX500_GPIO(42)
+#define AB9540_PIN_N4          ABX500_GPIO(50)
+#define AB9540_PIN_G12         ABX500_GPIO(51)
+#define AB9540_PIN_E17         ABX500_GPIO(52)
+#define AB9540_PIN_D11         ABX500_GPIO(53)
+#define AB9540_PIN_M18         ABX500_GPIO(54)
+
+/* indicates the highest GPIO number */
+#define AB9540_GPIO_MAX_NUMBER 54
+
+/*
+ * The names of the pins are denoted by GPIO number and ball name, even
+ * though they can be used for other things than GPIO, this is the first
+ * column in the table of the data sheet and often used on schematics and
+ * such.
+ */
+static const struct pinctrl_pin_desc ab9540_pins[] = {
+       PINCTRL_PIN(AB9540_PIN_R4, "GPIO1_R4"),
+       PINCTRL_PIN(AB9540_PIN_V3, "GPIO2_V3"),
+       PINCTRL_PIN(AB9540_PIN_T4, "GPIO3_T4"),
+       PINCTRL_PIN(AB9540_PIN_T5, "GPIO4_T5"),
+       /* hole */
+       PINCTRL_PIN(AB9540_PIN_B18, "GPIO10_B18"),
+       PINCTRL_PIN(AB9540_PIN_C18, "GPIO11_C18"),
+       /* hole */
+       PINCTRL_PIN(AB9540_PIN_D18, "GPIO13_D18"),
+       PINCTRL_PIN(AB9540_PIN_B19, "GPIO14_B19"),
+       PINCTRL_PIN(AB9540_PIN_C19, "GPIO15_C19"),
+       PINCTRL_PIN(AB9540_PIN_D19, "GPIO16_D19"),
+       PINCTRL_PIN(AB9540_PIN_R3, "GPIO17_R3"),
+       PINCTRL_PIN(AB9540_PIN_T2, "GPIO18_T2"),
+       PINCTRL_PIN(AB9540_PIN_U2, "GPIO19_U2"),
+       PINCTRL_PIN(AB9540_PIN_V2, "GPIO20_V2"),
+       PINCTRL_PIN(AB9540_PIN_N17, "GPIO21_N17"),
+       PINCTRL_PIN(AB9540_PIN_N16, "GPIO22_N16"),
+       PINCTRL_PIN(AB9540_PIN_M19, "GPIO23_M19"),
+       PINCTRL_PIN(AB9540_PIN_T3, "GPIO24_T3"),
+       PINCTRL_PIN(AB9540_PIN_W2, "GPIO25_W2"),
+       /* hole */
+       PINCTRL_PIN(AB9540_PIN_H4, "GPIO27_H4"),
+       PINCTRL_PIN(AB9540_PIN_F1, "GPIO28_F1"),
+       PINCTRL_PIN(AB9540_PIN_F4, "GPIO29_F4"),
+       PINCTRL_PIN(AB9540_PIN_F2, "GPIO30_F2"),
+       PINCTRL_PIN(AB9540_PIN_E4, "GPIO31_E4"),
+       PINCTRL_PIN(AB9540_PIN_F3, "GPIO32_F3"),
+       /* hole */
+       PINCTRL_PIN(AB9540_PIN_J13, "GPIO34_J13"),
+       /* hole */
+       PINCTRL_PIN(AB9540_PIN_L17, "GPIO40_L17"),
+       PINCTRL_PIN(AB9540_PIN_L16, "GPIO41_L16"),
+       PINCTRL_PIN(AB9540_PIN_W3, "GPIO42_W3"),
+       PINCTRL_PIN(AB9540_PIN_N4, "GPIO50_N4"),
+       PINCTRL_PIN(AB9540_PIN_G12, "GPIO51_G12"),
+       PINCTRL_PIN(AB9540_PIN_E17, "GPIO52_E17"),
+       PINCTRL_PIN(AB9540_PIN_D11, "GPIO53_D11"),
+       PINCTRL_PIN(AB9540_PIN_M18, "GPIO60_M18"),
+};
+
+/*
+ * Maps local GPIO offsets to local pin numbers
+ */
+static const struct abx500_pinrange ab9540_pinranges[] = {
+       ABX500_PINRANGE(1, 4, ABX500_ALT_A),
+       ABX500_PINRANGE(10, 2, ABX500_DEFAULT),
+       ABX500_PINRANGE(13, 1, ABX500_DEFAULT),
+       ABX500_PINRANGE(14, 12, ABX500_ALT_A),
+       ABX500_PINRANGE(27, 6, ABX500_ALT_A),
+       ABX500_PINRANGE(34, 1, ABX500_ALT_A),
+       ABX500_PINRANGE(40, 3, ABX500_ALT_A),
+       ABX500_PINRANGE(50, 1, ABX500_DEFAULT),
+       ABX500_PINRANGE(51, 3, ABX500_ALT_A),
+       ABX500_PINRANGE(54, 1, ABX500_DEFAULT),
+};
+
+/*
+ * Read the pin group names like this:
+ * sysclkreq2_d_1 = first group of pins for sysclkreq2 on the default function
+ *
+ * The groups are arranged as sets per altfunction column, so we can
+ * mux in one group at a time by selecting the same altfunction for them
+ * all. When functions require pins on different altfunctions, you need
+ * to combine several groups.
+ */
+
+/* default column */
+static const unsigned sysclkreq2_d_1_pins[] = { AB9540_PIN_R4 };
+static const unsigned sysclkreq3_d_1_pins[] = { AB9540_PIN_V3 };
+static const unsigned sysclkreq4_d_1_pins[] = { AB9540_PIN_T4 };
+static const unsigned sysclkreq6_d_1_pins[] = { AB9540_PIN_T5 };
+static const unsigned gpio10_d_1_pins[] = { AB9540_PIN_B18 };
+static const unsigned gpio11_d_1_pins[] = { AB9540_PIN_C18 };
+static const unsigned gpio13_d_1_pins[] = { AB9540_PIN_D18 };
+static const unsigned pwmout1_d_1_pins[] = { AB9540_PIN_B19 };
+static const unsigned pwmout2_d_1_pins[] = { AB9540_PIN_C19 };
+static const unsigned pwmout3_d_1_pins[] = { AB9540_PIN_D19 };
+/* audio data interface 1 */
+static const unsigned adi1_d_1_pins[] = { AB9540_PIN_R3, AB9540_PIN_T2,
+                                       AB9540_PIN_U2, AB9540_PIN_V2 };
+/* USBUICC */
+static const unsigned usbuicc_d_1_pins[] = { AB9540_PIN_N17, AB9540_PIN_N16,
+                                       AB9540_PIN_M19 };
+static const unsigned sysclkreq7_d_1_pins[] = { AB9540_PIN_T3 };
+static const unsigned sysclkreq8_d_1_pins[] = { AB9540_PIN_W2 };
+/* Digital microphone 1 and 2 */
+static const unsigned dmic12_d_1_pins[] = { AB9540_PIN_H4, AB9540_PIN_F1 };
+/* Digital microphone 3 and 4 */
+static const unsigned dmic34_d_1_pins[] = { AB9540_PIN_F4, AB9540_PIN_F2 };
+/* Digital microphone 5 and 6 */
+static const unsigned dmic56_d_1_pins[] = { AB9540_PIN_E4, AB9540_PIN_F3 };
+static const unsigned extcpena_d_1_pins[] = { AB9540_PIN_J13 };
+/* modem SDA/SCL */
+static const unsigned modsclsda_d_1_pins[] = { AB9540_PIN_L17, AB9540_PIN_L16 };
+static const unsigned sysclkreq5_d_1_pins[] = { AB9540_PIN_W3 };
+static const unsigned gpio50_d_1_pins[] = { AB9540_PIN_N4 };
+static const unsigned batremn_d_1_pins[] = { AB9540_PIN_G12 };
+static const unsigned resethw_d_1_pins[] = { AB9540_PIN_E17 };
+static const unsigned service_d_1_pins[] = { AB9540_PIN_D11 };
+static const unsigned gpio60_d_1_pins[] = { AB9540_PIN_M18 };
+
+/* Altfunction A column */
+static const unsigned gpio1_a_1_pins[] = { AB9540_PIN_R4 };
+static const unsigned gpio2_a_1_pins[] = { AB9540_PIN_V3 };
+static const unsigned gpio3_a_1_pins[] = { AB9540_PIN_T4 };
+static const unsigned gpio4_a_1_pins[] = { AB9540_PIN_T5 };
+static const unsigned hiqclkena_a_1_pins[] = { AB9540_PIN_B18 };
+static const unsigned pdmclk_a_1_pins[] = { AB9540_PIN_C18 };
+static const unsigned uartdata_a_1_pins[] = { AB9540_PIN_D18, AB9540_PIN_N4 };
+static const unsigned gpio14_a_1_pins[] = { AB9540_PIN_B19 };
+static const unsigned gpio15_a_1_pins[] = { AB9540_PIN_C19 };
+static const unsigned gpio16_a_1_pins[] = { AB9540_PIN_D19 };
+static const unsigned gpio17_a_1_pins[] = { AB9540_PIN_R3 };
+static const unsigned gpio18_a_1_pins[] = { AB9540_PIN_T2 };
+static const unsigned gpio19_a_1_pins[] = { AB9540_PIN_U2 };
+static const unsigned gpio20_a_1_pins[] = { AB9540_PIN_V2 };
+static const unsigned gpio21_a_1_pins[] = { AB9540_PIN_N17 };
+static const unsigned gpio22_a_1_pins[] = { AB9540_PIN_N16 };
+static const unsigned gpio23_a_1_pins[] = { AB9540_PIN_M19 };
+static const unsigned gpio24_a_1_pins[] = { AB9540_PIN_T3 };
+static const unsigned gpio25_a_1_pins[] = { AB9540_PIN_W2 };
+static const unsigned gpio27_a_1_pins[] = { AB9540_PIN_H4 };
+static const unsigned gpio28_a_1_pins[] = { AB9540_PIN_F1 };
+static const unsigned gpio29_a_1_pins[] = { AB9540_PIN_F4 };
+static const unsigned gpio30_a_1_pins[] = { AB9540_PIN_F2 };
+static const unsigned gpio31_a_1_pins[] = { AB9540_PIN_E4 };
+static const unsigned gpio32_a_1_pins[] = { AB9540_PIN_F3 };
+static const unsigned gpio34_a_1_pins[] = { AB9540_PIN_J13 };
+static const unsigned gpio40_a_1_pins[] = { AB9540_PIN_L17 };
+static const unsigned gpio41_a_1_pins[] = { AB9540_PIN_L16 };
+static const unsigned gpio42_a_1_pins[] = { AB9540_PIN_W3 };
+static const unsigned gpio51_a_1_pins[] = { AB9540_PIN_G12 };
+static const unsigned gpio52_a_1_pins[] = { AB9540_PIN_E17 };
+static const unsigned gpio53_a_1_pins[] = { AB9540_PIN_D11 };
+static const unsigned usbuiccpd_a_1_pins[] = { AB9540_PIN_M18 };
+
+/* Altfunction B colum */
+static const unsigned pdmdata_b_1_pins[] = { AB9540_PIN_B18 };
+static const unsigned pwmextvibra1_b_1_pins[] = { AB9540_PIN_D18 };
+static const unsigned pwmextvibra2_b_1_pins[] = { AB9540_PIN_N4 };
+
+/* Altfunction C column */
+static const unsigned usbvdat_c_1_pins[] = { AB9540_PIN_D18 };
+
+#define AB9540_PIN_GROUP(a, b) { .name = #a, .pins = a##_pins,         \
+                       .npins = ARRAY_SIZE(a##_pins), .altsetting = b }
+
+static const struct abx500_pingroup ab9540_groups[] = {
+       /* default column */
+       AB9540_PIN_GROUP(sysclkreq2_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(sysclkreq3_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(sysclkreq4_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(sysclkreq6_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(gpio10_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(gpio11_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(gpio13_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(pwmout1_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(pwmout2_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(pwmout3_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(adi1_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(usbuicc_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(sysclkreq7_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(sysclkreq8_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(dmic12_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(dmic34_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(dmic56_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(extcpena_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(modsclsda_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(sysclkreq5_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(gpio50_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(batremn_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(resethw_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(service_d_1, ABX500_DEFAULT),
+       AB9540_PIN_GROUP(gpio60_d_1, ABX500_DEFAULT),
+
+       /* Altfunction A column */
+       AB9540_PIN_GROUP(gpio1_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio2_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio3_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio4_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(hiqclkena_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(pdmclk_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(uartdata_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio14_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio15_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio16_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio17_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio18_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio19_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio20_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio21_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio22_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio23_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio24_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio25_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio27_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio28_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio29_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio30_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio31_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio32_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio34_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio40_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio41_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio42_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio51_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio52_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(gpio53_a_1, ABX500_ALT_A),
+       AB9540_PIN_GROUP(usbuiccpd_a_1, ABX500_ALT_A),
+
+       /* Altfunction B column */
+       AB9540_PIN_GROUP(pdmdata_b_1, ABX500_ALT_B),
+       AB9540_PIN_GROUP(pwmextvibra1_b_1, ABX500_ALT_B),
+       AB9540_PIN_GROUP(pwmextvibra2_b_1, ABX500_ALT_B),
+
+       /* Altfunction C column */
+       AB9540_PIN_GROUP(usbvdat_c_1, ABX500_ALT_C),
+};
+
+/* We use this macro to define the groups applicable to a function */
+#define AB9540_FUNC_GROUPS(a, b...)       \
+static const char * const a##_groups[] = { b };
+
+AB9540_FUNC_GROUPS(sysclkreq, "sysclkreq2_d_1", "sysclkreq3_d_1",
+               "sysclkreq4_d_1", "sysclkreq5_d_1", "sysclkreq6_d_1",
+               "sysclkreq7_d_1", "sysclkreq8_d_1");
+AB9540_FUNC_GROUPS(gpio, "gpio1_a_1", "gpio2_a_1", "gpio3_a_1", "gpio4_a_1",
+               "gpio10_d_1", "gpio11_d_1", "gpio13_d_1", "gpio14_a_1",
+               "gpio15_a_1", "gpio16_a_1", "gpio17_a_1", "gpio18_a_1",
+               "gpio19_a_1", "gpio20_a_1", "gpio21_a_1", "gpio22_a_1",
+               "gpio23_a_1", "gpio24_a_1", "gpio25_a_1", "gpio27_a_1",
+               "gpio28_a_1", "gpio29_a_1", "gpio30_a_1", "gpio31_a_1",
+               "gpio32_a_1", "gpio34_a_1", "gpio40_a_1", "gpio41_a_1",
+               "gpio42_a_1", "gpio50_d_1", "gpio51_a_1", "gpio52_a_1",
+               "gpio53_a_1", "gpio60_d_1");
+AB9540_FUNC_GROUPS(pwmout, "pwmout1_d_1", "pwmout2_d_1", "pwmout3_d_1");
+AB9540_FUNC_GROUPS(adi1, "adi1_d_1");
+AB9540_FUNC_GROUPS(usbuicc, "usbuicc_d_1", "usbuiccpd_a_1");
+AB9540_FUNC_GROUPS(dmic, "dmic12_d_1", "dmic34_d_1", "dmic56_d_1");
+AB9540_FUNC_GROUPS(extcpena, "extcpena_d_1");
+AB9540_FUNC_GROUPS(modsclsda, "modsclsda_d_1");
+AB9540_FUNC_GROUPS(batremn, "batremn_d_1");
+AB9540_FUNC_GROUPS(resethw, "resethw_d_1");
+AB9540_FUNC_GROUPS(service, "service_d_1");
+AB9540_FUNC_GROUPS(hiqclkena, "hiqclkena_a_1");
+AB9540_FUNC_GROUPS(pdm, "pdmdata_b_1", "pdmclk_a_1");
+AB9540_FUNC_GROUPS(uartdata, "uartdata_a_1");
+AB9540_FUNC_GROUPS(pwmextvibra, "pwmextvibra1_b_1", "pwmextvibra2_b_1");
+AB9540_FUNC_GROUPS(usbvdat, "usbvdat_c_1");
+
+#define FUNCTION(fname)                                        \
+       {                                               \
+               .name = #fname,                         \
+               .groups = fname##_groups,               \
+               .ngroups = ARRAY_SIZE(fname##_groups),  \
+       }
+
+static const struct abx500_function ab9540_functions[] = {
+       FUNCTION(sysclkreq),
+       FUNCTION(gpio),
+       FUNCTION(pwmout),
+       FUNCTION(adi1),
+       FUNCTION(usbuicc),
+       FUNCTION(dmic),
+       FUNCTION(extcpena),
+       FUNCTION(modsclsda),
+       FUNCTION(batremn),
+       FUNCTION(resethw),
+       FUNCTION(service),
+       FUNCTION(hiqclkena),
+       FUNCTION(pdm),
+       FUNCTION(uartdata),
+       FUNCTION(pwmextvibra),
+       FUNCTION(usbvdat),
+};
+
+/*
+ * This table translates what is in the AB9540 specification regarding the
+ * ball alternate functions (as for DB, default, ALT_A, ALT_B and ALT_C).
+ * ALTERNATE_FUNCTIONS(GPIO_NUMBER, GPIOSEL bit, ALTERNATFUNC bit1,
+ * ALTERNATFUNC bit2, ALTA val, ALTB val, ALTC val),
+ *
+ * Example:
+ *
+ *     ALTERNATE_FUNCTIONS(13,     4,      3,      4, 1, 0, 2),
+ *     means that pin AB9540_PIN_D18 (pin 13) supports 4 muxes (default,
+ *     ALT_A, ALT_B and ALT_C), so the GPIOSEL and ALTERNATFUNC registers are
+ *     used to select the mux. The ALTA, ALTB and ALTC values indicate what
+ *     to write into the ALTERNATFUNC register. We need to specify these
+ *     values because the SoC designers did not apply the same logic for
+ *     selecting the mux across the ABx500 family.
+ *
+ *     As this pin supports at least the ALT_B mux, the default mux is
+ *     selected by writing 1 to the GPIOSEL bit:
+ *
+ *             | GPIOSEL bit=4 | alternatfunc bit2=4 | alternatfunc bit1=3
+ *     default |       1       |          0          |          0
+ *     alt_A   |       0       |          0          |          1
+ *     alt_B   |       0       |          0          |          0
+ *     alt_C   |       0       |          1          |          0
+ *
+ *     ALTERNATE_FUNCTIONS(1,      0, UNUSED, UNUSED),
+ *     means that pin AB9540_PIN_R4 (pin 1) supports 2 muxes, so only the
+ *     GPIOSEL register is used to select the mux. As this pin does not
+ *     support at least the ALT_B mux, the default mux is selected by
+ *     writing 0 to the GPIOSEL bit:
+ *
+ *             | GPIOSEL bit=0 | alternatfunc bit2=  | alternatfunc bit1=
+ *     default |       0       |          0          |          0
+ *     alt_A   |       1       |          0          |          0
+ */
+
+struct alternate_functions ab9540alternate_functions[AB9540_GPIO_MAX_NUMBER + 1] = {
+       /* GPIOSEL1 - bits 4-7 are reserved */
+       ALTERNATE_FUNCTIONS(0, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO0 */
+       ALTERNATE_FUNCTIONS(1,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO1, altA controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(2,      1, UNUSED, UNUSED, 0, 0, 0), /* GPIO2, altA controlled by bit 1 */
+       ALTERNATE_FUNCTIONS(3,      2, UNUSED, UNUSED, 0, 0, 0), /* GPIO3, altA controlled by bit 2*/
+       ALTERNATE_FUNCTIONS(4,      3, UNUSED, UNUSED, 0, 0, 0), /* GPIO4, altA controlled by bit 3*/
+       ALTERNATE_FUNCTIONS(5, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO5 */
+       ALTERNATE_FUNCTIONS(6, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO6 */
+       ALTERNATE_FUNCTIONS(7, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO7 */
+       ALTERNATE_FUNCTIONS(8, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO8 */
+       /* GPIOSEL2 - bits 0 and 3 are reserved */
+       ALTERNATE_FUNCTIONS(9, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO9 */
+       ALTERNATE_FUNCTIONS(10,      1,      0, UNUSED, 1, 0, 0), /* GPIO10, altA and altB controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(11,      2, UNUSED, UNUSED, 0, 0, 0), /* GPIO11, altA controlled by bit 2 */
+       ALTERNATE_FUNCTIONS(12, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO12 */
+       ALTERNATE_FUNCTIONS(13,      4,      3,      4, 1, 0, 2), /* GPIO13, altA altB and altC controlled by bit 3 and 4 */
+       ALTERNATE_FUNCTIONS(14,      5, UNUSED, UNUSED, 0, 0, 0), /* GPIO14, altA controlled by bit 5 */
+       ALTERNATE_FUNCTIONS(15,      6, UNUSED, UNUSED, 0, 0, 0), /* GPIO15, altA controlled by bit 6 */
+       ALTERNATE_FUNCTIONS(16,      7, UNUSED, UNUSED, 0, 0, 0), /* GPIO16, altA controlled by bit 7 */
+       /*
+        * GPIOSEL3 - bits 1 to 3 are reserved.
+        * Pins 17 to 20 are a special case: only bit 0 is used to select
+        * the alternate function for these 4 pins.
+        */
+       ALTERNATE_FUNCTIONS(17,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO17, altA controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(18,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO18, altA controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(19,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO19, altA controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(20,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO20, altA controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(21,      4, UNUSED, UNUSED, 0, 0, 0), /* GPIO21, altA controlled by bit 4 */
+       ALTERNATE_FUNCTIONS(22,      5, UNUSED, UNUSED, 0, 0, 0), /* GPIO22, altA controlled by bit 5 */
+       ALTERNATE_FUNCTIONS(23,      6, UNUSED, UNUSED, 0, 0, 0), /* GPIO23, altA controlled by bit 6 */
+       ALTERNATE_FUNCTIONS(24,      7, UNUSED, UNUSED, 0, 0, 0), /* GPIO24, altA controlled by bit 7 */
+       /* GPIOSEL4 - bit 1 reserved */
+       ALTERNATE_FUNCTIONS(25,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO25, altA controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(26, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO26 */
+       ALTERNATE_FUNCTIONS(27,      2, UNUSED, UNUSED, 0, 0, 0), /* GPIO27, altA controlled by bit 2 */
+       ALTERNATE_FUNCTIONS(28,      3, UNUSED, UNUSED, 0, 0, 0), /* GPIO28, altA controlled by bit 3 */
+       ALTERNATE_FUNCTIONS(29,      4, UNUSED, UNUSED, 0, 0, 0), /* GPIO29, altA controlled by bit 4 */
+       ALTERNATE_FUNCTIONS(30,      5, UNUSED, UNUSED, 0, 0, 0), /* GPIO30, altA controlled by bit 5 */
+       ALTERNATE_FUNCTIONS(31,      6, UNUSED, UNUSED, 0, 0, 0), /* GPIO31, altA controlled by bit 6 */
+       ALTERNATE_FUNCTIONS(32,      7, UNUSED, UNUSED, 0, 0, 0), /* GPIO32, altA controlled by bit 7 */
+       /* GPIOSEL5 - bit 0, 2-6 are reserved */
+       ALTERNATE_FUNCTIONS(33, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO33 */
+       ALTERNATE_FUNCTIONS(34,      1, UNUSED, UNUSED, 0, 0, 0), /* GPIO34, altA controlled by bit 1 */
+       ALTERNATE_FUNCTIONS(35, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO35 */
+       ALTERNATE_FUNCTIONS(36, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO36 */
+       ALTERNATE_FUNCTIONS(37, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO37 */
+       ALTERNATE_FUNCTIONS(38, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO38 */
+       ALTERNATE_FUNCTIONS(39, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO39 */
+       ALTERNATE_FUNCTIONS(40,      7, UNUSED, UNUSED, 0, 0, 0), /* GPIO40, altA controlled by bit 7 */
+       /* GPIOSEL6 - bit 2-7 are reserved */
+       ALTERNATE_FUNCTIONS(41,      0, UNUSED, UNUSED, 0, 0, 0), /* GPIO41, altA controlled by bit 0 */
+       ALTERNATE_FUNCTIONS(42,      1, UNUSED, UNUSED, 0, 0, 0), /* GPIO42, altA controlled by bit 1 */
+       ALTERNATE_FUNCTIONS(43, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO43 */
+       ALTERNATE_FUNCTIONS(44, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO44 */
+       ALTERNATE_FUNCTIONS(45, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO45 */
+       ALTERNATE_FUNCTIONS(46, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO46 */
+       ALTERNATE_FUNCTIONS(47, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO47 */
+       ALTERNATE_FUNCTIONS(48, UNUSED, UNUSED, UNUSED, 0, 0, 0), /* no GPIO48 */
+       /*
+        * GPIOSEL7 - bits 0 and 6-7 are reserved.
+        * Special case: GPIO60 is located at offset 5 of GPIOSEL7. It is not
+        * clear why the AB9540 datasheet calls it GPIO60 (GPIO54 would be
+        * logical), so from the SoC point of view we consider GPIO60 = GPIO54.
+        */
+       ALTERNATE_FUNCTIONS(49,      0, UNUSED, UNUSED, 0, 0, 0), /* no GPIO49 */
+       ALTERNATE_FUNCTIONS(50,      1,      2, UNUSED, 1, 0, 0), /* GPIO50, altA and altB controlled by bit 1 */
+       ALTERNATE_FUNCTIONS(51,      2, UNUSED, UNUSED, 0, 0, 0), /* GPIO51, altA controlled by bit 2 */
+       ALTERNATE_FUNCTIONS(52,      3, UNUSED, UNUSED, 0, 0, 0), /* GPIO52, altA controlled by bit 3 */
+       ALTERNATE_FUNCTIONS(53,      4, UNUSED, UNUSED, 0, 0, 0), /* GPIO53, altA controlled by bit 4 */
+       ALTERNATE_FUNCTIONS(54,      5, UNUSED, UNUSED, 0, 0, 0), /* GPIO54 = GPIO60, altA controlled by bit 5 */
+};
+
+struct abx500_gpio_irq_cluster ab9540_gpio_irq_cluster[] = {
+       GPIO_IRQ_CLUSTER(10, 13, AB8500_INT_GPIO10R),
+       GPIO_IRQ_CLUSTER(24, 25, AB8500_INT_GPIO24R),
+       GPIO_IRQ_CLUSTER(40, 41, AB8500_INT_GPIO40R),
+       GPIO_IRQ_CLUSTER(50, 54, AB9540_INT_GPIO50R),
+};
+
+static struct abx500_pinctrl_soc_data ab9540_soc = {
+       .gpio_ranges = ab9540_pinranges,
+       .gpio_num_ranges = ARRAY_SIZE(ab9540_pinranges),
+       .pins = ab9540_pins,
+       .npins = ARRAY_SIZE(ab9540_pins),
+       .functions = ab9540_functions,
+       .nfunctions = ARRAY_SIZE(ab9540_functions),
+       .groups = ab9540_groups,
+       .ngroups = ARRAY_SIZE(ab9540_groups),
+       .alternate_functions = ab9540alternate_functions,
+       .gpio_irq_cluster = ab9540_gpio_irq_cluster,
+       .ngpio_irq_cluster = ARRAY_SIZE(ab9540_gpio_irq_cluster),
+       .irq_gpio_rising_offset = AB8500_INT_GPIO6R,
+       .irq_gpio_falling_offset = AB8500_INT_GPIO6F,
+       .irq_gpio_factor = 1,
+};
+
+void
+abx500_pinctrl_ab9540_init(struct abx500_pinctrl_soc_data **soc)
+{
+       *soc = &ab9540_soc;
+}
diff --git a/drivers/pinctrl/pinctrl-abx500.c b/drivers/pinctrl/pinctrl-abx500.c
new file mode 100644 (file)
index 0000000..caecdd3
--- /dev/null
@@ -0,0 +1,1012 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2013
+ *
+ * Author: Patrice Chotard <patrice.chotard@st.com>
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500/ab8500-gpio.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+#include "pinctrl-abx500.h"
+
+/*
+ * The AB9540 and AB8540 GPIO support are extended versions
+ * of the AB8500 GPIO support.
+ * The AB9540 supports an additional (7th) register so that
+ * more GPIOs may be configured and used.
+ * The AB8540 supports four new GPIOs (GPIOx_VBAT) that have
+ * internal pull-up and pull-down capabilities.
+ */
+
+/*
+ * GPIO registers offset
+ * Bank: 0x10
+ */
+#define AB8500_GPIO_SEL1_REG   0x00
+#define AB8500_GPIO_SEL2_REG   0x01
+#define AB8500_GPIO_SEL3_REG   0x02
+#define AB8500_GPIO_SEL4_REG   0x03
+#define AB8500_GPIO_SEL5_REG   0x04
+#define AB8500_GPIO_SEL6_REG   0x05
+#define AB9540_GPIO_SEL7_REG   0x06
+
+#define AB8500_GPIO_DIR1_REG   0x10
+#define AB8500_GPIO_DIR2_REG   0x11
+#define AB8500_GPIO_DIR3_REG   0x12
+#define AB8500_GPIO_DIR4_REG   0x13
+#define AB8500_GPIO_DIR5_REG   0x14
+#define AB8500_GPIO_DIR6_REG   0x15
+#define AB9540_GPIO_DIR7_REG   0x16
+
+#define AB8500_GPIO_OUT1_REG   0x20
+#define AB8500_GPIO_OUT2_REG   0x21
+#define AB8500_GPIO_OUT3_REG   0x22
+#define AB8500_GPIO_OUT4_REG   0x23
+#define AB8500_GPIO_OUT5_REG   0x24
+#define AB8500_GPIO_OUT6_REG   0x25
+#define AB9540_GPIO_OUT7_REG   0x26
+
+#define AB8500_GPIO_PUD1_REG   0x30
+#define AB8500_GPIO_PUD2_REG   0x31
+#define AB8500_GPIO_PUD3_REG   0x32
+#define AB8500_GPIO_PUD4_REG   0x33
+#define AB8500_GPIO_PUD5_REG   0x34
+#define AB8500_GPIO_PUD6_REG   0x35
+#define AB9540_GPIO_PUD7_REG   0x36
+
+#define AB8500_GPIO_IN1_REG    0x40
+#define AB8500_GPIO_IN2_REG    0x41
+#define AB8500_GPIO_IN3_REG    0x42
+#define AB8500_GPIO_IN4_REG    0x43
+#define AB8500_GPIO_IN5_REG    0x44
+#define AB8500_GPIO_IN6_REG    0x45
+#define AB9540_GPIO_IN7_REG    0x46
+#define AB8540_GPIO_VINSEL_REG 0x47
+#define AB8540_GPIO_PULL_UPDOWN_REG    0x48
+#define AB8500_GPIO_ALTFUN_REG 0x50
+#define AB8540_GPIO_PULL_UPDOWN_MASK   0x03
+#define AB8540_GPIO_VINSEL_MASK        0x03
+#define AB8540_GPIOX_VBAT_START        51
+#define AB8540_GPIOX_VBAT_END  54
+
+struct abx500_pinctrl {
+       struct device *dev;
+       struct pinctrl_dev *pctldev;
+       struct abx500_pinctrl_soc_data *soc;
+       struct gpio_chip chip;
+       struct ab8500 *parent;
+       struct mutex lock;
+       struct abx500_gpio_irq_cluster *irq_cluster;
+       int irq_cluster_size;
+};
+
+/**
+ * to_abx500_pinctrl() - get the pointer to abx500_pinctrl
+ * @chip:      the gpio_chip member of struct abx500_pinctrl
+ */
+static inline struct abx500_pinctrl *to_abx500_pinctrl(struct gpio_chip *chip)
+{
+       return container_of(chip, struct abx500_pinctrl, chip);
+}
+
+static int abx500_gpio_get_bit(struct gpio_chip *chip, u8 reg,
+                              unsigned offset, bool *bit)
+{
+       struct abx500_pinctrl *pct = to_abx500_pinctrl(chip);
+       u8 pos = offset % 8;
+       u8 val;
+       int ret;
+
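+       /* eight GPIOs are packed per register: e.g. offset 10 selects the second register of the bank, bit 2 */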
+       reg += offset / 8;
+       ret = abx500_get_register_interruptible(pct->dev,
+                                               AB8500_MISC, reg, &val);
+
+       *bit = !!(val & BIT(pos));
+
+       if (ret < 0)
+               dev_err(pct->dev,
+                       "%s read reg =%x, offset=%x failed\n",
+                       __func__, reg, offset);
+
+       return ret;
+}
+
+static int abx500_gpio_set_bits(struct gpio_chip *chip, u8 reg,
+                               unsigned offset, int val)
+{
+       struct abx500_pinctrl *pct = to_abx500_pinctrl(chip);
+       u8 pos = offset % 8;
+       int ret;
+
+       reg += offset / 8;
+       ret = abx500_mask_and_set_register_interruptible(pct->dev,
+                               AB8500_MISC, reg, BIT(pos), val << pos);
+       if (ret < 0)
+               dev_err(pct->dev, "%s write failed\n", __func__);
+
+       return ret;
+}
+
+/**
+ * abx500_gpio_get() - Get the value of the given GPIO
+ * @chip:      GPIO device
+ * @offset:    GPIO number to read
+ */
+static int abx500_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+       struct abx500_pinctrl *pct = to_abx500_pinctrl(chip);
+       bool bit;
+       int ret;
+
+       ret = abx500_gpio_get_bit(chip, AB8500_GPIO_IN1_REG,
+                                 offset, &bit);
+       if (ret < 0) {
+               dev_err(pct->dev, "%s failed\n", __func__);
+               return ret;
+       }
+
+       return bit;
+}
+
+static void abx500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+{
+       struct abx500_pinctrl *pct = to_abx500_pinctrl(chip);
+       int ret;
+
+       ret = abx500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val);
+       if (ret < 0)
+               dev_err(pct->dev, "%s write failed\n", __func__);
+}
+
+static int abx500_config_pull_updown(struct abx500_pinctrl *pct,
+                                    int offset, enum abx500_gpio_pull_updown val)
+{
+       u8 pos;
+       int ret;
+       struct pullud *pullud;
+
+       if (!pct->soc->pullud) {
+               dev_err(pct->dev, "%s AB chip doesn't support pull up/down feature\n",
+                               __func__);
+               ret = -EPERM;
+               goto out;
+       }
+
+       pullud = pct->soc->pullud;
+
+       if ((offset < pullud->first_pin)
+               || (offset > pullud->last_pin)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
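+       /* each pin in the pull up/down range is assumed to take a two-bit field, packed from first_pin upwards */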
+       pos = (offset - pullud->first_pin) << 1;
+
+       ret = abx500_mask_and_set_register_interruptible(pct->dev,
+                       AB8500_MISC, AB8540_GPIO_PULL_UPDOWN_REG,
+                       AB8540_GPIO_PULL_UPDOWN_MASK << pos, val << pos);
+
+out:
+       if (ret < 0)
+               dev_err(pct->dev, "%s failed (%d)\n", __func__, ret);
+
+       return ret;
+}
+
+static int abx500_gpio_direction_output(struct gpio_chip *chip,
+                                       unsigned offset,
+                                       int val)
+{
+       struct abx500_pinctrl *pct = to_abx500_pinctrl(chip);
+       struct pullud *pullud = pct->soc->pullud;
+       unsigned gpio;
+       int ret;
+
+       /* set direction as output */
+       ret = abx500_gpio_set_bits(chip, AB8500_GPIO_DIR1_REG, offset, 1);
+       if (ret < 0)
+               return ret;
+
+       /* disable pull down */
+       ret = abx500_gpio_set_bits(chip, AB8500_GPIO_PUD1_REG, offset, 1);
+       if (ret < 0)
+               return ret;
+
+       /* if supported, disable both pull down and pull up */
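+       /* pullud ranges are given as 1-based pin numbers, while the gpiolib offset is 0-based */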
+       gpio = offset + 1;
+       if (pullud && gpio >= pullud->first_pin && gpio <= pullud->last_pin) {
+               ret = abx500_config_pull_updown(pct,
+                               gpio,
+                               ABX500_GPIO_PULL_NONE);
+               if (ret < 0)
+                       return ret;
+       }
+
+       /* set the output as 1 or 0 */
+       return abx500_gpio_set_bits(chip, AB8500_GPIO_OUT1_REG, offset, val);
+}
+
+static int abx500_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+       /* set the register as input */
+       return abx500_gpio_set_bits(chip, AB8500_GPIO_DIR1_REG, offset, 0);
+}
+
+static int abx500_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+       struct abx500_pinctrl *pct = to_abx500_pinctrl(chip);
+       /* The AB8500 GPIO numbers are off by one */
+       int gpio = offset + 1;
+       int hwirq;
+       int i;
+
+       for (i = 0; i < pct->irq_cluster_size; i++) {
+               struct abx500_gpio_irq_cluster *cluster =
+                       &pct->irq_cluster[i];
+
+               if (gpio >= cluster->start && gpio <= cluster->end) {
+                       /*
+                        * The ABx500 GPIOs' associated IRQs are clustered together
+                        * throughout the interrupt numbers at irregular intervals.
+                        * To solve this quandary, we have placed the read-in values
+                        * into the cluster information table.
+                        */
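+                       /* e.g. GPIO12 in cluster {10, 13, AB8500_INT_GPIO10R} maps to hwirq AB8500_INT_GPIO10R + 2 */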
+                       hwirq = gpio - cluster->start + cluster->to_irq;
+                       return irq_create_mapping(pct->parent->domain, hwirq);
+               }
+       }
+
+       return -EINVAL;
+}
+
+static int abx500_set_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
+                          unsigned gpio, int alt_setting)
+{
+       struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+       struct alternate_functions af = pct->soc->alternate_functions[gpio];
+       int ret;
+       int val;
+       unsigned offset;
+
+       const char *modes[] = {
+               [ABX500_DEFAULT]        = "default",
+               [ABX500_ALT_A]          = "altA",
+               [ABX500_ALT_B]          = "altB",
+               [ABX500_ALT_C]          = "altC",
+       };
+
+       /* sanity check */
+       if (((alt_setting == ABX500_ALT_A) && (af.gpiosel_bit == UNUSED)) ||
+           ((alt_setting == ABX500_ALT_B) && (af.alt_bit1 == UNUSED)) ||
+           ((alt_setting == ABX500_ALT_C) && (af.alt_bit2 == UNUSED))) {
+               dev_dbg(pct->dev, "pin %d doesn't support %s mode\n", gpio,
+                               modes[alt_setting]);
+               return -EINVAL;
+       }
+
+       /* on ABx5xx, there is no GPIO0, so adjust the offset */
+       offset = gpio - 1;
+
+       switch (alt_setting) {
+       case ABX500_DEFAULT:
+               /*
+                * for the ABx5xx family, default mode is normally selected by
+                * writing 0 to the GPIOSELx register; for pins which also
+                * support at least ALT_B mode, default mode is instead
+                * selected by writing 1 to the GPIOSELx register
+                */
+               val = 0;
+               if (af.alt_bit1 != UNUSED)
+                       val++;
+
+               ret = abx500_gpio_set_bits(chip, AB8500_GPIO_SEL1_REG,
+                                          offset, val);
+               break;
+
+       case ABX500_ALT_A:
+               /*
+                * for the ABx5xx family, alt_a mode is normally selected by
+                * writing 1 to the GPIOSELx register; for pins which also
+                * support at least ALT_B mode, alt_a is instead selected by
+                * writing 0 to the GPIOSELx register and programming the
+                * relevant ALTFUNC bits with alta_val
+                */
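+               /* e.g. AB9540 GPIO50 (alt_bit1 = 2, alta_val = 1): altA clears its GPIOSEL bit and sets ALTFUN bit 2 */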
+               if (af.alt_bit1 != UNUSED) {
+                       ret = abx500_gpio_set_bits(chip, AB8500_GPIO_SEL1_REG,
+                                       offset, 0);
+                       ret = abx500_gpio_set_bits(chip,
+                                       AB8500_GPIO_ALTFUN_REG,
+                                       af.alt_bit1,
+                                       !!(af.alta_val & BIT(0)));
+                       if (af.alt_bit2 != UNUSED)
+                               ret = abx500_gpio_set_bits(chip,
+                                       AB8500_GPIO_ALTFUN_REG,
+                                       af.alt_bit2,
+                                       !!(af.alta_val & BIT(1)));
+               } else
+                       ret = abx500_gpio_set_bits(chip, AB8500_GPIO_SEL1_REG,
+                                       offset, 1);
+               break;
+
+       case ABX500_ALT_B:
+               ret = abx500_gpio_set_bits(chip, AB8500_GPIO_SEL1_REG,
+                               offset, 0);
+               ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG,
+                               af.alt_bit1, !!(af.altb_val & BIT(0)));
+               if (af.alt_bit2 != UNUSED)
+                       ret = abx500_gpio_set_bits(chip,
+                                       AB8500_GPIO_ALTFUN_REG,
+                                       af.alt_bit2,
+                                       !!(af.altb_val & BIT(1)));
+               break;
+
+       case ABX500_ALT_C:
+               ret = abx500_gpio_set_bits(chip, AB8500_GPIO_SEL1_REG,
+                               offset, 0);
+               ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG,
+                               af.alt_bit1, !!(af.altc_val & BIT(0)));
+               ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG,
+                               af.alt_bit2, !!(af.altc_val & BIT(1)));
+               break;
+
+       default:
+               dev_dbg(pct->dev, "unknown alt_setting %d\n", alt_setting);
+
+               return -EINVAL;
+       }
+
+       return ret;
+}
+
+static u8 abx500_get_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
+                         unsigned gpio)
+{
+       u8 mode;
+       bool bit_mode;
+       bool alt_bit1;
+       bool alt_bit2;
+       struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+       struct alternate_functions af = pct->soc->alternate_functions[gpio];
+       /* on ABx5xx, there is no GPIO0, so adjust the offset */
+       unsigned offset = gpio - 1;
+
+       /*
+        * if gpiosel_bit is set to UNUSED, it means there is
+        * no GPIO, or it is a special case
+        */
+       if (af.gpiosel_bit == UNUSED)
+               return ABX500_DEFAULT;
+
+       /* read GpioSelx register */
+       abx500_gpio_get_bit(chip, AB8500_GPIO_SEL1_REG + (offset / 8),
+                       af.gpiosel_bit, &bit_mode);
+       mode = bit_mode;
+
+       /* sanity check */
+       if ((af.alt_bit1 < UNUSED) || (af.alt_bit1 > 7) ||
+           (af.alt_bit2 < UNUSED) || (af.alt_bit2 > 7)) {
+               dev_err(pct->dev,
+                       "alt_bitX value not in correct range (-1 to 7)\n");
+               return -EINVAL;
+       }
+
+       /* if alt_bit2 is used, alt_bit1 must be used too */
+       if ((af.alt_bit2 != UNUSED) && (af.alt_bit1 == UNUSED)) {
+               dev_err(pct->dev,
+                       "if alt_bit2 is used, alt_bit1 can't be unused\n");
+               return -EINVAL;
+       }
+
+       /* check if the pin uses the AlternateFunction register */
+       if ((af.alt_bit1 == UNUSED) && (af.alt_bit2 == UNUSED))
+               return mode;
+       /*
+        * if pin GPIOSEL bit is set and pin supports alternate function,
+        * it means DEFAULT mode
+        */
+       if (mode)
+               return ABX500_DEFAULT;
+
+       /*
+        * the pin uses the AlternateFunction register;
+        * read the alt_bit1 value
+        */
+       abx500_gpio_get_bit(chip, AB8500_GPIO_ALTFUN_REG,
+                           af.alt_bit1, &alt_bit1);
+
+       if (af.alt_bit2 != UNUSED)
+               /* read alt_bit2 value */
+               abx500_gpio_get_bit(chip, AB8500_GPIO_ALTFUN_REG, af.alt_bit2,
+                               &alt_bit2);
+       else
+               alt_bit2 = 0;
+
+       mode = (alt_bit2 << 1) + alt_bit1;
+       if (mode == af.alta_val)
+               return ABX500_ALT_A;
+       else if (mode == af.altb_val)
+               return ABX500_ALT_B;
+       else
+               return ABX500_ALT_C;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/seq_file.h>
+
+static void abx500_gpio_dbg_show_one(struct seq_file *s,
+                                    struct pinctrl_dev *pctldev,
+                                    struct gpio_chip *chip,
+                                    unsigned offset, unsigned gpio)
+{
+       const char *label = gpiochip_is_requested(chip, offset - 1);
+       u8 gpio_offset = offset - 1;
+       int mode = -1;
+       bool is_out;
+       bool pull;
+
+       const char *modes[] = {
+               [ABX500_DEFAULT]        = "default",
+               [ABX500_ALT_A]          = "altA",
+               [ABX500_ALT_B]          = "altB",
+               [ABX500_ALT_C]          = "altC",
+       };
+
+       abx500_gpio_get_bit(chip, AB8500_GPIO_DIR1_REG, gpio_offset, &is_out);
+       abx500_gpio_get_bit(chip, AB8500_GPIO_PUD1_REG, gpio_offset, &pull);
+
+       if (pctldev)
+               mode = abx500_get_mode(pctldev, chip, offset);
+
+       seq_printf(s, " gpio-%-3d (%-20.20s) %-3s %-9s %s",
+                  gpio, label ?: "(none)",
+                  is_out ? "out" : "in ",
+                  is_out ?
+                  (chip->get
+                  ? (chip->get(chip, offset) ? "hi" : "lo")
+                  : "?  ")
+                  : (pull ? "pull up" : "pull down"),
+                  (mode < 0) ? "unknown" : modes[mode]);
+}
+
+static void abx500_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+       unsigned i;
+       unsigned gpio = chip->base;
+       struct abx500_pinctrl *pct = to_abx500_pinctrl(chip);
+       struct pinctrl_dev *pctldev = pct->pctldev;
+
+       for (i = 0; i < chip->ngpio; i++, gpio++) {
+               /* On the AB8500, there is no GPIO0; the first is GPIO1 */
+               abx500_gpio_dbg_show_one(s, pctldev, chip, i + 1, gpio);
+               seq_printf(s, "\n");
+       }
+}
+
+#else
+static inline void abx500_gpio_dbg_show_one(struct seq_file *s,
+                                           struct pinctrl_dev *pctldev,
+                                           struct gpio_chip *chip,
+                                           unsigned offset, unsigned gpio)
+{
+}
+#define abx500_gpio_dbg_show   NULL
+#endif
+
+int abx500_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+       int gpio = chip->base + offset;
+
+       return pinctrl_request_gpio(gpio);
+}
+
+void abx500_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+       int gpio = chip->base + offset;
+
+       pinctrl_free_gpio(gpio);
+}
+
+static struct gpio_chip abx500gpio_chip = {
+       .label                  = "abx500-gpio",
+       .owner                  = THIS_MODULE,
+       .request                = abx500_gpio_request,
+       .free                   = abx500_gpio_free,
+       .direction_input        = abx500_gpio_direction_input,
+       .get                    = abx500_gpio_get,
+       .direction_output       = abx500_gpio_direction_output,
+       .set                    = abx500_gpio_set,
+       .to_irq                 = abx500_gpio_to_irq,
+       .dbg_show               = abx500_gpio_dbg_show,
+};
+
+static int abx500_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
+{
+       struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+
+       return pct->soc->nfunctions;
+}
+
+static const char *abx500_pmx_get_func_name(struct pinctrl_dev *pctldev,
+                                        unsigned function)
+{
+       struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+
+       return pct->soc->functions[function].name;
+}
+
+static int abx500_pmx_get_func_groups(struct pinctrl_dev *pctldev,
+                                     unsigned function,
+                                     const char * const **groups,
+                                     unsigned * const num_groups)
+{
+       struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+
+       *groups = pct->soc->functions[function].groups;
+       *num_groups = pct->soc->functions[function].ngroups;
+
+       return 0;
+}
+
+static int abx500_pmx_enable(struct pinctrl_dev *pctldev, unsigned function,
+                            unsigned group)
+{
+       struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+       struct gpio_chip *chip = &pct->chip;
+       const struct abx500_pingroup *g;
+       int i;
+       int ret = 0;
+
+       g = &pct->soc->groups[group];
+       if (g->altsetting < 0)
+               return -EINVAL;
+
+       dev_dbg(pct->dev, "enable group %s, %u pins\n", g->name, g->npins);
+
+       for (i = 0; i < g->npins; i++) {
+               dev_dbg(pct->dev, "setting pin %d to altsetting %d\n",
+                       g->pins[i], g->altsetting);
+
+               ret = abx500_set_mode(pctldev, chip, g->pins[i], g->altsetting);
+       }
+
+       return ret;
+}
+
+static void abx500_pmx_disable(struct pinctrl_dev *pctldev,
+                              unsigned function, unsigned group)
+{
+       struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+       const struct abx500_pingroup *g;
+
+       g = &pct->soc->groups[group];
+       if (g->altsetting < 0)
+               return;
+
+       /* FIXME: poke out the mux, set the pin to some default state? */
+       dev_dbg(pct->dev, "disable group %s, %u pins\n", g->name, g->npins);
+}
+
+int abx500_gpio_request_enable(struct pinctrl_dev *pctldev,
+                              struct pinctrl_gpio_range *range,
+                              unsigned offset)
+{
+       struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+       const struct abx500_pinrange *p;
+       int ret;
+       int i;
+
+       /*
+        * Different ranges have different ways to enable GPIO function on a
+        * pin, so refer back to our local range type, where we handily define
+        * what altfunc enables GPIO for a certain pin.
+        */
+       for (i = 0; i < pct->soc->gpio_num_ranges; i++) {
+               p = &pct->soc->gpio_ranges[i];
+               if ((offset >= p->offset) &&
+                   (offset < (p->offset + p->npins)))
+                       break;
+       }
+
+       if (i == pct->soc->gpio_num_ranges) {
+               dev_err(pct->dev, "%s failed to locate range\n", __func__);
+               return -ENODEV;
+       }
+
+       dev_dbg(pct->dev, "enable GPIO by altfunc %d at gpio %d\n",
+               p->altfunc, offset);
+
+       ret = abx500_set_mode(pct->pctldev, &pct->chip,
+                             offset, p->altfunc);
+       if (ret < 0) {
+               dev_err(pct->dev, "%s setting altfunc failed\n", __func__);
+               return ret;
+       }
+
+       return ret;
+}
+
+static void abx500_gpio_disable_free(struct pinctrl_dev *pctldev,
+                                    struct pinctrl_gpio_range *range,
+                                    unsigned offset)
+{
+}
+
+static struct pinmux_ops abx500_pinmux_ops = {
+       .get_functions_count = abx500_pmx_get_funcs_cnt,
+       .get_function_name = abx500_pmx_get_func_name,
+       .get_function_groups = abx500_pmx_get_func_groups,
+       .enable = abx500_pmx_enable,
+       .disable = abx500_pmx_disable,
+       .gpio_request_enable = abx500_gpio_request_enable,
+       .gpio_disable_free = abx500_gpio_disable_free,
+};
+
+static int abx500_get_groups_cnt(struct pinctrl_dev *pctldev)
+{
+       struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+
+       return pct->soc->ngroups;
+}
+
+static const char *abx500_get_group_name(struct pinctrl_dev *pctldev,
+                                        unsigned selector)
+{
+       struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+
+       return pct->soc->groups[selector].name;
+}
+
+static int abx500_get_group_pins(struct pinctrl_dev *pctldev,
+                                unsigned selector,
+                                const unsigned **pins,
+                                unsigned *num_pins)
+{
+       struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+
+       *pins = pct->soc->groups[selector].pins;
+       *num_pins = pct->soc->groups[selector].npins;
+
+       return 0;
+}
+
+static void abx500_pin_dbg_show(struct pinctrl_dev *pctldev,
+                               struct seq_file *s, unsigned offset)
+{
+       struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+       struct gpio_chip *chip = &pct->chip;
+
+       abx500_gpio_dbg_show_one(s, pctldev, chip, offset,
+                                chip->base + offset - 1);
+}
+
+static struct pinctrl_ops abx500_pinctrl_ops = {
+       .get_groups_count = abx500_get_groups_cnt,
+       .get_group_name = abx500_get_group_name,
+       .get_group_pins = abx500_get_group_pins,
+       .pin_dbg_show = abx500_pin_dbg_show,
+};
+
+int abx500_pin_config_get(struct pinctrl_dev *pctldev,
+                         unsigned pin,
+                         unsigned long *config)
+{
+       return -ENOSYS;
+}
+
+int abx500_pin_config_set(struct pinctrl_dev *pctldev,
+                         unsigned pin,
+                         unsigned long config)
+{
+       struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
+       struct pullud *pullud = pct->soc->pullud;
+       struct gpio_chip *chip = &pct->chip;
+       unsigned offset;
+       int ret;
+       enum pin_config_param param = pinconf_to_config_param(config);
+       enum pin_config_param argument = pinconf_to_config_argument(config);
+
+       dev_dbg(chip->dev, "pin %d [%#lx]: %s %s\n",
+               pin, config, (param == PIN_CONFIG_OUTPUT) ? "output " : "input",
+               (param == PIN_CONFIG_OUTPUT) ? (argument ? "high" : "low") :
+               (argument ? "pull up" : "pull down"));
+
+       /* on ABx500, there is no GPIO0, so adjust the offset */
+       offset = pin - 1;
+
+       switch (param) {
+       case PIN_CONFIG_BIAS_PULL_DOWN:
+               /*
+                * if argument = 1 set the pull down
+                * else clear the pull down
+                */
+               ret = abx500_gpio_direction_input(chip, offset);
+               /*
+                * Some chips only support pull down, while some actually
+                * support both pull up and pull down. Such chips have
+                * a "pullud" range specified for the pins that support
+                * both features. If the pin is not within that range, we
+                * fall back to the old register bit that only supports pull down.
+                */
+               if (pullud &&
+                   pin >= pullud->first_pin &&
+                   pin <= pullud->last_pin)
+                       ret = abx500_config_pull_updown(pct,
+                               pin,
+                               argument ? ABX500_GPIO_PULL_DOWN : ABX500_GPIO_PULL_NONE);
+               else
+                       /* Chip only supports pull down */
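+                       /* a set PUD bit disables the pull down, hence the inverted argument */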
+                       ret = abx500_gpio_set_bits(chip, AB8500_GPIO_PUD1_REG,
+                               offset, argument ? 0 : 1);
+               break;
+
+       case PIN_CONFIG_OUTPUT:
+               ret = abx500_gpio_direction_output(chip, offset, argument);
+
+               break;
+
+       default:
+               dev_err(chip->dev, "illegal configuration requested\n");
+
+               return -EINVAL;
+       }
+
+       return ret;
+}
+
+static struct pinconf_ops abx500_pinconf_ops = {
+       .pin_config_get = abx500_pin_config_get,
+       .pin_config_set = abx500_pin_config_set,
+};
+
+static struct pinctrl_desc abx500_pinctrl_desc = {
+       .name = "pinctrl-abx500",
+       .pctlops = &abx500_pinctrl_ops,
+       .pmxops = &abx500_pinmux_ops,
+       .confops = &abx500_pinconf_ops,
+       .owner = THIS_MODULE,
+};
+
+static int abx500_get_gpio_num(struct abx500_pinctrl_soc_data *soc)
+{
+       unsigned int lowest = 0;
+       unsigned int highest = 0;
+       unsigned int npins = 0;
+       int i;
+
+       /*
+        * Compute the number of GPIOs from the SoC GPIO range descriptors.
+        * These ranges may include "holes" but the GPIO number space shall
+        * still be homogeneous, so we need to detect and account for any
+        * such holes so that these are included in the number of GPIO pins.
+        */
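+       /*
+        * e.g. two ranges of four pins starting at 1 and 10 give
+        * lowest = 1 and highest = 13, so 13 GPIOs including the 5..9 hole
+        */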
+       for (i = 0; i < soc->gpio_num_ranges; i++) {
+               unsigned gstart;
+               unsigned gend;
+               const struct abx500_pinrange *p;
+
+               p = &soc->gpio_ranges[i];
+               gstart = p->offset;
+               gend = p->offset + p->npins - 1;
+
+               if (i == 0) {
+                       /* First iteration, set start values */
+                       lowest = gstart;
+                       highest = gend;
+               } else {
+                       if (gstart < lowest)
+                               lowest = gstart;
+                       if (gend > highest)
+                               highest = gend;
+               }
+       }
+       /* this gives the absolute number of pins */
+       npins = highest - lowest + 1;
+       return npins;
+}
+
+static const struct of_device_id abx500_gpio_match[] = {
+       { .compatible = "stericsson,ab8500-gpio", .data = (void *)PINCTRL_AB8500, },
+       { .compatible = "stericsson,ab8505-gpio", .data = (void *)PINCTRL_AB8505, },
+       { .compatible = "stericsson,ab8540-gpio", .data = (void *)PINCTRL_AB8540, },
+       { .compatible = "stericsson,ab9540-gpio", .data = (void *)PINCTRL_AB9540, },
+       { /* sentinel */ },
+};
+
+static int abx500_gpio_probe(struct platform_device *pdev)
+{
+       struct ab8500_platform_data *abx500_pdata =
+                               dev_get_platdata(pdev->dev.parent);
+       struct abx500_gpio_platform_data *pdata = NULL;
+       struct device_node *np = pdev->dev.of_node;
+       struct abx500_pinctrl *pct;
+       const struct platform_device_id *platid = platform_get_device_id(pdev);
+       unsigned int id = -1;
+       int ret, err;
+       int i;
+
+       if (abx500_pdata)
+               pdata = abx500_pdata->gpio;
+       if (!pdata) {
+               if (np) {
+                       const struct of_device_id *match;
+
+                       match = of_match_device(abx500_gpio_match, &pdev->dev);
+                       if (!match)
+                               return -ENODEV;
+                       id = (unsigned long)match->data;
+               } else {
+                       dev_err(&pdev->dev, "gpio dt and platform data missing\n");
+                       return -ENODEV;
+               }
+       }
+
+       if (platid)
+               id = platid->driver_data;
+
+       pct = devm_kzalloc(&pdev->dev, sizeof(struct abx500_pinctrl),
+                                  GFP_KERNEL);
+       if (pct == NULL) {
+               dev_err(&pdev->dev,
+                       "failed to allocate memory for pct\n");
+               return -ENOMEM;
+       }
+
+       pct->dev = &pdev->dev;
+       pct->parent = dev_get_drvdata(pdev->dev.parent);
+       pct->chip = abx500gpio_chip;
+       pct->chip.dev = &pdev->dev;
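+       /* when probed from DT there is no platform data, so let gpiolib pick a dynamic base */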
+       pct->chip.base = (np) ? -1 : pdata->gpio_base;
+
+       /* initialize the lock */
+       mutex_init(&pct->lock);
+
+       /* Poke in other ASIC variants here */
+       switch (id) {
+       case PINCTRL_AB8500:
+               abx500_pinctrl_ab8500_init(&pct->soc);
+               break;
+       case PINCTRL_AB8540:
+               abx500_pinctrl_ab8540_init(&pct->soc);
+               break;
+       case PINCTRL_AB9540:
+               abx500_pinctrl_ab9540_init(&pct->soc);
+               break;
+       case PINCTRL_AB8505:
+               abx500_pinctrl_ab8505_init(&pct->soc);
+               break;
+       default:
+               dev_err(&pdev->dev, "Unsupported pinctrl sub driver (%d)\n",
+                               (int) platid->driver_data);
+               mutex_destroy(&pct->lock);
+               return -EINVAL;
+       }
+
+       if (!pct->soc) {
+               dev_err(&pdev->dev, "Invalid SOC data\n");
+               mutex_destroy(&pct->lock);
+               return -EINVAL;
+       }
+
+       pct->chip.ngpio = abx500_get_gpio_num(pct->soc);
+       pct->irq_cluster = pct->soc->gpio_irq_cluster;
+       pct->irq_cluster_size = pct->soc->ngpio_irq_cluster;
+
+       ret = gpiochip_add(&pct->chip);
+       if (ret) {
+               dev_err(&pdev->dev, "unable to add gpiochip: %d\n", ret);
+               mutex_destroy(&pct->lock);
+               return ret;
+       }
+       dev_info(&pdev->dev, "added gpiochip\n");
+
+       abx500_pinctrl_desc.pins = pct->soc->pins;
+       abx500_pinctrl_desc.npins = pct->soc->npins;
+       pct->pctldev = pinctrl_register(&abx500_pinctrl_desc, &pdev->dev, pct);
+       if (!pct->pctldev) {
+               dev_err(&pdev->dev,
+                       "could not register abx500 pinctrl driver\n");
+               ret = -EINVAL;
+               goto out_rem_chip;
+       }
+       dev_info(&pdev->dev, "registered pin controller\n");
+
+       /* We will handle a range of GPIO pins */
+       for (i = 0; i < pct->soc->gpio_num_ranges; i++) {
+               const struct abx500_pinrange *p = &pct->soc->gpio_ranges[i];
+
+               ret = gpiochip_add_pin_range(&pct->chip,
+                                       dev_name(&pdev->dev),
+                                       p->offset - 1, p->offset, p->npins);
+               if (ret < 0)
+                       goto out_rem_chip;
+       }
+
+       platform_set_drvdata(pdev, pct);
+       dev_info(&pdev->dev, "initialized abx500 pinctrl driver\n");
+
+       return 0;
+
+out_rem_chip:
+       err = gpiochip_remove(&pct->chip);
+       if (err)
+               dev_info(&pdev->dev, "failed to remove gpiochip\n");
+
+       mutex_destroy(&pct->lock);
+       return ret;
+}
+
+/**
+ * abx500_gpio_remove() - remove the abx500 GPIO driver
+ * @pdev:      Platform device registered
+ */
+static int abx500_gpio_remove(struct platform_device *pdev)
+{
+       struct abx500_pinctrl *pct = platform_get_drvdata(pdev);
+       int ret;
+
+       ret = gpiochip_remove(&pct->chip);
+       if (ret < 0) {
+               dev_err(pct->dev, "unable to remove gpiochip: %d\n",
+                       ret);
+               return ret;
+       }
+
+       mutex_destroy(&pct->lock);
+
+       return 0;
+}
+
+static const struct platform_device_id abx500_pinctrl_id[] = {
+       { "pinctrl-ab8500", PINCTRL_AB8500 },
+       { "pinctrl-ab8540", PINCTRL_AB8540 },
+       { "pinctrl-ab9540", PINCTRL_AB9540 },
+       { "pinctrl-ab8505", PINCTRL_AB8505 },
+       { },
+};
+
+static struct platform_driver abx500_gpio_driver = {
+       .driver = {
+               .name = "abx500-gpio",
+               .owner = THIS_MODULE,
+               .of_match_table = abx500_gpio_match,
+       },
+       .probe = abx500_gpio_probe,
+       .remove = abx500_gpio_remove,
+       .id_table = abx500_pinctrl_id,
+};
+
+static int __init abx500_gpio_init(void)
+{
+       return platform_driver_register(&abx500_gpio_driver);
+}
+core_initcall(abx500_gpio_init);
+
+MODULE_AUTHOR("Patrice Chotard <patrice.chotard@st.com>");
+MODULE_DESCRIPTION("Driver allowing unused ABx5xx pins to be used as GPIOs");
+MODULE_ALIAS("platform:abx500-gpio");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-abx500.h b/drivers/pinctrl/pinctrl-abx500.h
new file mode 100644 (file)
index 0000000..eeca8f9
--- /dev/null
@@ -0,0 +1,234 @@
+#ifndef PINCTRL_PINCTRL_ABx500_H
+#define PINCTRL_PINCTRL_ABx500_H
+
+/* Package definitions */
+#define PINCTRL_AB8500 0
+#define PINCTRL_AB8540 1
+#define PINCTRL_AB9540 2
+#define PINCTRL_AB8505 3
+
+/* pins alternate function */
+enum abx500_pin_func {
+       ABX500_DEFAULT,
+       ABX500_ALT_A,
+       ABX500_ALT_B,
+       ABX500_ALT_C,
+};
+
+/**
+ * struct abx500_function - ABx500 pinctrl mux function
+ * @name: The name of the function, exported to pinctrl core.
+ * @groups: An array of pin groups that may select this function.
+ * @ngroups: The number of entries in @groups.
+ */
+struct abx500_function {
+       const char *name;
+       const char * const *groups;
+       unsigned ngroups;
+};
+
+/**
+ * struct abx500_pingroup - describes a ABx500 pin group
+ * @name: the name of this specific pin group
+ * @pins: an array of discrete physical pins used in this group, taken
+ *     from the driver-local pin enumeration space
+ * @npins: the number of pins in this group array, i.e. the number of
+ *     elements in .pins so we can iterate over that array
+ * @altsetting: the altsetting to apply to all pins in this group to
+ *     configure them to be used by a function
+ */
+struct abx500_pingroup {
+       const char *name;
+       const unsigned int *pins;
+       const unsigned npins;
+       int altsetting;
+};
+
+#define ALTERNATE_FUNCTIONS(pin, sel_bit, alt1, alt2, alta, altb, altc)        \
+{                                                                      \
+       .pin_number = pin,                                              \
+       .gpiosel_bit = sel_bit,                                         \
+       .alt_bit1 = alt1,                                               \
+       .alt_bit2 = alt2,                                               \
+       .alta_val = alta,                                               \
+       .altb_val = altb,                                               \
+       .altc_val = altc,                                               \
+}
+
+#define UNUSED -1
+/**
+ * struct alternate_functions
+ * @pin_number:                The pin number
+ * @gpiosel_bit:       Control bit in GPIOSEL register,
+ * @alt_bit1:          First AlternateFunction bit used to select the
+ *                     alternate function
+ * @alt_bit2:          Second AlternateFunction bit used to select the
+ *                     alternate function
+ *
+ *                     The three following fields are necessary because the
+ *                     ABx500 SoC family is not consistent in how the altA,
+ *                     altB and altC functions are selected through the
+ *                     AlternateFunction register.
+ * @alta_val:          value to write in alternatfunc to select altA function
+ * @altb_val:          value to write in alternatfunc to select altB function
+ * @altc_val:          value to write in alternatfunc to select altC function
+ */
+struct alternate_functions {
+       unsigned pin_number;
+       s8 gpiosel_bit;
+       s8 alt_bit1;
+       s8 alt_bit2;
+       u8 alta_val;
+       u8 altb_val;
+       u8 altc_val;
+};
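+
+/*
+ * Example (from the AB9540 table): ALTERNATE_FUNCTIONS(50, 1, 2, UNUSED, 1, 0, 0)
+ * describes pin 50 whose GPIO/alternate selection is bit 1 of its GPIOSEL
+ * register, whose alternate function is chosen by bit 2 of the
+ * AlternateFunction register, where the value 1 selects altA and 0 selects altB.
+ */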
+
+/**
+ * struct pullud - specific pull up/down feature
+ * @first_pin:         The number of the first pin which supports the
+ *                     specific pull up/down feature
+ * @last_pin:          The number of the last pin which supports it
+ */
+struct pullud {
+       unsigned first_pin;
+       unsigned last_pin;
+};
+
+#define GPIO_IRQ_CLUSTER(a, b, c)      \
+{                                      \
+       .start = a,                     \
+       .end = b,                       \
+       .to_irq = c,                    \
+}
+
+/**
+ * struct abx500_gpio_irq_cluster - indicates GPIOs which are interrupt
+ *                     capable
+ * @start:             The number of the first interrupt-capable pin
+ * @end:               The number of the last interrupt-capable pin
+ * @to_irq:            The ABx500 GPIO's associated IRQs are clustered
+ *                      together throughout the interrupt numbers at irregular
+ *                      intervals. To solve this quandary, we will place the
+ *                      read-in values into the cluster information table
+ */
+
+struct abx500_gpio_irq_cluster {
+       int start;
+       int end;
+       int to_irq;
+};
+
+/**
+ * struct abx500_pinrange - map pin numbers to GPIO offsets
+ * @offset:            offset into the GPIO local numberspace, incidentally
+ *                     identical to the offset into the local pin numberspace
+ * @npins:             number of pins to map from both offsets
+ * @altfunc:           altfunc setting to be used to enable GPIO on a pin in
+ *                     this range (may vary)
+ */
+struct abx500_pinrange {
+       unsigned int offset;
+       unsigned int npins;
+       int altfunc;
+};
+
+#define ABX500_PINRANGE(a, b, c) { .offset = a, .npins = b, .altfunc = c }
+
+/**
+ * struct abx500_pinctrl_soc_data - ABx500 pin controller per-SoC configuration
+ * @gpio_ranges:       An array of GPIO ranges for this SoC
+ * @gpio_num_ranges:   The number of GPIO ranges for this SoC
+ * @pins:              An array describing all pins the pin controller affects.
+ *                     All pins which are also GPIOs must be listed first within the
+ *                     array, and be numbered identically to the GPIO controller's
+ *                     numbering.
+ * @npins:             The number of entries in @pins.
+ * @functions:         The functions supported on this SoC.
+ * @nfunctions:                The number of entries in @functions.
+ * @groups:            An array describing all pin groups the pin SoC supports.
+ * @ngroups:           The number of entries in @groups.
+ * @alternate_functions: An array describing which alternate functions each
+ *                     pin supports and how to select them.
+ * @pullud:            The range of pins which support the specific pull
+ *                     up/down registers.
+ * @gpio_irq_cluster:  An array of interrupt-capable GPIO clusters for this SoC
+ * @ngpio_irq_cluster: The number of interrupt-capable GPIO clusters for this SoC
+ * @irq_gpio_rising_offset: Interrupt offset used as base to compute specific
+ *                     setting strategy of the rising interrupt line
+ * @irq_gpio_falling_offset: Interrupt offset used as base to compute specific
+ *                     setting strategy of the falling interrupt line
+ * @irq_gpio_factor:   Factor used to compute specific setting strategy of
+ *                     the interrupt line
+ */
+
+struct abx500_pinctrl_soc_data {
+       const struct abx500_pinrange *gpio_ranges;
+       unsigned gpio_num_ranges;
+       const struct pinctrl_pin_desc *pins;
+       unsigned npins;
+       const struct abx500_function *functions;
+       unsigned nfunctions;
+       const struct abx500_pingroup *groups;
+       unsigned ngroups;
+       struct alternate_functions *alternate_functions;
+       struct pullud *pullud;
+       struct abx500_gpio_irq_cluster *gpio_irq_cluster;
+       unsigned ngpio_irq_cluster;
+       int irq_gpio_rising_offset;
+       int irq_gpio_falling_offset;
+       int irq_gpio_factor;
+};
+
+#ifdef CONFIG_PINCTRL_AB8500
+
+void abx500_pinctrl_ab8500_init(struct abx500_pinctrl_soc_data **soc);
+
+#else
+
+static inline void
+abx500_pinctrl_ab8500_init(struct abx500_pinctrl_soc_data **soc)
+{
+}
+
+#endif
+
+#ifdef CONFIG_PINCTRL_AB8540
+
+void abx500_pinctrl_ab8540_init(struct abx500_pinctrl_soc_data **soc);
+
+#else
+
+static inline void
+abx500_pinctrl_ab8540_init(struct abx500_pinctrl_soc_data **soc)
+{
+}
+
+#endif
+
+#ifdef CONFIG_PINCTRL_AB9540
+
+void abx500_pinctrl_ab9540_init(struct abx500_pinctrl_soc_data **soc);
+
+#else
+
+static inline void
+abx500_pinctrl_ab9540_init(struct abx500_pinctrl_soc_data **soc)
+{
+}
+
+#endif
+
+#ifdef CONFIG_PINCTRL_AB8505
+
+void abx500_pinctrl_ab8505_init(struct abx500_pinctrl_soc_data **soc);
+
+#else
+
+static inline void
+abx500_pinctrl_ab8505_init(struct abx500_pinctrl_soc_data **soc)
+{
+}
+
+#endif
+
+#endif /* PINCTRL_PINCTRL_ABx500_H */
index de05b64f0da695708f2c5ef201c5438c96946d88..142729914c347fc305e32449c11f637a44ad654f 100644 (file)
@@ -599,7 +599,7 @@ static int exynos5440_gpio_direction_output(struct gpio_chip *gc, unsigned offse
 }
 
 /* parse the pin numbers listed in the 'samsung,exynos5440-pins' property */
-static int __init exynos5440_pinctrl_parse_dt_pins(struct platform_device *pdev,
+static int exynos5440_pinctrl_parse_dt_pins(struct platform_device *pdev,
                        struct device_node *cfg_np, unsigned int **pin_list,
                        unsigned int *npins)
 {
@@ -630,7 +630,7 @@ static int __init exynos5440_pinctrl_parse_dt_pins(struct platform_device *pdev,
  * Parse the information about all the available pin groups and pin functions
  * from device node of the pin-controller.
  */
-static int __init exynos5440_pinctrl_parse_dt(struct platform_device *pdev,
+static int exynos5440_pinctrl_parse_dt(struct platform_device *pdev,
                                struct exynos5440_pinctrl_priv_data *priv)
 {
        struct device *dev = &pdev->dev;
@@ -723,7 +723,7 @@ static int __init exynos5440_pinctrl_parse_dt(struct platform_device *pdev,
 }
 
 /* register the pinctrl interface with the pinctrl subsystem */
-static int __init exynos5440_pinctrl_register(struct platform_device *pdev,
+static int exynos5440_pinctrl_register(struct platform_device *pdev,
                                struct exynos5440_pinctrl_priv_data *priv)
 {
        struct device *dev = &pdev->dev;
@@ -798,7 +798,7 @@ static int __init exynos5440_pinctrl_register(struct platform_device *pdev,
 }
 
 /* register the gpiolib interface with the gpiolib subsystem */
-static int __init exynos5440_gpiolib_register(struct platform_device *pdev,
+static int exynos5440_gpiolib_register(struct platform_device *pdev,
                                struct exynos5440_pinctrl_priv_data *priv)
 {
        struct gpio_chip *gc;
@@ -831,7 +831,7 @@ static int __init exynos5440_gpiolib_register(struct platform_device *pdev,
 }
 
 /* unregister the gpiolib interface with the gpiolib subsystem */
-static int __init exynos5440_gpiolib_unregister(struct platform_device *pdev,
+static int exynos5440_gpiolib_unregister(struct platform_device *pdev,
                                struct exynos5440_pinctrl_priv_data *priv)
 {
        int ret = gpiochip_remove(priv->gc);
index 8ed20e84cb0274f486264c8d00e1d5ff5c86edee..4a0d54a08890c866986cc72505e96202b526ab82 100644 (file)
@@ -170,7 +170,7 @@ static const unsigned pins_ntr[] = {GPIO4};
 static const unsigned pins_ntr8k[] = {GPIO5};
 static const unsigned pins_hrst[] = {GPIO6};
 static const unsigned pins_mdio[] = {GPIO7, GPIO8};
-static const unsigned pins_bled[] = {GPIO7, GPIO10, GPIO11,
+static const unsigned pins_bled[] = {GPIO9, GPIO10, GPIO11,
                                        GPIO12, GPIO13, GPIO14};
 static const unsigned pins_asc0[] = {GPIO32, GPIO33};
 static const unsigned pins_spi[] = {GPIO34, GPIO35, GPIO36};
@@ -315,6 +315,37 @@ static int falcon_pinconf_set(struct pinctrl_dev *pctrldev,
 static void falcon_pinconf_dbg_show(struct pinctrl_dev *pctrldev,
                        struct seq_file *s, unsigned offset)
 {
+       unsigned long config;
+       struct pin_desc *desc;
+
+       struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
+       int port = PORT(offset);
+
+       seq_printf(s, " (port %d) mux %d -- ", port,
+               pad_r32(info->membase[port], LTQ_PADC_MUX(PORT_PIN(offset))));
+
+       config = LTQ_PINCONF_PACK(LTQ_PINCONF_PARAM_PULL, 0);
+       if (!falcon_pinconf_get(pctrldev, offset, &config))
+               seq_printf(s, "pull %d ",
+                       (int)LTQ_PINCONF_UNPACK_ARG(config));
+
+       config = LTQ_PINCONF_PACK(LTQ_PINCONF_PARAM_DRIVE_CURRENT, 0);
+       if (!falcon_pinconf_get(pctrldev, offset, &config))
+               seq_printf(s, "drive-current %d ",
+                       (int)LTQ_PINCONF_UNPACK_ARG(config));
+
+       config = LTQ_PINCONF_PACK(LTQ_PINCONF_PARAM_SLEW_RATE, 0);
+       if (!falcon_pinconf_get(pctrldev, offset, &config))
+               seq_printf(s, "slew-rate %d ",
+                       (int)LTQ_PINCONF_UNPACK_ARG(config));
+
+       desc = pin_desc_get(pctrldev, offset);
+       if (desc) {
+               if (desc->gpio_owner)
+                       seq_printf(s, " owner: %s", desc->gpio_owner);
+       } else {
+               seq_printf(s, " not registered");
+       }
 }
 
 static void falcon_pinconf_group_dbg_show(struct pinctrl_dev *pctrldev,
@@ -360,6 +391,8 @@ static const struct ltq_cfg_param falcon_cfg_params[] = {
 static struct ltq_pinmux_info falcon_info = {
        .desc           = &falcon_pctrl_desc,
        .apply_mux      = falcon_mux_apply,
+       .params         = falcon_cfg_params,
+       .num_params     = ARRAY_SIZE(falcon_cfg_params),
 };
 
 
@@ -398,6 +431,9 @@ static int pinctrl_falcon_probe(struct platform_device *pdev)
                u32 avail;
                int pins;
 
+               if (!of_device_is_available(np))
+                       continue;
+
                if (!ppdev) {
                        dev_err(&pdev->dev, "failed to find pad pdev\n");
                        continue;
index 15f501d89026f2360cf59639c464e5200f5e03c8..a70384611351411a8396aa23288649aa044c283b 100644 (file)
@@ -64,11 +64,13 @@ static void ltq_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
        seq_printf(s, " %s", dev_name(pctldev->dev));
 }
 
-static int ltq_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+static void ltq_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
                                struct device_node *np,
                                struct pinctrl_map **map)
 {
        struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctldev);
+       struct property *pins = of_find_property(np, "lantiq,pins", NULL);
+       struct property *groups = of_find_property(np, "lantiq,groups", NULL);
        unsigned long configs[3];
        unsigned num_configs = 0;
        struct property *prop;
@@ -76,8 +78,20 @@ static int ltq_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
        const char *function;
        int ret, i;
 
+       if (!pins && !groups) {
+               dev_err(pctldev->dev, "%s defines neither pins nor groups\n",
+                       np->name);
+               return;
+       }
+
+       if (pins && groups) {
+               dev_err(pctldev->dev, "%s defines both pins and groups\n",
+                       np->name);
+               return;
+       }
+
        ret = of_property_read_string(np, "lantiq,function", &function);
-       if (!ret) {
+       if (groups && !ret) {
                of_property_for_each_string(np, "lantiq,groups", prop, group) {
                        (*map)->type = PIN_MAP_TYPE_MUX_GROUP;
                        (*map)->name = function;
@@ -85,11 +99,6 @@ static int ltq_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
                        (*map)->data.mux.function = function;
                        (*map)++;
                }
-               if (of_find_property(np, "lantiq,pins", NULL))
-                       dev_err(pctldev->dev,
-                               "%s mixes pins and groups settings\n",
-                               np->name);
-               return 0;
        }
 
        for (i = 0; i < info->num_params; i++) {
@@ -103,7 +112,7 @@ static int ltq_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
        }
 
        if (!num_configs)
-               return -EINVAL;
+               return;
 
        of_property_for_each_string(np, "lantiq,pins", prop, pin) {
                (*map)->data.configs.configs = kmemdup(configs,
@@ -115,7 +124,16 @@ static int ltq_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
                (*map)->data.configs.num_configs = num_configs;
                (*map)++;
        }
-       return 0;
+       of_property_for_each_string(np, "lantiq,groups", prop, group) {
+               (*map)->data.configs.configs = kmemdup(configs,
+                                       num_configs * sizeof(unsigned long),
+                                       GFP_KERNEL);
+               (*map)->type = PIN_MAP_TYPE_CONFIGS_GROUP;
+               (*map)->name = group;
+               (*map)->data.configs.group_or_pin = group;
+               (*map)->data.configs.num_configs = num_configs;
+               (*map)++;
+       }
 }
 
 static int ltq_pinctrl_dt_subnode_size(struct device_node *np)
@@ -135,23 +153,19 @@ static int ltq_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
 {
        struct pinctrl_map *tmp;
        struct device_node *np;
-       int ret;
+       int max_maps = 0;
 
-       *num_maps = 0;
        for_each_child_of_node(np_config, np)
-               *num_maps += ltq_pinctrl_dt_subnode_size(np);
-       *map = kzalloc(*num_maps * sizeof(struct pinctrl_map), GFP_KERNEL);
+               max_maps += ltq_pinctrl_dt_subnode_size(np);
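+       /* allocate twice the upper bound: a subnode may emit both mux maps and config maps */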
+       *map = kzalloc(max_maps * sizeof(struct pinctrl_map) * 2, GFP_KERNEL);
        if (!*map)
                return -ENOMEM;
        tmp = *map;
 
-       for_each_child_of_node(np_config, np) {
-               ret = ltq_pinctrl_dt_subnode_to_map(pctldev, np, &tmp);
-               if (ret < 0) {
-                       ltq_pinctrl_dt_free_map(pctldev, *map, *num_maps);
-                       return ret;
-               }
-       }
+       for_each_child_of_node(np_config, np)
+               ltq_pinctrl_dt_subnode_to_map(pctldev, np, &tmp);
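+       /* tmp was advanced once per emitted map entry, so the pointer delta is the final count */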
+       *num_maps = ((int)(tmp - *map));
+
        return 0;
 }
 
@@ -280,7 +294,7 @@ static int ltq_pmx_gpio_request_enable(struct pinctrl_dev *pctrldev,
                                unsigned pin)
 {
        struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
-       int mfp = match_mfp(info, pin + (range->id * 32));
+       int mfp = match_mfp(info, pin);
        int pin_func;
 
        if (mfp < 0) {
index 4419d32a0ade00acb70d2ad56da68ea108d969ab..6d07f0238532050c1c7ecd54cefd65bb8439ddaf 100644 (file)
@@ -34,6 +34,7 @@ enum ltq_pinconf_param {
        LTQ_PINCONF_PARAM_OPEN_DRAIN,
        LTQ_PINCONF_PARAM_DRIVE_CURRENT,
        LTQ_PINCONF_PARAM_SLEW_RATE,
+       LTQ_PINCONF_PARAM_OUTPUT,
 };
 
 struct ltq_cfg_param {
index dd227d21dcf28563c1d0902d51fa8a3b8140aeed..23af9f1f9c35e6d9d7793075ad7ec2df92070043 100644 (file)
@@ -146,7 +146,7 @@ free:
 static void mxs_dt_free_map(struct pinctrl_dev *pctldev,
                            struct pinctrl_map *map, unsigned num_maps)
 {
-       int i;
+       u32 i;
 
        for (i = 0; i < num_maps; i++) {
                if (map[i].type == PIN_MAP_TYPE_MUX_GROUP)
@@ -203,7 +203,7 @@ static int mxs_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned selector,
        void __iomem *reg;
        u8 bank, shift;
        u16 pin;
-       int i;
+       u32 i;
 
        for (i = 0; i < g->npins; i++) {
                bank = PINID_TO_BANK(g->pins[i]);
@@ -256,7 +256,7 @@ static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev,
        void __iomem *reg;
        u8 ma, vol, pull, bank, shift;
        u16 pin;
-       int i;
+       u32 i;
 
        ma = CONFIG_TO_MA(config);
        vol = CONFIG_TO_VOL(config);
@@ -345,8 +345,7 @@ static int mxs_pinctrl_parse_group(struct platform_device *pdev,
        const char *propname = "fsl,pinmux-ids";
        char *group;
        int length = strlen(np->name) + SUFFIX_LEN;
-       int i;
-       u32 val;
+       u32 val, i;
 
        group = devm_kzalloc(&pdev->dev, length, GFP_KERNEL);
        if (!group)
index 1bb16ffb4e41a9d0f252fc8a563188f97f343642..de9e8519b803570e4188bb58d5b0c77cf6ab004f 100644 (file)
@@ -25,6 +25,8 @@
 #include <linux/irqdomain.h>
 #include <linux/slab.h>
 #include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/pinctrl/machine.h>
 #include <linux/pinctrl/pinctrl.h>
 #include <linux/pinctrl/pinmux.h>
 #include <linux/pinctrl/pinconf.h>
@@ -32,8 +34,8 @@
 #include <linux/pinctrl/consumer.h>
 #include <linux/platform_data/pinctrl-nomadik.h>
 #include <asm/mach/irq.h>
-#include <mach/irqs.h>
 #include "pinctrl-nomadik.h"
+#include "core.h"
 
 /*
  * The GPIO module in the Nomadik family of Systems-on-Chip is an
@@ -216,7 +218,7 @@ nmk_gpio_disable_lazy_irq(struct nmk_gpio_chip *nmk_chip, unsigned offset)
        u32 falling = nmk_chip->fimsc & BIT(offset);
        u32 rising = nmk_chip->rimsc & BIT(offset);
        int gpio = nmk_chip->chip.base + offset;
-       int irq = NOMADIK_GPIO_TO_IRQ(gpio);
+       int irq = irq_find_mapping(nmk_chip->domain, offset);
        struct irq_data *d = irq_get_irq_data(irq);
 
        if (!rising && !falling)
@@ -676,7 +678,7 @@ int nmk_gpio_set_mode(int gpio, int gpio_mode)
 }
 EXPORT_SYMBOL(nmk_gpio_set_mode);
 
-static int nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev, int gpio)
+static int __maybe_unused nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev, int gpio)
 {
        int i;
        u16 reg;
@@ -1341,8 +1343,7 @@ static int nmk_gpio_probe(struct platform_device *dev)
 
                if (of_property_read_u32(np, "gpio-bank", &dev->id)) {
                        dev_err(&dev->dev, "gpio-bank property not found\n");
-                       ret = -EINVAL;
-                       goto out;
+                       return -EINVAL;
                }
 
                pdata->first_gpio = dev->id * NMK_GPIO_PER_CHIP;
@@ -1350,41 +1351,29 @@ static int nmk_gpio_probe(struct platform_device *dev)
        }
 
        res = platform_get_resource(dev, IORESOURCE_MEM, 0);
-       if (!res) {
-               ret = -ENOENT;
-               goto out;
-       }
+       if (!res)
+               return -ENOENT;
 
        irq = platform_get_irq(dev, 0);
-       if (irq < 0) {
-               ret = irq;
-               goto out;
-       }
+       if (irq < 0)
+               return irq;
 
        secondary_irq = platform_get_irq(dev, 1);
-       if (secondary_irq >= 0 && !pdata->get_secondary_status) {
-               ret = -EINVAL;
-               goto out;
-       }
+       if (secondary_irq >= 0 && !pdata->get_secondary_status)
+               return -EINVAL;
 
        base = devm_request_and_ioremap(&dev->dev, res);
-       if (!base) {
-               ret = -ENOMEM;
-               goto out;
-       }
+       if (!base)
+               return -ENOMEM;
 
        clk = devm_clk_get(&dev->dev, NULL);
-       if (IS_ERR(clk)) {
-               ret = PTR_ERR(clk);
-               goto out;
-       }
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
        clk_prepare(clk);
 
        nmk_chip = devm_kzalloc(&dev->dev, sizeof(*nmk_chip), GFP_KERNEL);
-       if (!nmk_chip) {
-               ret = -ENOMEM;
-               goto out;
-       }
+       if (!nmk_chip)
+               return -ENOMEM;
 
        /*
         * The virt address in nmk_chip->addr is in the nomadik register space,
@@ -1418,7 +1407,7 @@ static int nmk_gpio_probe(struct platform_device *dev)
 
        ret = gpiochip_add(&nmk_chip->chip);
        if (ret)
-               goto out;
+               return ret;
 
        BUG_ON(nmk_chip->bank >= ARRAY_SIZE(nmk_gpio_chips));
 
@@ -1427,14 +1416,15 @@ static int nmk_gpio_probe(struct platform_device *dev)
        platform_set_drvdata(dev, nmk_chip);
 
        if (!np)
-               irq_start = NOMADIK_GPIO_TO_IRQ(pdata->first_gpio);
+               irq_start = pdata->first_irq;
        nmk_chip->domain = irq_domain_add_simple(np,
                                NMK_GPIO_PER_CHIP, irq_start,
                                &nmk_gpio_irq_simple_ops, nmk_chip);
        if (!nmk_chip->domain) {
                dev_err(&dev->dev, "failed to create irqdomain\n");
-               ret = -ENOSYS;
-               goto out;
+               /* Tear the gpio_chip down again; any failure here is ignored */
+               ret = gpiochip_remove(&nmk_chip->chip);
+               return -ENOSYS;
        }
 
        nmk_gpio_init_irq(nmk_chip);
@@ -1442,12 +1432,6 @@ static int nmk_gpio_probe(struct platform_device *dev)
        dev_info(&dev->dev, "at address %p\n", nmk_chip->addr);
 
        return 0;
-
-out:
-       dev_err(&dev->dev, "Failure %i for GPIO %i-%i\n", ret,
-                 pdata->first_gpio, pdata->first_gpio+31);
-
-       return ret;
 }
 
 static int nmk_get_groups_cnt(struct pinctrl_dev *pctldev)
@@ -1508,11 +1492,285 @@ static void nmk_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
        nmk_gpio_dbg_show_one(s, pctldev, chip, offset - chip->base, offset);
 }
 
+static void nmk_pinctrl_dt_free_map(struct pinctrl_dev *pctldev,
+               struct pinctrl_map *map, unsigned num_maps)
+{
+       unsigned i;
+
+       for (i = 0; i < num_maps; i++)
+               if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN)
+                       kfree(map[i].data.configs.configs);
+       kfree(map);
+}
+
+static int nmk_dt_reserve_map(struct pinctrl_map **map, unsigned *reserved_maps,
+               unsigned *num_maps, unsigned reserve)
+{
+       unsigned old_num = *reserved_maps;
+       unsigned new_num = *num_maps + reserve;
+       struct pinctrl_map *new_map;
+
+       if (old_num >= new_num)
+               return 0;
+
+       new_map = krealloc(*map, sizeof(*new_map) * new_num, GFP_KERNEL);
+       if (!new_map)
+               return -ENOMEM;
+
+       memset(new_map + old_num, 0, (new_num - old_num) * sizeof(*new_map));
+
+       *map = new_map;
+       *reserved_maps = new_num;
+
+       return 0;
+}
+
+static int nmk_dt_add_map_mux(struct pinctrl_map **map, unsigned *reserved_maps,
+               unsigned *num_maps, const char *group,
+               const char *function)
+{
+       if (*num_maps == *reserved_maps)
+               return -ENOSPC;
+
+       (*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
+       (*map)[*num_maps].data.mux.group = group;
+       (*map)[*num_maps].data.mux.function = function;
+       (*num_maps)++;
+
+       return 0;
+}
+
+static int nmk_dt_add_map_configs(struct pinctrl_map **map,
+               unsigned *reserved_maps,
+               unsigned *num_maps, const char *group,
+               unsigned long *configs, unsigned num_configs)
+{
+       unsigned long *dup_configs;
+
+       if (*num_maps == *reserved_maps)
+               return -ENOSPC;
+
+       dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs),
+                             GFP_KERNEL);
+       if (!dup_configs)
+               return -ENOMEM;
+
+       (*map)[*num_maps].type = PIN_MAP_TYPE_CONFIGS_PIN;
+
+       (*map)[*num_maps].data.configs.group_or_pin = group;
+       (*map)[*num_maps].data.configs.configs = dup_configs;
+       (*map)[*num_maps].data.configs.num_configs = num_configs;
+       (*num_maps)++;
+
+       return 0;
+}
+
+#define NMK_CONFIG_PIN(x,y) { .property = x, .config = y, }
+#define NMK_CONFIG_PIN_ARRAY(x,y) { .property = x, .choice = y, \
+       .size = ARRAY_SIZE(y), }
+
+static const unsigned long nmk_pin_input_modes[] = {
+       PIN_INPUT_NOPULL,
+       PIN_INPUT_PULLUP,
+       PIN_INPUT_PULLDOWN,
+};
+
+static const unsigned long nmk_pin_output_modes[] = {
+       PIN_OUTPUT_LOW,
+       PIN_OUTPUT_HIGH,
+       PIN_DIR_OUTPUT,
+};
+
+static const unsigned long nmk_pin_sleep_modes[] = {
+       PIN_SLEEPMODE_DISABLED,
+       PIN_SLEEPMODE_ENABLED,
+};
+
+static const unsigned long nmk_pin_sleep_input_modes[] = {
+       PIN_SLPM_INPUT_NOPULL,
+       PIN_SLPM_INPUT_PULLUP,
+       PIN_SLPM_INPUT_PULLDOWN,
+       PIN_SLPM_DIR_INPUT,
+};
+
+static const unsigned long nmk_pin_sleep_output_modes[] = {
+       PIN_SLPM_OUTPUT_LOW,
+       PIN_SLPM_OUTPUT_HIGH,
+       PIN_SLPM_DIR_OUTPUT,
+};
+
+static const unsigned long nmk_pin_sleep_wakeup_modes[] = {
+       PIN_SLPM_WAKEUP_DISABLE,
+       PIN_SLPM_WAKEUP_ENABLE,
+};
+
+static const unsigned long nmk_pin_gpio_modes[] = {
+       PIN_GPIOMODE_DISABLED,
+       PIN_GPIOMODE_ENABLED,
+};
+
+static const unsigned long nmk_pin_sleep_pdis_modes[] = {
+       PIN_SLPM_PDIS_DISABLED,
+       PIN_SLPM_PDIS_ENABLED,
+};
+
+struct nmk_cfg_param {
+       const char *property;
+       unsigned long config;
+       const unsigned long *choice;
+       int size;
+};
+
+static const struct nmk_cfg_param nmk_cfg_params[] = {
+       NMK_CONFIG_PIN_ARRAY("ste,input",               nmk_pin_input_modes),
+       NMK_CONFIG_PIN_ARRAY("ste,output",              nmk_pin_output_modes),
+       NMK_CONFIG_PIN_ARRAY("ste,sleep",               nmk_pin_sleep_modes),
+       NMK_CONFIG_PIN_ARRAY("ste,sleep-input",         nmk_pin_sleep_input_modes),
+       NMK_CONFIG_PIN_ARRAY("ste,sleep-output",        nmk_pin_sleep_output_modes),
+       NMK_CONFIG_PIN_ARRAY("ste,sleep-wakeup",        nmk_pin_sleep_wakeup_modes),
+       NMK_CONFIG_PIN_ARRAY("ste,gpio",                nmk_pin_gpio_modes),
+       NMK_CONFIG_PIN_ARRAY("ste,sleep-pull-disable",  nmk_pin_sleep_pdis_modes),
+};
+
+static int nmk_dt_pin_config(int index, int val, unsigned long *config)
+{
+       int ret = 0;
+
+       if (nmk_cfg_params[index].choice == NULL)
+               *config = nmk_cfg_params[index].config;
+       else {
+               /* silently ignore values that are out of range */
+               if (val < nmk_cfg_params[index].size) {
+                       *config = nmk_cfg_params[index].config |
+                               nmk_cfg_params[index].choice[val];
+               }
+       }
+       return ret;
+}
+
+static const char *nmk_find_pin_name(struct pinctrl_dev *pctldev, const char *pin_name)
+{
+       int i, pin_number;
+       struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
+
+       if (sscanf(pin_name, "GPIO%d", &pin_number) == 1)
+               for (i = 0; i < npct->soc->npins; i++)
+                       if (npct->soc->pins[i].number == pin_number)
+                               return npct->soc->pins[i].name;
+       return NULL;
+}
+
+static bool nmk_pinctrl_dt_get_config(struct device_node *np,
+               unsigned long *configs)
+{
+       bool has_config = false;
+       unsigned long cfg = 0;
+       int i, ret;
+       u32 val;
+
+       for (i = 0; i < ARRAY_SIZE(nmk_cfg_params); i++) {
+               ret = of_property_read_u32(np,
+                               nmk_cfg_params[i].property, &val);
+               if (ret != -EINVAL) {
+                       if (nmk_dt_pin_config(i, val, &cfg) == 0) {
+                               *configs |= cfg;
+                               has_config = true;
+                       }
+               }
+       }
+
+       return has_config;
+}
+
+int nmk_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+               struct device_node *np,
+               struct pinctrl_map **map,
+               unsigned *reserved_maps,
+               unsigned *num_maps)
+{
+       int ret;
+       const char *function = NULL;
+       unsigned long configs = 0;
+       bool has_config = false;
+       unsigned reserve = 0;
+       struct property *prop;
+       const char *group, *gpio_name;
+       struct device_node *np_config;
+
+       ret = of_property_read_string(np, "ste,function", &function);
+       if (ret >= 0)
+               reserve = 1;
+
+       has_config = nmk_pinctrl_dt_get_config(np, &configs);
+
+       np_config = of_parse_phandle(np, "ste,config", 0);
+       if (np_config)
+               has_config |= nmk_pinctrl_dt_get_config(np_config, &configs);
+
+       ret = of_property_count_strings(np, "ste,pins");
+       if (ret < 0)
+               goto exit;
+
+       if (has_config)
+               reserve++;
+
+       reserve *= ret;
+
+       ret = nmk_dt_reserve_map(map, reserved_maps, num_maps, reserve);
+       if (ret < 0)
+               goto exit;
+
+       of_property_for_each_string(np, "ste,pins", prop, group) {
+               if (function) {
+                       ret = nmk_dt_add_map_mux(map, reserved_maps, num_maps,
+                                         group, function);
+                       if (ret < 0)
+                               goto exit;
+               }
+               if (has_config) {
+                       gpio_name = nmk_find_pin_name(pctldev, group);
+
+                       ret = nmk_dt_add_map_configs(map, reserved_maps, num_maps,
+                                             gpio_name, &configs, 1);
+                       if (ret < 0)
+                               goto exit;
+               }
+
+       }
+exit:
+       return ret;
+}
+
+int nmk_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
+                                struct device_node *np_config,
+                                struct pinctrl_map **map, unsigned *num_maps)
+{
+       unsigned reserved_maps;
+       struct device_node *np;
+       int ret;
+
+       reserved_maps = 0;
+       *map = NULL;
+       *num_maps = 0;
+
+       for_each_child_of_node(np_config, np) {
+               ret = nmk_pinctrl_dt_subnode_to_map(pctldev, np, map,
+                               &reserved_maps, num_maps);
+               if (ret < 0) {
+                       nmk_pinctrl_dt_free_map(pctldev, *map, *num_maps);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
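The dt_node_to_map/dt_free_map callbacks added above walk the children of a pin
configuration node: every string in "ste,pins" becomes a MUX_GROUP map when an
"ste,function" is given, and a per-pin CONFIGS_PIN map when any ste,* config
property (or an "ste,config" phandle) is present. As a rough illustration only,
with hypothetical node, function and group names that are not part of this
change, a configuration node consumed by this parser could look like:

       uart0_default_mode: uart0_default {
               uart0_default_mux {
                       ste,function = "u0";            /* one mux map per listed group */
                       ste,pins = "u0_a_1";
               };
               uart0_default_cfg {
                       ste,pins = "GPIO0", "GPIO2";    /* mapped to pin names by nmk_find_pin_name() */
                       ste,input = <1>;                /* index 1 in nmk_pin_input_modes = PIN_INPUT_PULLUP */
               };
       };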
 static struct pinctrl_ops nmk_pinctrl_ops = {
        .get_groups_count = nmk_get_groups_cnt,
        .get_group_name = nmk_get_group_name,
        .get_group_pins = nmk_get_group_pins,
        .pin_dbg_show = nmk_pin_dbg_show,
+       .dt_node_to_map = nmk_pinctrl_dt_node_to_map,
+       .dt_free_map = nmk_pinctrl_dt_free_map,
 };
 
 static int nmk_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
@@ -1846,16 +2104,39 @@ static struct pinctrl_desc nmk_pinctrl_desc = {
 
 static const struct of_device_id nmk_pinctrl_match[] = {
        {
-               .compatible = "stericsson,nmk_pinctrl",
+               .compatible = "stericsson,nmk-pinctrl",
                .data = (void *)PINCTRL_NMK_DB8500,
        },
        {},
 };
 
+static int nmk_pinctrl_suspend(struct platform_device *pdev, pm_message_t state)
+{
+       struct nmk_pinctrl *npct;
+
+       npct = platform_get_drvdata(pdev);
+       if (!npct)
+               return -EINVAL;
+
+       return pinctrl_force_sleep(npct->pctl);
+}
+
+static int nmk_pinctrl_resume(struct platform_device *pdev)
+{
+       struct nmk_pinctrl *npct;
+
+       npct = platform_get_drvdata(pdev);
+       if (!npct)
+               return -EINVAL;
+
+       return pinctrl_force_default(npct->pctl);
+}
+
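These suspend/resume hooks force the pin controller into its "sleep" and
"default" states; those are the controller's own hog states, so they only take
effect when the pinctrl node claims configurations for itself. A hedged sketch,
with purely illustrative labels and state phandles, of the matching DT side:

       pinctrl {
               /* hog states switched by the suspend/resume callbacks above */
               pinctrl-names = "default", "sleep";
               pinctrl-0 = <&board_default_mode>;
               pinctrl-1 = <&board_sleep_mode>;
       };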
 static int nmk_pinctrl_probe(struct platform_device *pdev)
 {
        const struct platform_device_id *platid = platform_get_device_id(pdev);
        struct device_node *np = pdev->dev.of_node;
+       struct device_node *prcm_np;
        struct nmk_pinctrl *npct;
        struct resource *res;
        unsigned int version = 0;
@@ -1884,21 +2165,26 @@ static int nmk_pinctrl_probe(struct platform_device *pdev)
        if (version == PINCTRL_NMK_DB8540)
                nmk_pinctrl_db8540_init(&npct->soc);
 
+       if (np) {
+               prcm_np = of_parse_phandle(np, "prcm", 0);
+               if (prcm_np)
+                       npct->prcm_base = of_iomap(prcm_np, 0);
+       }
+
+       /* Allow platform-passed information to override the DT settings. */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (res) {
+       if (res)
                npct->prcm_base = devm_ioremap(&pdev->dev, res->start,
                                               resource_size(res));
-               if (!npct->prcm_base) {
-                       dev_err(&pdev->dev,
-                               "failed to ioremap PRCM registers\n");
-                       return -ENOMEM;
+       if (!npct->prcm_base) {
+               if (version == PINCTRL_NMK_STN8815) {
+                       dev_info(&pdev->dev,
+                                "No PRCM base, assuming no ALT-Cx control is available\n");
+               } else {
+                       dev_err(&pdev->dev, "missing PRCM base address\n");
+                       return -EINVAL;
                }
-       } else if (version == PINCTRL_NMK_STN8815) {
-               dev_info(&pdev->dev,
-                        "No PRCM base, assume no ALT-Cx control is available\n");
-       } else {
-               dev_err(&pdev->dev, "missing PRCM base address\n");
-               return -EINVAL;
        }
 
        /*
@@ -1963,6 +2249,10 @@ static struct platform_driver nmk_pinctrl_driver = {
        },
        .probe = nmk_pinctrl_probe,
        .id_table = nmk_pinctrl_id,
+#ifdef CONFIG_PM
+       .suspend = nmk_pinctrl_suspend,
+       .resume = nmk_pinctrl_resume,
+#endif
 };
 
 static int __init nmk_gpio_init(void)
index fd7b24cd89084d923dd2cc1cc2a581695a040deb..5c20ed056054035eecf80d3ed3718bf5729127a5 100644 (file)
@@ -716,7 +716,6 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
        }
        ctrldesc->pins = pindesc;
        ctrldesc->npins = drvdata->ctrl->nr_pins;
-       ctrldesc->npins = drvdata->ctrl->nr_pins;
 
        /* dynamically populate the pin number and pin name for pindesc */
        for (pin = 0, pdesc = pindesc; pin < ctrldesc->npins; pin++, pdesc++)
index f6a360b86eb6465fef029e777147ccbd3bc95b66..5c32e880bcb24315b4b2db260294106765eb4240 100644 (file)
@@ -30,7 +30,6 @@
 #define PCS_MUX_BITS_NAME              "pinctrl-single,bits"
 #define PCS_REG_NAME_LEN               ((sizeof(unsigned long) * 2) + 1)
 #define PCS_OFF_DISABLED               ~0U
-#define PCS_MAX_GPIO_VALUES            2
 
 /**
  * struct pcs_pingroup - pingroups for a function
@@ -77,16 +76,6 @@ struct pcs_function {
        struct list_head node;
 };
 
-/**
- * struct pcs_gpio_range - pinctrl gpio range
- * @range:     subrange of the GPIO number space
- * @gpio_func: gpio function value in the pinmux register
- */
-struct pcs_gpio_range {
-       struct pinctrl_gpio_range range;
-       int gpio_func;
-};
-
 /**
  * struct pcs_data - wrapper for data needed by pinctrl framework
  * @pa:                pindesc array
@@ -414,26 +403,9 @@ static void pcs_disable(struct pinctrl_dev *pctldev, unsigned fselector,
 }
 
 static int pcs_request_gpio(struct pinctrl_dev *pctldev,
-                           struct pinctrl_gpio_range *range, unsigned pin)
+                       struct pinctrl_gpio_range *range, unsigned offset)
 {
-       struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev);
-       struct pcs_gpio_range *gpio = NULL;
-       int end, mux_bytes;
-       unsigned data;
-
-       gpio = container_of(range, struct pcs_gpio_range, range);
-       end = range->pin_base + range->npins - 1;
-       if (pin < range->pin_base || pin > end) {
-               dev_err(pctldev->dev,
-                       "pin %d isn't in the range of %d to %d\n",
-                       pin, range->pin_base, end);
-               return -EINVAL;
-       }
-       mux_bytes = pcs->width / BITS_PER_BYTE;
-       data = pcs->read(pcs->base + pin * mux_bytes) & ~pcs->fmask;
-       data |= gpio->gpio_func;
-       pcs->write(data, pcs->base + pin * mux_bytes);
-       return 0;
+       return -ENOTSUPP;
 }
 
 static struct pinmux_ops pcs_pinmux_ops = {
@@ -907,49 +879,6 @@ static void pcs_free_resources(struct pcs_device *pcs)
 
 static struct of_device_id pcs_of_match[];
 
-static int pcs_add_gpio_range(struct device_node *node, struct pcs_device *pcs)
-{
-       struct pcs_gpio_range *gpio;
-       struct device_node *child;
-       struct resource r;
-       const char name[] = "pinctrl-single";
-       u32 gpiores[PCS_MAX_GPIO_VALUES];
-       int ret, i = 0, mux_bytes = 0;
-
-       for_each_child_of_node(node, child) {
-               ret = of_address_to_resource(child, 0, &r);
-               if (ret < 0)
-                       continue;
-               memset(gpiores, 0, sizeof(u32) * PCS_MAX_GPIO_VALUES);
-               ret = of_property_read_u32_array(child, "pinctrl-single,gpio",
-                                                gpiores, PCS_MAX_GPIO_VALUES);
-               if (ret < 0)
-                       continue;
-               gpio = devm_kzalloc(pcs->dev, sizeof(*gpio), GFP_KERNEL);
-               if (!gpio) {
-                       dev_err(pcs->dev, "failed to allocate pcs gpio\n");
-                       return -ENOMEM;
-               }
-               gpio->range.name = devm_kzalloc(pcs->dev, sizeof(name),
-                                               GFP_KERNEL);
-               if (!gpio->range.name) {
-                       dev_err(pcs->dev, "failed to allocate range name\n");
-                       return -ENOMEM;
-               }
-               memcpy((char *)gpio->range.name, name, sizeof(name));
-
-               gpio->range.id = i++;
-               gpio->range.base = gpiores[0];
-               gpio->gpio_func = gpiores[1];
-               mux_bytes = pcs->width / BITS_PER_BYTE;
-               gpio->range.pin_base = (r.start - pcs->res->start) / mux_bytes;
-               gpio->range.npins = (r.end - r.start) / mux_bytes + 1;
-
-               pinctrl_add_gpio_range(pcs->pctl, &gpio->range);
-       }
-       return 0;
-}
-
 static int pcs_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
@@ -1046,10 +975,6 @@ static int pcs_probe(struct platform_device *pdev)
                goto free;
        }
 
-       ret = pcs_add_gpio_range(np, pcs);
-       if (ret < 0)
-               goto free;
-
        dev_info(pcs->dev, "%i pins at pa %p size %u\n",
                 pcs->desc.npins, pcs->base, pcs->size);
 
index 498b2ba905deb897791e032baf992e4459b88b7e..d02498b30c6ef1f4a0eb33a6f3dd23ea46850324 100644 (file)
@@ -1246,6 +1246,22 @@ static void __iomem *sirfsoc_rsc_of_iomap(void)
        return of_iomap(np, 0);
 }
 
+static int sirfsoc_gpio_of_xlate(struct gpio_chip *gc,
+       const struct of_phandle_args *gpiospec,
+       u32 *flags)
+{
+       if (gpiospec->args[0] >= SIRFSOC_GPIO_NO_OF_BANKS * SIRFSOC_GPIO_BANK_SIZE)
+               return -EINVAL;
+
+       if (gc != &sgpio_bank[gpiospec->args[0] / SIRFSOC_GPIO_BANK_SIZE].chip.gc)
+               return -EINVAL;
+
+       if (flags)
+               *flags = gpiospec->args[1];
+
+       return gpiospec->args[0] % SIRFSOC_GPIO_BANK_SIZE;
+}
+
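Every bank's gpio_chip shares the same controller node and registers this
two-cell of_xlate (wired up in sirfsoc_gpio_probe() below), so consumers pass a
chip-wide GPIO number plus a flags cell; the callback checks that the number
belongs to the bank backing the chip and returns the offset within that bank.
A hypothetical consumer node, where the device, label and flag value are
illustrative and the bank size constant is defined elsewhere in the driver:

       some-device {
               reset-gpios = <&gpio 34 0>;     /* GPIO 34 = bank 1, offset 2 for a 32-pin bank */
       };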
 static int sirfsoc_pinmux_probe(struct platform_device *pdev)
 {
        int ret;
@@ -1736,6 +1752,8 @@ static int sirfsoc_gpio_probe(struct device_node *np)
                bank->chip.gc.ngpio = SIRFSOC_GPIO_BANK_SIZE;
                bank->chip.gc.label = kstrdup(np->full_name, GFP_KERNEL);
                bank->chip.gc.of_node = np;
+               bank->chip.gc.of_xlate = sirfsoc_gpio_of_xlate;
+               bank->chip.gc.of_gpio_n_cells = 2;
                bank->chip.regs = regs;
                bank->id = i;
                bank->is_marco = is_marco;
diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c
new file mode 100644 (file)
index 0000000..80b11e3
--- /dev/null
@@ -0,0 +1,1505 @@
+/*
+ * Allwinner A1X SoCs pinctrl driver.
+ *
+ * Copyright (C) 2012 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "core.h"
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin sun4i_a10_pins[] = {
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PA0,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PA1,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PA2,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PA3,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PA4,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PA5,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PA6,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PA7,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PA8,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PA9,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PA10,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out"),
+               SUNXI_FUNCTION(0x4, "uart1")),          /* TX */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PA11,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out"),
+               SUNXI_FUNCTION(0x4, "uart1")),          /* RX */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PA12,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out"),
+               SUNXI_FUNCTION(0x4, "uart1")),          /* RTS */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PA13,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out"),
+               SUNXI_FUNCTION(0x4, "uart1")),          /* CTS */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PA14,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out"),
+               SUNXI_FUNCTION(0x4, "uart1")),          /* DTR */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PA15,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out"),
+               SUNXI_FUNCTION(0x4, "uart1")),          /* DSR */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PA16,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out"),
+               SUNXI_FUNCTION(0x4, "uart1")),          /* DCD */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PA17,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out"),
+               SUNXI_FUNCTION(0x4, "uart1")),          /* RING */
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB0,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB1,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB2,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB3,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB4,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB5,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB6,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB7,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB8,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB9,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB10,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB11,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB12,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB13,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB14,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB15,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB16,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB17,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB18,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB19,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB20,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB21,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB22,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out"),
+               SUNXI_FUNCTION(0x2, "uart0")),          /* TX */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB23,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out"),
+               SUNXI_FUNCTION(0x2, "uart0")),          /* RX */
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC0,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC1,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC2,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC3,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC4,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC5,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC6,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC7,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC8,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC9,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC10,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC11,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC12,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC13,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC14,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC15,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC16,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC17,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC18,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC19,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC20,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC21,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC22,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC23,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC24,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD0,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD1,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD2,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD3,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD4,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD5,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD6,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD7,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD8,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD9,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD10,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD11,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD12,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD13,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD14,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD15,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD16,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD17,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD18,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD19,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD20,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD21,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD22,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD23,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD24,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD25,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD26,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD27,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE0,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE1,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE2,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE3,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE4,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE5,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE6,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE7,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE8,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE9,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE10,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE11,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PF0,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PF1,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PF2,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out"),
+               SUNXI_FUNCTION(0x4, "uart0")),          /* TX */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PF3,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PF4,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out"),
+               SUNXI_FUNCTION(0x4, "uart0")),          /* RX */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PF5,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PG0,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PG1,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PG2,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PG3,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PG4,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PG5,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PG6,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PG7,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PG8,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PG9,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PG10,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PG11,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH0,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH1,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH2,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH3,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH4,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH5,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH6,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH7,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH8,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH9,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH10,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH11,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH12,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH13,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH14,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH15,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH16,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH17,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH18,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH19,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH20,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH21,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH22,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH23,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH24,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH25,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH26,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PH27,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PI0,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PI1,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PI2,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PI3,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PI4,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PI5,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PI6,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PI7,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PI8,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PI9,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PI10,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PI11,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PI12,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PI13,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PI14,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PI15,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PI16,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PI17,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PI18,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PI19,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PI20,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PI21,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+};
+
+static const struct sunxi_desc_pin sun5i_a13_pins[] = {
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB0,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB1,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB2,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB3,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB4,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB10,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB15,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB16,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB17,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PB18,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC0,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC1,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC2,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC3,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC4,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC5,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC6,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC7,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC8,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC9,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC10,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC11,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC12,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC13,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC14,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC15,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PC19,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD2,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD3,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD4,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD5,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD6,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD7,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD10,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD11,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD12,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD13,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD14,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD15,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD18,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD19,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD20,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD21,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD22,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD23,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD24,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD25,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD26,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PD27,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE0,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE1,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE2,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE3,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE4,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE5,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE6,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE7,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE8,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE9,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE10,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out"),
+               SUNXI_FUNCTION(0x4, "uart1")),          /* TX */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PE11,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out"),
+               SUNXI_FUNCTION(0x4, "uart1")),          /* RX */
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PF0,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PF1,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PF2,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PF3,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PF4,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PF5,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PG0,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PG1,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PG2,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PG3,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out"),
+               SUNXI_FUNCTION(0x4, "uart1")),          /* TX */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PG4,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out"),
+               SUNXI_FUNCTION(0x4, "uart1")),          /* RX */
+       /* Hole */
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PG9,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PG10,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PG11,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+       SUNXI_PIN(SUNXI_PINCTRL_PIN_PG12,
+               SUNXI_FUNCTION(0x0, "gpio_in"),
+               SUNXI_FUNCTION(0x1, "gpio_out")),
+};
+
+static const struct sunxi_pinctrl_desc sun4i_a10_pinctrl_data = {
+       .pins = sun4i_a10_pins,
+       .npins = ARRAY_SIZE(sun4i_a10_pins),
+};
+
+static const struct sunxi_pinctrl_desc sun5i_a13_pinctrl_data = {
+       .pins = sun5i_a13_pins,
+       .npins = ARRAY_SIZE(sun5i_a13_pins),
+};
+
+static struct sunxi_pinctrl_group *
+sunxi_pinctrl_find_group_by_name(struct sunxi_pinctrl *pctl, const char *group)
+{
+       int i;
+
+       for (i = 0; i < pctl->ngroups; i++) {
+               struct sunxi_pinctrl_group *grp = pctl->groups + i;
+
+               if (!strcmp(grp->name, group))
+                       return grp;
+       }
+
+       return NULL;
+}
+
+static struct sunxi_pinctrl_function *
+sunxi_pinctrl_find_function_by_name(struct sunxi_pinctrl *pctl,
+                                   const char *name)
+{
+       struct sunxi_pinctrl_function *func = pctl->functions;
+       int i;
+
+       for (i = 0; i < pctl->nfunctions; i++) {
+               if (!func[i].name)
+                       break;
+
+               if (!strcmp(func[i].name, name))
+                       return func + i;
+       }
+
+       return NULL;
+}
+
+static struct sunxi_desc_function *
+sunxi_pinctrl_desc_find_function_by_name(struct sunxi_pinctrl *pctl,
+                                        const char *pin_name,
+                                        const char *func_name)
+{
+       int i;
+
+       for (i = 0; i < pctl->desc->npins; i++) {
+               const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
+
+               if (!strcmp(pin->pin.name, pin_name)) {
+                       struct sunxi_desc_function *func = pin->functions;
+
+                       while (func->name) {
+                               if (!strcmp(func->name, func_name))
+                                       return func;
+
+                               func++;
+                       }
+               }
+       }
+
+       return NULL;
+}
+
+static int sunxi_pctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+       struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+       return pctl->ngroups;
+}
+
+static const char *sunxi_pctrl_get_group_name(struct pinctrl_dev *pctldev,
+                                             unsigned group)
+{
+       struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+       return pctl->groups[group].name;
+}
+
+static int sunxi_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
+                                     unsigned group,
+                                     const unsigned **pins,
+                                     unsigned *num_pins)
+{
+       struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+       *pins = (unsigned *)&pctl->groups[group].pin;
+       *num_pins = 1;
+
+       return 0;
+}
+
+static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
+                                     struct device_node *node,
+                                     struct pinctrl_map **map,
+                                     unsigned *num_maps)
+{
+       struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+       unsigned long *pinconfig;
+       struct property *prop;
+       const char *function;
+       const char *group;
+       int ret, nmaps, i = 0;
+       u32 val;
+
+       *map = NULL;
+       *num_maps = 0;
+
+       ret = of_property_read_string(node, "allwinner,function", &function);
+       if (ret) {
+               dev_err(pctl->dev,
+                       "missing allwinner,function property in node %s\n",
+                       node->name);
+               return -EINVAL;
+       }
+
+       nmaps = of_property_count_strings(node, "allwinner,pins") * 2;
+       if (nmaps < 0) {
+               dev_err(pctl->dev,
+                       "missing allwinner,pins property in node %s\n",
+                       node->name);
+               return -EINVAL;
+       }
+
+       *map = kmalloc(nmaps * sizeof(struct pinctrl_map), GFP_KERNEL);
+       if (!*map)
+               return -ENOMEM;
+
+       of_property_for_each_string(node, "allwinner,pins", prop, group) {
+               struct sunxi_pinctrl_group *grp =
+                       sunxi_pinctrl_find_group_by_name(pctl, group);
+               int j = 0, configlen = 0;
+
+               if (!grp) {
+                       dev_err(pctl->dev, "unknown pin %s\n", group);
+                       continue;
+               }
+
+               if (!sunxi_pinctrl_desc_find_function_by_name(pctl,
+                                                             grp->name,
+                                                             function)) {
+                       dev_err(pctl->dev, "unsupported function %s on pin %s\n",
+                               function, group);
+                       continue;
+               }
+
+               (*map)[i].type = PIN_MAP_TYPE_MUX_GROUP;
+               (*map)[i].data.mux.group = group;
+               (*map)[i].data.mux.function = function;
+
+               i++;
+
+               (*map)[i].type = PIN_MAP_TYPE_CONFIGS_GROUP;
+               (*map)[i].data.configs.group_or_pin = group;
+
+               if (of_find_property(node, "allwinner,drive", NULL))
+                       configlen++;
+               if (of_find_property(node, "allwinner,pull", NULL))
+                       configlen++;
+
+               pinconfig = kzalloc(configlen * sizeof(*pinconfig), GFP_KERNEL);
+
+               if (!of_property_read_u32(node, "allwinner,drive", &val)) {
+                       u16 strength = (val + 1) * 10;
+                       pinconfig[j++] =
+                               pinconf_to_config_packed(PIN_CONFIG_DRIVE_STRENGTH,
+                                                        strength);
+               }
+
+               if (!of_property_read_u32(node, "allwinner,pull", &val)) {
+                       enum pin_config_param pull = PIN_CONFIG_END;
+                       if (val == 1)
+                               pull = PIN_CONFIG_BIAS_PULL_UP;
+                       else if (val == 2)
+                               pull = PIN_CONFIG_BIAS_PULL_DOWN;
+                       pinconfig[j++] = pinconf_to_config_packed(pull, 0);
+               }
+
+               (*map)[i].data.configs.configs = pinconfig;
+               (*map)[i].data.configs.num_configs = configlen;
+
+               i++;
+       }
+
+       *num_maps = i;
+
+       return 0;
+}
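+
+/*
+ * Illustrative example (not taken from any shipped .dts; the node name
+ * and values are made up, only the property names are fixed) of the
+ * kind of pin configuration node the parser above consumes:
+ *
+ *     uart1_pins: uart1-pins {
+ *             allwinner,pins = "PE10", "PE11";
+ *             allwinner,function = "uart1";
+ *             allwinner,drive = <0>;
+ *             allwinner,pull = <0>;
+ *     };
+ *
+ * Each listed pin produces two map entries: a MUX_GROUP map selecting
+ * "uart1" on that pin, and a CONFIGS_GROUP map carrying the optional
+ * drive strength (value 0 requests 10 mA) and bias settings.
+ */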
+
+static void sunxi_pctrl_dt_free_map(struct pinctrl_dev *pctldev,
+                                   struct pinctrl_map *map,
+                                   unsigned num_maps)
+{
+       int i;
+
+       for (i = 0; i < num_maps; i++) {
+               if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP)
+                       kfree(map[i].data.configs.configs);
+       }
+
+       kfree(map);
+}
+
+static struct pinctrl_ops sunxi_pctrl_ops = {
+       .dt_node_to_map         = sunxi_pctrl_dt_node_to_map,
+       .dt_free_map            = sunxi_pctrl_dt_free_map,
+       .get_groups_count       = sunxi_pctrl_get_groups_count,
+       .get_group_name         = sunxi_pctrl_get_group_name,
+       .get_group_pins         = sunxi_pctrl_get_group_pins,
+};
+
+static int sunxi_pconf_group_get(struct pinctrl_dev *pctldev,
+                                unsigned group,
+                                unsigned long *config)
+{
+       struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+       *config = pctl->groups[group].config;
+
+       return 0;
+}
+
+static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
+                                unsigned group,
+                                unsigned long config)
+{
+       struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+       struct sunxi_pinctrl_group *g = &pctl->groups[group];
+       u32 val, mask;
+       u16 strength;
+       u8 dlevel;
+
+       switch (pinconf_to_config_param(config)) {
+       case PIN_CONFIG_DRIVE_STRENGTH:
+               strength = pinconf_to_config_argument(config);
+               if (strength < 10 || strength > 40)
+                       return -EINVAL;
+               /*
+                * We convert from mA to what the register expects:
+                *   0: 10mA
+                *   1: 20mA
+                *   2: 30mA
+                *   3: 40mA
+                */
+               dlevel = strength / 10 - 1;
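+               /* e.g. a 20 mA request gives dlevel (20 / 10) - 1 = 1 */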
+               val = readl(pctl->membase + sunxi_dlevel_reg(g->pin));
+               mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin);
+               writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin),
+                       pctl->membase + sunxi_dlevel_reg(g->pin));
+               break;
+       case PIN_CONFIG_BIAS_PULL_UP:
+               val = readl(pctl->membase + sunxi_pull_reg(g->pin));
+               mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
+               writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin),
+                       pctl->membase + sunxi_pull_reg(g->pin));
+               break;
+       case PIN_CONFIG_BIAS_PULL_DOWN:
+               val = readl(pctl->membase + sunxi_pull_reg(g->pin));
+               mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
+               writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin),
+                       pctl->membase + sunxi_pull_reg(g->pin));
+               break;
+       default:
+               break;
+       }
+
+       /* cache the config value */
+       g->config = config;
+
+       return 0;
+}
+
+static struct pinconf_ops sunxi_pconf_ops = {
+       .pin_config_group_get   = sunxi_pconf_group_get,
+       .pin_config_group_set   = sunxi_pconf_group_set,
+};
+
+static int sunxi_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
+{
+       struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+       return pctl->nfunctions;
+}
+
+static const char *sunxi_pmx_get_func_name(struct pinctrl_dev *pctldev,
+                                          unsigned function)
+{
+       struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+       return pctl->functions[function].name;
+}
+
+static int sunxi_pmx_get_func_groups(struct pinctrl_dev *pctldev,
+                                    unsigned function,
+                                    const char * const **groups,
+                                    unsigned * const num_groups)
+{
+       struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+       *groups = pctl->functions[function].groups;
+       *num_groups = pctl->functions[function].ngroups;
+
+       return 0;
+}
+
+static void sunxi_pmx_set(struct pinctrl_dev *pctldev,
+                                unsigned pin,
+                                u8 config)
+{
+       struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+       u32 val = readl(pctl->membase + sunxi_mux_reg(pin));
+       u32 mask = MUX_PINS_MASK << sunxi_mux_offset(pin);
+       writel((val & ~mask) | config << sunxi_mux_offset(pin),
+               pctl->membase + sunxi_mux_reg(pin));
+}
+
+static int sunxi_pmx_enable(struct pinctrl_dev *pctldev,
+                           unsigned function,
+                           unsigned group)
+{
+       struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+       struct sunxi_pinctrl_group *g = pctl->groups + group;
+       struct sunxi_pinctrl_function *func = pctl->functions + function;
+       struct sunxi_desc_function *desc =
+               sunxi_pinctrl_desc_find_function_by_name(pctl,
+                                                        g->name,
+                                                        func->name);
+
+       if (!desc)
+               return -EINVAL;
+
+       sunxi_pmx_set(pctldev, g->pin, desc->muxval);
+
+       return 0;
+}
+
+static int
+sunxi_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
+                       struct pinctrl_gpio_range *range,
+                       unsigned offset,
+                       bool input)
+{
+       struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+       struct sunxi_desc_function *desc;
+       char pin_name[SUNXI_PIN_NAME_MAX_LEN];
+       const char *func;
+       u8 bank, pin;
+       int ret;
+
+       bank = (offset) / PINS_PER_BANK;
+       pin = (offset) % PINS_PER_BANK;
+
+       ret = sprintf(pin_name, "P%c%d", 'A' + bank, pin);
+       if (ret < 0)
+               goto error;
+
+       if (input)
+               func = "gpio_in";
+       else
+               func = "gpio_out";
+
+       desc = sunxi_pinctrl_desc_find_function_by_name(pctl,
+                                                       pin_name,
+                                                       func);
+       if (!desc) {
+               ret = -EINVAL;
+               goto error;
+       }
+
+       sunxi_pmx_set(pctldev, offset, desc->muxval);
+
+       ret = 0;
+
+error:
+       return ret;
+}
+
+static struct pinmux_ops sunxi_pmx_ops = {
+       .get_functions_count    = sunxi_pmx_get_funcs_cnt,
+       .get_function_name      = sunxi_pmx_get_func_name,
+       .get_function_groups    = sunxi_pmx_get_func_groups,
+       .enable                 = sunxi_pmx_enable,
+       .gpio_set_direction     = sunxi_pmx_gpio_set_direction,
+};
+
+static struct pinctrl_desc sunxi_pctrl_desc = {
+       .confops        = &sunxi_pconf_ops,
+       .pctlops        = &sunxi_pctrl_ops,
+       .pmxops         = &sunxi_pmx_ops,
+};
+
+static int sunxi_pinctrl_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+       return pinctrl_request_gpio(chip->base + offset);
+}
+
+static void sunxi_pinctrl_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+       pinctrl_free_gpio(chip->base + offset);
+}
+
+static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
+                                       unsigned offset)
+{
+       return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+       struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev);
+
+       u32 reg = sunxi_data_reg(offset);
+       u8 index = sunxi_data_offset(offset);
+       u32 val = (readl(pctl->membase + reg) >> index) & DATA_PINS_MASK;
+
+       return val;
+}
+
+static int sunxi_pinctrl_gpio_direction_output(struct gpio_chip *chip,
+                                       unsigned offset, int value)
+{
+       return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
+static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip,
+                               unsigned offset, int value)
+{
+       struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev);
+       u32 reg = sunxi_data_reg(offset);
+       u8 index = sunxi_data_offset(offset);
+       u32 mask = DATA_PINS_MASK << index;
+       u32 val = readl(pctl->membase + reg);
+
+       /* read-modify-write so the other pins in the bank keep their value */
+       writel((val & ~mask) | (value & DATA_PINS_MASK) << index,
+              pctl->membase + reg);
+}
+
+static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc,
+                               const struct of_phandle_args *gpiospec,
+                               u32 *flags)
+{
+       int pin, base;
+
+       base = PINS_PER_BANK * gpiospec->args[0];
+       pin = base + gpiospec->args[1];
+
+       if (pin >= gc->ngpio)
+               return -EINVAL;
+
+       if (flags)
+               *flags = gpiospec->args[2];
+
+       return pin;
+}
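+
+/*
+ * GPIOs are specified with three cells: <bank pin flags>. For example,
+ * assuming the controller node is labelled "pio" in the device tree,
+ * <&pio 4 10 0> selects bank 4 ("PE"), pin 10, i.e. global GPIO number
+ * 4 * 32 + 10 = 138 (PE10), with no flags set.
+ */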
+
+static struct gpio_chip sunxi_pinctrl_gpio_chip = {
+       .owner                  = THIS_MODULE,
+       .request                = sunxi_pinctrl_gpio_request,
+       .free                   = sunxi_pinctrl_gpio_free,
+       .direction_input        = sunxi_pinctrl_gpio_direction_input,
+       .direction_output       = sunxi_pinctrl_gpio_direction_output,
+       .get                    = sunxi_pinctrl_gpio_get,
+       .set                    = sunxi_pinctrl_gpio_set,
+       .of_xlate               = sunxi_pinctrl_gpio_of_xlate,
+       .of_gpio_n_cells        = 3,
+       .can_sleep              = 0,
+};
+
+static const struct of_device_id sunxi_pinctrl_match[] = {
+       { .compatible = "allwinner,sun4i-a10-pinctrl", .data = (void *)&sun4i_a10_pinctrl_data },
+       { .compatible = "allwinner,sun5i-a13-pinctrl", .data = (void *)&sun5i_a13_pinctrl_data },
+       {}
+};
+MODULE_DEVICE_TABLE(of, sunxi_pinctrl_match);
+
+static int sunxi_pinctrl_add_function(struct sunxi_pinctrl *pctl,
+                                       const char *name)
+{
+       struct sunxi_pinctrl_function *func = pctl->functions;
+
+       while (func->name) {
+               /* function already there */
+               if (strcmp(func->name, name) == 0) {
+                       func->ngroups++;
+                       return -EEXIST;
+               }
+               func++;
+       }
+
+       func->name = name;
+       func->ngroups = 1;
+
+       pctl->nfunctions++;
+
+       return 0;
+}
+
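+/*
+ * Build the driver state from the pin descriptions: one group is
+ * created per pin and one function entry per distinct function name.
+ * The function table is filled in two passes over the pins: the first
+ * pass only counts how many groups each function spans, the second
+ * fills in each function's group list.
+ */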
+static int sunxi_pinctrl_build_state(struct platform_device *pdev)
+{
+       struct sunxi_pinctrl *pctl = platform_get_drvdata(pdev);
+       int i;
+
+       pctl->ngroups = pctl->desc->npins;
+
+       /* Allocate groups */
+       pctl->groups = devm_kzalloc(&pdev->dev,
+                                   pctl->ngroups * sizeof(*pctl->groups),
+                                   GFP_KERNEL);
+       if (!pctl->groups)
+               return -ENOMEM;
+
+       for (i = 0; i < pctl->desc->npins; i++) {
+               const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
+               struct sunxi_pinctrl_group *group = pctl->groups + i;
+
+               group->name = pin->pin.name;
+               group->pin = pin->pin.number;
+       }
+
+       /*
+        * There cannot be more functions than pins, so allocate for that
+        * worst case here and shrink the array later, once the actual
+        * number of functions is known.
+        */
+       pctl->functions = devm_kzalloc(&pdev->dev,
+                               pctl->desc->npins * sizeof(*pctl->functions),
+                               GFP_KERNEL);
+       if (!pctl->functions)
+               return -ENOMEM;
+
+       /* Count functions and their associated groups */
+       for (i = 0; i < pctl->desc->npins; i++) {
+               const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
+               struct sunxi_desc_function *func = pin->functions;
+
+               while (func->name) {
+                       sunxi_pinctrl_add_function(pctl, func->name);
+                       func++;
+               }
+       }
+
+       pctl->functions = krealloc(pctl->functions,
+                               pctl->nfunctions * sizeof(*pctl->functions),
+                               GFP_KERNEL);
+
+       for (i = 0; i < pctl->desc->npins; i++) {
+               const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
+               struct sunxi_desc_function *func = pin->functions;
+
+               while (func->name) {
+                       struct sunxi_pinctrl_function *func_item;
+                       const char **func_grp;
+
+                       func_item = sunxi_pinctrl_find_function_by_name(pctl,
+                                                                       func->name);
+                       if (!func_item)
+                               return -EINVAL;
+
+                       if (!func_item->groups) {
+                               func_item->groups =
+                                       devm_kzalloc(&pdev->dev,
+                                                    func_item->ngroups * sizeof(*func_item->groups),
+                                                    GFP_KERNEL);
+                               if (!func_item->groups)
+                                       return -ENOMEM;
+                       }
+
+                       func_grp = func_item->groups;
+                       while (*func_grp)
+                               func_grp++;
+
+                       *func_grp = pin->pin.name;
+                       func++;
+               }
+       }
+
+       return 0;
+}
+
+static int sunxi_pinctrl_probe(struct platform_device *pdev)
+{
+       struct device_node *node = pdev->dev.of_node;
+       const struct of_device_id *device;
+       struct pinctrl_pin_desc *pins;
+       struct sunxi_pinctrl *pctl;
+       int i, ret, last_pin;
+
+       pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
+       if (!pctl)
+               return -ENOMEM;
+       platform_set_drvdata(pdev, pctl);
+
+       pctl->membase = of_iomap(node, 0);
+       if (!pctl->membase)
+               return -ENOMEM;
+
+       device = of_match_device(sunxi_pinctrl_match, &pdev->dev);
+       if (!device)
+               return -ENODEV;
+
+       pctl->desc = (struct sunxi_pinctrl_desc *)device->data;
+
+       ret = sunxi_pinctrl_build_state(pdev);
+       if (ret) {
+               dev_err(&pdev->dev, "dt probe failed: %d\n", ret);
+               return ret;
+       }
+
+       pins = devm_kzalloc(&pdev->dev,
+                           pctl->desc->npins * sizeof(*pins),
+                           GFP_KERNEL);
+       if (!pins)
+               return -ENOMEM;
+
+       for (i = 0; i < pctl->desc->npins; i++)
+               pins[i] = pctl->desc->pins[i].pin;
+
+       sunxi_pctrl_desc.name = dev_name(&pdev->dev);
+       sunxi_pctrl_desc.owner = THIS_MODULE;
+       sunxi_pctrl_desc.pins = pins;
+       sunxi_pctrl_desc.npins = pctl->desc->npins;
+       pctl->dev = &pdev->dev;
+       pctl->pctl_dev = pinctrl_register(&sunxi_pctrl_desc,
+                                         &pdev->dev, pctl);
+       if (!pctl->pctl_dev) {
+               dev_err(&pdev->dev, "couldn't register pinctrl driver\n");
+               return -EINVAL;
+       }
+
+       pctl->chip = devm_kzalloc(&pdev->dev, sizeof(*pctl->chip), GFP_KERNEL);
+       if (!pctl->chip) {
+               ret = -ENOMEM;
+               goto pinctrl_error;
+       }
+
+       last_pin = pctl->desc->pins[pctl->desc->npins - 1].pin.number;
+       *pctl->chip = sunxi_pinctrl_gpio_chip;
+       pctl->chip->ngpio = round_up(last_pin, PINS_PER_BANK);
+       pctl->chip->label = dev_name(&pdev->dev);
+       pctl->chip->dev = &pdev->dev;
+       pctl->chip->base = 0;
+
+       ret = gpiochip_add(pctl->chip);
+       if (ret)
+               goto pinctrl_error;
+
+       for (i = 0; i < pctl->desc->npins; i++) {
+               const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
+
+               ret = gpiochip_add_pin_range(pctl->chip, dev_name(&pdev->dev),
+                                            pin->pin.number,
+                                            pin->pin.number, 1);
+               if (ret)
+                       goto gpiochip_error;
+       }
+
+       dev_info(&pdev->dev, "initialized sunXi PIO driver\n");
+
+       return 0;
+
+gpiochip_error:
+       if (gpiochip_remove(pctl->chip))
+               dev_err(&pdev->dev, "failed to remove gpio chip\n");
+pinctrl_error:
+       pinctrl_unregister(pctl->pctl_dev);
+       return ret;
+}
+
+static struct platform_driver sunxi_pinctrl_driver = {
+       .probe = sunxi_pinctrl_probe,
+       .driver = {
+               .name = "sunxi-pinctrl",
+               .owner = THIS_MODULE,
+               .of_match_table = sunxi_pinctrl_match,
+       },
+};
+module_platform_driver(sunxi_pinctrl_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A1X pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-sunxi.h b/drivers/pinctrl/pinctrl-sunxi.h
new file mode 100644 (file)
index 0000000..e921621
--- /dev/null
@@ -0,0 +1,478 @@
+/*
+ * Allwinner A1X SoCs pinctrl driver.
+ *
+ * Copyright (C) 2012 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PINCTRL_SUNXI_H
+#define __PINCTRL_SUNXI_H
+
+#include <linux/kernel.h>
+
+#define PA_BASE        0
+#define PB_BASE        32
+#define PC_BASE        64
+#define PD_BASE        96
+#define PE_BASE        128
+#define PF_BASE        160
+#define PG_BASE        192
+#define PH_BASE        224
+#define PI_BASE        256
+
+#define SUNXI_PINCTRL_PIN_PA0  PINCTRL_PIN(PA_BASE + 0, "PA0")
+#define SUNXI_PINCTRL_PIN_PA1  PINCTRL_PIN(PA_BASE + 1, "PA1")
+#define SUNXI_PINCTRL_PIN_PA2  PINCTRL_PIN(PA_BASE + 2, "PA2")
+#define SUNXI_PINCTRL_PIN_PA3  PINCTRL_PIN(PA_BASE + 3, "PA3")
+#define SUNXI_PINCTRL_PIN_PA4  PINCTRL_PIN(PA_BASE + 4, "PA4")
+#define SUNXI_PINCTRL_PIN_PA5  PINCTRL_PIN(PA_BASE + 5, "PA5")
+#define SUNXI_PINCTRL_PIN_PA6  PINCTRL_PIN(PA_BASE + 6, "PA6")
+#define SUNXI_PINCTRL_PIN_PA7  PINCTRL_PIN(PA_BASE + 7, "PA7")
+#define SUNXI_PINCTRL_PIN_PA8  PINCTRL_PIN(PA_BASE + 8, "PA8")
+#define SUNXI_PINCTRL_PIN_PA9  PINCTRL_PIN(PA_BASE + 9, "PA9")
+#define SUNXI_PINCTRL_PIN_PA10 PINCTRL_PIN(PA_BASE + 10, "PA10")
+#define SUNXI_PINCTRL_PIN_PA11 PINCTRL_PIN(PA_BASE + 11, "PA11")
+#define SUNXI_PINCTRL_PIN_PA12 PINCTRL_PIN(PA_BASE + 12, "PA12")
+#define SUNXI_PINCTRL_PIN_PA13 PINCTRL_PIN(PA_BASE + 13, "PA13")
+#define SUNXI_PINCTRL_PIN_PA14 PINCTRL_PIN(PA_BASE + 14, "PA14")
+#define SUNXI_PINCTRL_PIN_PA15 PINCTRL_PIN(PA_BASE + 15, "PA15")
+#define SUNXI_PINCTRL_PIN_PA16 PINCTRL_PIN(PA_BASE + 16, "PA16")
+#define SUNXI_PINCTRL_PIN_PA17 PINCTRL_PIN(PA_BASE + 17, "PA17")
+#define SUNXI_PINCTRL_PIN_PA18 PINCTRL_PIN(PA_BASE + 18, "PA18")
+#define SUNXI_PINCTRL_PIN_PA19 PINCTRL_PIN(PA_BASE + 19, "PA19")
+#define SUNXI_PINCTRL_PIN_PA20 PINCTRL_PIN(PA_BASE + 20, "PA20")
+#define SUNXI_PINCTRL_PIN_PA21 PINCTRL_PIN(PA_BASE + 21, "PA21")
+#define SUNXI_PINCTRL_PIN_PA22 PINCTRL_PIN(PA_BASE + 22, "PA22")
+#define SUNXI_PINCTRL_PIN_PA23 PINCTRL_PIN(PA_BASE + 23, "PA23")
+#define SUNXI_PINCTRL_PIN_PA24 PINCTRL_PIN(PA_BASE + 24, "PA24")
+#define SUNXI_PINCTRL_PIN_PA25 PINCTRL_PIN(PA_BASE + 25, "PA25")
+#define SUNXI_PINCTRL_PIN_PA26 PINCTRL_PIN(PA_BASE + 26, "PA26")
+#define SUNXI_PINCTRL_PIN_PA27 PINCTRL_PIN(PA_BASE + 27, "PA27")
+#define SUNXI_PINCTRL_PIN_PA28 PINCTRL_PIN(PA_BASE + 28, "PA28")
+#define SUNXI_PINCTRL_PIN_PA29 PINCTRL_PIN(PA_BASE + 29, "PA29")
+#define SUNXI_PINCTRL_PIN_PA30 PINCTRL_PIN(PA_BASE + 30, "PA30")
+#define SUNXI_PINCTRL_PIN_PA31 PINCTRL_PIN(PA_BASE + 31, "PA31")
+
+#define SUNXI_PINCTRL_PIN_PB0  PINCTRL_PIN(PB_BASE + 0, "PB0")
+#define SUNXI_PINCTRL_PIN_PB1  PINCTRL_PIN(PB_BASE + 1, "PB1")
+#define SUNXI_PINCTRL_PIN_PB2  PINCTRL_PIN(PB_BASE + 2, "PB2")
+#define SUNXI_PINCTRL_PIN_PB3  PINCTRL_PIN(PB_BASE + 3, "PB3")
+#define SUNXI_PINCTRL_PIN_PB4  PINCTRL_PIN(PB_BASE + 4, "PB4")
+#define SUNXI_PINCTRL_PIN_PB5  PINCTRL_PIN(PB_BASE + 5, "PB5")
+#define SUNXI_PINCTRL_PIN_PB6  PINCTRL_PIN(PB_BASE + 6, "PB6")
+#define SUNXI_PINCTRL_PIN_PB7  PINCTRL_PIN(PB_BASE + 7, "PB7")
+#define SUNXI_PINCTRL_PIN_PB8  PINCTRL_PIN(PB_BASE + 8, "PB8")
+#define SUNXI_PINCTRL_PIN_PB9  PINCTRL_PIN(PB_BASE + 9, "PB9")
+#define SUNXI_PINCTRL_PIN_PB10 PINCTRL_PIN(PB_BASE + 10, "PB10")
+#define SUNXI_PINCTRL_PIN_PB11 PINCTRL_PIN(PB_BASE + 11, "PB11")
+#define SUNXI_PINCTRL_PIN_PB12 PINCTRL_PIN(PB_BASE + 12, "PB12")
+#define SUNXI_PINCTRL_PIN_PB13 PINCTRL_PIN(PB_BASE + 13, "PB13")
+#define SUNXI_PINCTRL_PIN_PB14 PINCTRL_PIN(PB_BASE + 14, "PB14")
+#define SUNXI_PINCTRL_PIN_PB15 PINCTRL_PIN(PB_BASE + 15, "PB15")
+#define SUNXI_PINCTRL_PIN_PB16 PINCTRL_PIN(PB_BASE + 16, "PB16")
+#define SUNXI_PINCTRL_PIN_PB17 PINCTRL_PIN(PB_BASE + 17, "PB17")
+#define SUNXI_PINCTRL_PIN_PB18 PINCTRL_PIN(PB_BASE + 18, "PB18")
+#define SUNXI_PINCTRL_PIN_PB19 PINCTRL_PIN(PB_BASE + 19, "PB19")
+#define SUNXI_PINCTRL_PIN_PB20 PINCTRL_PIN(PB_BASE + 20, "PB20")
+#define SUNXI_PINCTRL_PIN_PB21 PINCTRL_PIN(PB_BASE + 21, "PB21")
+#define SUNXI_PINCTRL_PIN_PB22 PINCTRL_PIN(PB_BASE + 22, "PB22")
+#define SUNXI_PINCTRL_PIN_PB23 PINCTRL_PIN(PB_BASE + 23, "PB23")
+#define SUNXI_PINCTRL_PIN_PB24 PINCTRL_PIN(PB_BASE + 24, "PB24")
+#define SUNXI_PINCTRL_PIN_PB25 PINCTRL_PIN(PB_BASE + 25, "PB25")
+#define SUNXI_PINCTRL_PIN_PB26 PINCTRL_PIN(PB_BASE + 26, "PB26")
+#define SUNXI_PINCTRL_PIN_PB27 PINCTRL_PIN(PB_BASE + 27, "PB27")
+#define SUNXI_PINCTRL_PIN_PB28 PINCTRL_PIN(PB_BASE + 28, "PB28")
+#define SUNXI_PINCTRL_PIN_PB29 PINCTRL_PIN(PB_BASE + 29, "PB29")
+#define SUNXI_PINCTRL_PIN_PB30 PINCTRL_PIN(PB_BASE + 30, "PB30")
+#define SUNXI_PINCTRL_PIN_PB31 PINCTRL_PIN(PB_BASE + 31, "PB31")
+
+#define SUNXI_PINCTRL_PIN_PC0  PINCTRL_PIN(PC_BASE + 0, "PC0")
+#define SUNXI_PINCTRL_PIN_PC1  PINCTRL_PIN(PC_BASE + 1, "PC1")
+#define SUNXI_PINCTRL_PIN_PC2  PINCTRL_PIN(PC_BASE + 2, "PC2")
+#define SUNXI_PINCTRL_PIN_PC3  PINCTRL_PIN(PC_BASE + 3, "PC3")
+#define SUNXI_PINCTRL_PIN_PC4  PINCTRL_PIN(PC_BASE + 4, "PC4")
+#define SUNXI_PINCTRL_PIN_PC5  PINCTRL_PIN(PC_BASE + 5, "PC5")
+#define SUNXI_PINCTRL_PIN_PC6  PINCTRL_PIN(PC_BASE + 6, "PC6")
+#define SUNXI_PINCTRL_PIN_PC7  PINCTRL_PIN(PC_BASE + 7, "PC7")
+#define SUNXI_PINCTRL_PIN_PC8  PINCTRL_PIN(PC_BASE + 8, "PC8")
+#define SUNXI_PINCTRL_PIN_PC9  PINCTRL_PIN(PC_BASE + 9, "PC9")
+#define SUNXI_PINCTRL_PIN_PC10 PINCTRL_PIN(PC_BASE + 10, "PC10")
+#define SUNXI_PINCTRL_PIN_PC11 PINCTRL_PIN(PC_BASE + 11, "PC11")
+#define SUNXI_PINCTRL_PIN_PC12 PINCTRL_PIN(PC_BASE + 12, "PC12")
+#define SUNXI_PINCTRL_PIN_PC13 PINCTRL_PIN(PC_BASE + 13, "PC13")
+#define SUNXI_PINCTRL_PIN_PC14 PINCTRL_PIN(PC_BASE + 14, "PC14")
+#define SUNXI_PINCTRL_PIN_PC15 PINCTRL_PIN(PC_BASE + 15, "PC15")
+#define SUNXI_PINCTRL_PIN_PC16 PINCTRL_PIN(PC_BASE + 16, "PC16")
+#define SUNXI_PINCTRL_PIN_PC17 PINCTRL_PIN(PC_BASE + 17, "PC17")
+#define SUNXI_PINCTRL_PIN_PC18 PINCTRL_PIN(PC_BASE + 18, "PC18")
+#define SUNXI_PINCTRL_PIN_PC19 PINCTRL_PIN(PC_BASE + 19, "PC19")
+#define SUNXI_PINCTRL_PIN_PC20 PINCTRL_PIN(PC_BASE + 20, "PC20")
+#define SUNXI_PINCTRL_PIN_PC21 PINCTRL_PIN(PC_BASE + 21, "PC21")
+#define SUNXI_PINCTRL_PIN_PC22 PINCTRL_PIN(PC_BASE + 22, "PC22")
+#define SUNXI_PINCTRL_PIN_PC23 PINCTRL_PIN(PC_BASE + 23, "PC23")
+#define SUNXI_PINCTRL_PIN_PC24 PINCTRL_PIN(PC_BASE + 24, "PC24")
+#define SUNXI_PINCTRL_PIN_PC25 PINCTRL_PIN(PC_BASE + 25, "PC25")
+#define SUNXI_PINCTRL_PIN_PC26 PINCTRL_PIN(PC_BASE + 26, "PC26")
+#define SUNXI_PINCTRL_PIN_PC27 PINCTRL_PIN(PC_BASE + 27, "PC27")
+#define SUNXI_PINCTRL_PIN_PC28 PINCTRL_PIN(PC_BASE + 28, "PC28")
+#define SUNXI_PINCTRL_PIN_PC29 PINCTRL_PIN(PC_BASE + 29, "PC29")
+#define SUNXI_PINCTRL_PIN_PC30 PINCTRL_PIN(PC_BASE + 30, "PC30")
+#define SUNXI_PINCTRL_PIN_PC31 PINCTRL_PIN(PC_BASE + 31, "PC31")
+
+#define SUNXI_PINCTRL_PIN_PD0  PINCTRL_PIN(PD_BASE + 0, "PD0")
+#define SUNXI_PINCTRL_PIN_PD1  PINCTRL_PIN(PD_BASE + 1, "PD1")
+#define SUNXI_PINCTRL_PIN_PD2  PINCTRL_PIN(PD_BASE + 2, "PD2")
+#define SUNXI_PINCTRL_PIN_PD3  PINCTRL_PIN(PD_BASE + 3, "PD3")
+#define SUNXI_PINCTRL_PIN_PD4  PINCTRL_PIN(PD_BASE + 4, "PD4")
+#define SUNXI_PINCTRL_PIN_PD5  PINCTRL_PIN(PD_BASE + 5, "PD5")
+#define SUNXI_PINCTRL_PIN_PD6  PINCTRL_PIN(PD_BASE + 6, "PD6")
+#define SUNXI_PINCTRL_PIN_PD7  PINCTRL_PIN(PD_BASE + 7, "PD7")
+#define SUNXI_PINCTRL_PIN_PD8  PINCTRL_PIN(PD_BASE + 8, "PD8")
+#define SUNXI_PINCTRL_PIN_PD9  PINCTRL_PIN(PD_BASE + 9, "PD9")
+#define SUNXI_PINCTRL_PIN_PD10 PINCTRL_PIN(PD_BASE + 10, "PD10")
+#define SUNXI_PINCTRL_PIN_PD11 PINCTRL_PIN(PD_BASE + 11, "PD11")
+#define SUNXI_PINCTRL_PIN_PD12 PINCTRL_PIN(PD_BASE + 12, "PD12")
+#define SUNXI_PINCTRL_PIN_PD13 PINCTRL_PIN(PD_BASE + 13, "PD13")
+#define SUNXI_PINCTRL_PIN_PD14 PINCTRL_PIN(PD_BASE + 14, "PD14")
+#define SUNXI_PINCTRL_PIN_PD15 PINCTRL_PIN(PD_BASE + 15, "PD15")
+#define SUNXI_PINCTRL_PIN_PD16 PINCTRL_PIN(PD_BASE + 16, "PD16")
+#define SUNXI_PINCTRL_PIN_PD17 PINCTRL_PIN(PD_BASE + 17, "PD17")
+#define SUNXI_PINCTRL_PIN_PD18 PINCTRL_PIN(PD_BASE + 18, "PD18")
+#define SUNXI_PINCTRL_PIN_PD19 PINCTRL_PIN(PD_BASE + 19, "PD19")
+#define SUNXI_PINCTRL_PIN_PD20 PINCTRL_PIN(PD_BASE + 20, "PD20")
+#define SUNXI_PINCTRL_PIN_PD21 PINCTRL_PIN(PD_BASE + 21, "PD21")
+#define SUNXI_PINCTRL_PIN_PD22 PINCTRL_PIN(PD_BASE + 22, "PD22")
+#define SUNXI_PINCTRL_PIN_PD23 PINCTRL_PIN(PD_BASE + 23, "PD23")
+#define SUNXI_PINCTRL_PIN_PD24 PINCTRL_PIN(PD_BASE + 24, "PD24")
+#define SUNXI_PINCTRL_PIN_PD25 PINCTRL_PIN(PD_BASE + 25, "PD25")
+#define SUNXI_PINCTRL_PIN_PD26 PINCTRL_PIN(PD_BASE + 26, "PD26")
+#define SUNXI_PINCTRL_PIN_PD27 PINCTRL_PIN(PD_BASE + 27, "PD27")
+#define SUNXI_PINCTRL_PIN_PD28 PINCTRL_PIN(PD_BASE + 28, "PD28")
+#define SUNXI_PINCTRL_PIN_PD29 PINCTRL_PIN(PD_BASE + 29, "PD29")
+#define SUNXI_PINCTRL_PIN_PD30 PINCTRL_PIN(PD_BASE + 30, "PD30")
+#define SUNXI_PINCTRL_PIN_PD31 PINCTRL_PIN(PD_BASE + 31, "PD31")
+
+#define SUNXI_PINCTRL_PIN_PE0  PINCTRL_PIN(PE_BASE + 0, "PE0")
+#define SUNXI_PINCTRL_PIN_PE1  PINCTRL_PIN(PE_BASE + 1, "PE1")
+#define SUNXI_PINCTRL_PIN_PE2  PINCTRL_PIN(PE_BASE + 2, "PE2")
+#define SUNXI_PINCTRL_PIN_PE3  PINCTRL_PIN(PE_BASE + 3, "PE3")
+#define SUNXI_PINCTRL_PIN_PE4  PINCTRL_PIN(PE_BASE + 4, "PE4")
+#define SUNXI_PINCTRL_PIN_PE5  PINCTRL_PIN(PE_BASE + 5, "PE5")
+#define SUNXI_PINCTRL_PIN_PE6  PINCTRL_PIN(PE_BASE + 6, "PE6")
+#define SUNXI_PINCTRL_PIN_PE7  PINCTRL_PIN(PE_BASE + 7, "PE7")
+#define SUNXI_PINCTRL_PIN_PE8  PINCTRL_PIN(PE_BASE + 8, "PE8")
+#define SUNXI_PINCTRL_PIN_PE9  PINCTRL_PIN(PE_BASE + 9, "PE9")
+#define SUNXI_PINCTRL_PIN_PE10 PINCTRL_PIN(PE_BASE + 10, "PE10")
+#define SUNXI_PINCTRL_PIN_PE11 PINCTRL_PIN(PE_BASE + 11, "PE11")
+#define SUNXI_PINCTRL_PIN_PE12 PINCTRL_PIN(PE_BASE + 12, "PE12")
+#define SUNXI_PINCTRL_PIN_PE13 PINCTRL_PIN(PE_BASE + 13, "PE13")
+#define SUNXI_PINCTRL_PIN_PE14 PINCTRL_PIN(PE_BASE + 14, "PE14")
+#define SUNXI_PINCTRL_PIN_PE15 PINCTRL_PIN(PE_BASE + 15, "PE15")
+#define SUNXI_PINCTRL_PIN_PE16 PINCTRL_PIN(PE_BASE + 16, "PE16")
+#define SUNXI_PINCTRL_PIN_PE17 PINCTRL_PIN(PE_BASE + 17, "PE17")
+#define SUNXI_PINCTRL_PIN_PE18 PINCTRL_PIN(PE_BASE + 18, "PE18")
+#define SUNXI_PINCTRL_PIN_PE19 PINCTRL_PIN(PE_BASE + 19, "PE19")
+#define SUNXI_PINCTRL_PIN_PE20 PINCTRL_PIN(PE_BASE + 20, "PE20")
+#define SUNXI_PINCTRL_PIN_PE21 PINCTRL_PIN(PE_BASE + 21, "PE21")
+#define SUNXI_PINCTRL_PIN_PE22 PINCTRL_PIN(PE_BASE + 22, "PE22")
+#define SUNXI_PINCTRL_PIN_PE23 PINCTRL_PIN(PE_BASE + 23, "PE23")
+#define SUNXI_PINCTRL_PIN_PE24 PINCTRL_PIN(PE_BASE + 24, "PE24")
+#define SUNXI_PINCTRL_PIN_PE25 PINCTRL_PIN(PE_BASE + 25, "PE25")
+#define SUNXI_PINCTRL_PIN_PE26 PINCTRL_PIN(PE_BASE + 26, "PE26")
+#define SUNXI_PINCTRL_PIN_PE27 PINCTRL_PIN(PE_BASE + 27, "PE27")
+#define SUNXI_PINCTRL_PIN_PE28 PINCTRL_PIN(PE_BASE + 28, "PE28")
+#define SUNXI_PINCTRL_PIN_PE29 PINCTRL_PIN(PE_BASE + 29, "PE29")
+#define SUNXI_PINCTRL_PIN_PE30 PINCTRL_PIN(PE_BASE + 30, "PE30")
+#define SUNXI_PINCTRL_PIN_PE31 PINCTRL_PIN(PE_BASE + 31, "PE31")
+
+#define SUNXI_PINCTRL_PIN_PF0  PINCTRL_PIN(PF_BASE + 0, "PF0")
+#define SUNXI_PINCTRL_PIN_PF1  PINCTRL_PIN(PF_BASE + 1, "PF1")
+#define SUNXI_PINCTRL_PIN_PF2  PINCTRL_PIN(PF_BASE + 2, "PF2")
+#define SUNXI_PINCTRL_PIN_PF3  PINCTRL_PIN(PF_BASE + 3, "PF3")
+#define SUNXI_PINCTRL_PIN_PF4  PINCTRL_PIN(PF_BASE + 4, "PF4")
+#define SUNXI_PINCTRL_PIN_PF5  PINCTRL_PIN(PF_BASE + 5, "PF5")
+#define SUNXI_PINCTRL_PIN_PF6  PINCTRL_PIN(PF_BASE + 6, "PF6")
+#define SUNXI_PINCTRL_PIN_PF7  PINCTRL_PIN(PF_BASE + 7, "PF7")
+#define SUNXI_PINCTRL_PIN_PF8  PINCTRL_PIN(PF_BASE + 8, "PF8")
+#define SUNXI_PINCTRL_PIN_PF9  PINCTRL_PIN(PF_BASE + 9, "PF9")
+#define SUNXI_PINCTRL_PIN_PF10 PINCTRL_PIN(PF_BASE + 10, "PF10")
+#define SUNXI_PINCTRL_PIN_PF11 PINCTRL_PIN(PF_BASE + 11, "PF11")
+#define SUNXI_PINCTRL_PIN_PF12 PINCTRL_PIN(PF_BASE + 12, "PF12")
+#define SUNXI_PINCTRL_PIN_PF13 PINCTRL_PIN(PF_BASE + 13, "PF13")
+#define SUNXI_PINCTRL_PIN_PF14 PINCTRL_PIN(PF_BASE + 14, "PF14")
+#define SUNXI_PINCTRL_PIN_PF15 PINCTRL_PIN(PF_BASE + 15, "PF15")
+#define SUNXI_PINCTRL_PIN_PF16 PINCTRL_PIN(PF_BASE + 16, "PF16")
+#define SUNXI_PINCTRL_PIN_PF17 PINCTRL_PIN(PF_BASE + 17, "PF17")
+#define SUNXI_PINCTRL_PIN_PF18 PINCTRL_PIN(PF_BASE + 18, "PF18")
+#define SUNXI_PINCTRL_PIN_PF19 PINCTRL_PIN(PF_BASE + 19, "PF19")
+#define SUNXI_PINCTRL_PIN_PF20 PINCTRL_PIN(PF_BASE + 20, "PF20")
+#define SUNXI_PINCTRL_PIN_PF21 PINCTRL_PIN(PF_BASE + 21, "PF21")
+#define SUNXI_PINCTRL_PIN_PF22 PINCTRL_PIN(PF_BASE + 22, "PF22")
+#define SUNXI_PINCTRL_PIN_PF23 PINCTRL_PIN(PF_BASE + 23, "PF23")
+#define SUNXI_PINCTRL_PIN_PF24 PINCTRL_PIN(PF_BASE + 24, "PF24")
+#define SUNXI_PINCTRL_PIN_PF25 PINCTRL_PIN(PF_BASE + 25, "PF25")
+#define SUNXI_PINCTRL_PIN_PF26 PINCTRL_PIN(PF_BASE + 26, "PF26")
+#define SUNXI_PINCTRL_PIN_PF27 PINCTRL_PIN(PF_BASE + 27, "PF27")
+#define SUNXI_PINCTRL_PIN_PF28 PINCTRL_PIN(PF_BASE + 28, "PF28")
+#define SUNXI_PINCTRL_PIN_PF29 PINCTRL_PIN(PF_BASE + 29, "PF29")
+#define SUNXI_PINCTRL_PIN_PF30 PINCTRL_PIN(PF_BASE + 30, "PF30")
+#define SUNXI_PINCTRL_PIN_PF31 PINCTRL_PIN(PF_BASE + 31, "PF31")
+
+#define SUNXI_PINCTRL_PIN_PG0  PINCTRL_PIN(PG_BASE + 0, "PG0")
+#define SUNXI_PINCTRL_PIN_PG1  PINCTRL_PIN(PG_BASE + 1, "PG1")
+#define SUNXI_PINCTRL_PIN_PG2  PINCTRL_PIN(PG_BASE + 2, "PG2")
+#define SUNXI_PINCTRL_PIN_PG3  PINCTRL_PIN(PG_BASE + 3, "PG3")
+#define SUNXI_PINCTRL_PIN_PG4  PINCTRL_PIN(PG_BASE + 4, "PG4")
+#define SUNXI_PINCTRL_PIN_PG5  PINCTRL_PIN(PG_BASE + 5, "PG5")
+#define SUNXI_PINCTRL_PIN_PG6  PINCTRL_PIN(PG_BASE + 6, "PG6")
+#define SUNXI_PINCTRL_PIN_PG7  PINCTRL_PIN(PG_BASE + 7, "PG7")
+#define SUNXI_PINCTRL_PIN_PG8  PINCTRL_PIN(PG_BASE + 8, "PG8")
+#define SUNXI_PINCTRL_PIN_PG9  PINCTRL_PIN(PG_BASE + 9, "PG9")
+#define SUNXI_PINCTRL_PIN_PG10 PINCTRL_PIN(PG_BASE + 10, "PG10")
+#define SUNXI_PINCTRL_PIN_PG11 PINCTRL_PIN(PG_BASE + 11, "PG11")
+#define SUNXI_PINCTRL_PIN_PG12 PINCTRL_PIN(PG_BASE + 12, "PG12")
+#define SUNXI_PINCTRL_PIN_PG13 PINCTRL_PIN(PG_BASE + 13, "PG13")
+#define SUNXI_PINCTRL_PIN_PG14 PINCTRL_PIN(PG_BASE + 14, "PG14")
+#define SUNXI_PINCTRL_PIN_PG15 PINCTRL_PIN(PG_BASE + 15, "PG15")
+#define SUNXI_PINCTRL_PIN_PG16 PINCTRL_PIN(PG_BASE + 16, "PG16")
+#define SUNXI_PINCTRL_PIN_PG17 PINCTRL_PIN(PG_BASE + 17, "PG17")
+#define SUNXI_PINCTRL_PIN_PG18 PINCTRL_PIN(PG_BASE + 18, "PG18")
+#define SUNXI_PINCTRL_PIN_PG19 PINCTRL_PIN(PG_BASE + 19, "PG19")
+#define SUNXI_PINCTRL_PIN_PG20 PINCTRL_PIN(PG_BASE + 20, "PG20")
+#define SUNXI_PINCTRL_PIN_PG21 PINCTRL_PIN(PG_BASE + 21, "PG21")
+#define SUNXI_PINCTRL_PIN_PG22 PINCTRL_PIN(PG_BASE + 22, "PG22")
+#define SUNXI_PINCTRL_PIN_PG23 PINCTRL_PIN(PG_BASE + 23, "PG23")
+#define SUNXI_PINCTRL_PIN_PG24 PINCTRL_PIN(PG_BASE + 24, "PG24")
+#define SUNXI_PINCTRL_PIN_PG25 PINCTRL_PIN(PG_BASE + 25, "PG25")
+#define SUNXI_PINCTRL_PIN_PG26 PINCTRL_PIN(PG_BASE + 26, "PG26")
+#define SUNXI_PINCTRL_PIN_PG27 PINCTRL_PIN(PG_BASE + 27, "PG27")
+#define SUNXI_PINCTRL_PIN_PG28 PINCTRL_PIN(PG_BASE + 28, "PG28")
+#define SUNXI_PINCTRL_PIN_PG29 PINCTRL_PIN(PG_BASE + 29, "PG29")
+#define SUNXI_PINCTRL_PIN_PG30 PINCTRL_PIN(PG_BASE + 30, "PG30")
+#define SUNXI_PINCTRL_PIN_PG31 PINCTRL_PIN(PG_BASE + 31, "PG31")
+
+#define SUNXI_PINCTRL_PIN_PH0  PINCTRL_PIN(PH_BASE + 0, "PH0")
+#define SUNXI_PINCTRL_PIN_PH1  PINCTRL_PIN(PH_BASE + 1, "PH1")
+#define SUNXI_PINCTRL_PIN_PH2  PINCTRL_PIN(PH_BASE + 2, "PH2")
+#define SUNXI_PINCTRL_PIN_PH3  PINCTRL_PIN(PH_BASE + 3, "PH3")
+#define SUNXI_PINCTRL_PIN_PH4  PINCTRL_PIN(PH_BASE + 4, "PH4")
+#define SUNXI_PINCTRL_PIN_PH5  PINCTRL_PIN(PH_BASE + 5, "PH5")
+#define SUNXI_PINCTRL_PIN_PH6  PINCTRL_PIN(PH_BASE + 6, "PH6")
+#define SUNXI_PINCTRL_PIN_PH7  PINCTRL_PIN(PH_BASE + 7, "PH7")
+#define SUNXI_PINCTRL_PIN_PH8  PINCTRL_PIN(PH_BASE + 8, "PH8")
+#define SUNXI_PINCTRL_PIN_PH9  PINCTRL_PIN(PH_BASE + 9, "PH9")
+#define SUNXI_PINCTRL_PIN_PH10 PINCTRL_PIN(PH_BASE + 10, "PH10")
+#define SUNXI_PINCTRL_PIN_PH11 PINCTRL_PIN(PH_BASE + 11, "PH11")
+#define SUNXI_PINCTRL_PIN_PH12 PINCTRL_PIN(PH_BASE + 12, "PH12")
+#define SUNXI_PINCTRL_PIN_PH13 PINCTRL_PIN(PH_BASE + 13, "PH13")
+#define SUNXI_PINCTRL_PIN_PH14 PINCTRL_PIN(PH_BASE + 14, "PH14")
+#define SUNXI_PINCTRL_PIN_PH15 PINCTRL_PIN(PH_BASE + 15, "PH15")
+#define SUNXI_PINCTRL_PIN_PH16 PINCTRL_PIN(PH_BASE + 16, "PH16")
+#define SUNXI_PINCTRL_PIN_PH17 PINCTRL_PIN(PH_BASE + 17, "PH17")
+#define SUNXI_PINCTRL_PIN_PH18 PINCTRL_PIN(PH_BASE + 18, "PH18")
+#define SUNXI_PINCTRL_PIN_PH19 PINCTRL_PIN(PH_BASE + 19, "PH19")
+#define SUNXI_PINCTRL_PIN_PH20 PINCTRL_PIN(PH_BASE + 20, "PH20")
+#define SUNXI_PINCTRL_PIN_PH21 PINCTRL_PIN(PH_BASE + 21, "PH21")
+#define SUNXI_PINCTRL_PIN_PH22 PINCTRL_PIN(PH_BASE + 22, "PH22")
+#define SUNXI_PINCTRL_PIN_PH23 PINCTRL_PIN(PH_BASE + 23, "PH23")
+#define SUNXI_PINCTRL_PIN_PH24 PINCTRL_PIN(PH_BASE + 24, "PH24")
+#define SUNXI_PINCTRL_PIN_PH25 PINCTRL_PIN(PH_BASE + 25, "PH25")
+#define SUNXI_PINCTRL_PIN_PH26 PINCTRL_PIN(PH_BASE + 26, "PH26")
+#define SUNXI_PINCTRL_PIN_PH27 PINCTRL_PIN(PH_BASE + 27, "PH27")
+#define SUNXI_PINCTRL_PIN_PH28 PINCTRL_PIN(PH_BASE + 28, "PH28")
+#define SUNXI_PINCTRL_PIN_PH29 PINCTRL_PIN(PH_BASE + 29, "PH29")
+#define SUNXI_PINCTRL_PIN_PH30 PINCTRL_PIN(PH_BASE + 30, "PH30")
+#define SUNXI_PINCTRL_PIN_PH31 PINCTRL_PIN(PH_BASE + 31, "PH31")
+
+#define SUNXI_PINCTRL_PIN_PI0  PINCTRL_PIN(PI_BASE + 0, "PI0")
+#define SUNXI_PINCTRL_PIN_PI1  PINCTRL_PIN(PI_BASE + 1, "PI1")
+#define SUNXI_PINCTRL_PIN_PI2  PINCTRL_PIN(PI_BASE + 2, "PI2")
+#define SUNXI_PINCTRL_PIN_PI3  PINCTRL_PIN(PI_BASE + 3, "PI3")
+#define SUNXI_PINCTRL_PIN_PI4  PINCTRL_PIN(PI_BASE + 4, "PI4")
+#define SUNXI_PINCTRL_PIN_PI5  PINCTRL_PIN(PI_BASE + 5, "PI5")
+#define SUNXI_PINCTRL_PIN_PI6  PINCTRL_PIN(PI_BASE + 6, "PI6")
+#define SUNXI_PINCTRL_PIN_PI7  PINCTRL_PIN(PI_BASE + 7, "PI7")
+#define SUNXI_PINCTRL_PIN_PI8  PINCTRL_PIN(PI_BASE + 8, "PI8")
+#define SUNXI_PINCTRL_PIN_PI9  PINCTRL_PIN(PI_BASE + 9, "PI9")
+#define SUNXI_PINCTRL_PIN_PI10 PINCTRL_PIN(PI_BASE + 10, "PI10")
+#define SUNXI_PINCTRL_PIN_PI11 PINCTRL_PIN(PI_BASE + 11, "PI11")
+#define SUNXI_PINCTRL_PIN_PI12 PINCTRL_PIN(PI_BASE + 12, "PI12")
+#define SUNXI_PINCTRL_PIN_PI13 PINCTRL_PIN(PI_BASE + 13, "PI13")
+#define SUNXI_PINCTRL_PIN_PI14 PINCTRL_PIN(PI_BASE + 14, "PI14")
+#define SUNXI_PINCTRL_PIN_PI15 PINCTRL_PIN(PI_BASE + 15, "PI15")
+#define SUNXI_PINCTRL_PIN_PI16 PINCTRL_PIN(PI_BASE + 16, "PI16")
+#define SUNXI_PINCTRL_PIN_PI17 PINCTRL_PIN(PI_BASE + 17, "PI17")
+#define SUNXI_PINCTRL_PIN_PI18 PINCTRL_PIN(PI_BASE + 18, "PI18")
+#define SUNXI_PINCTRL_PIN_PI19 PINCTRL_PIN(PI_BASE + 19, "PI19")
+#define SUNXI_PINCTRL_PIN_PI20 PINCTRL_PIN(PI_BASE + 20, "PI20")
+#define SUNXI_PINCTRL_PIN_PI21 PINCTRL_PIN(PI_BASE + 21, "PI21")
+#define SUNXI_PINCTRL_PIN_PI22 PINCTRL_PIN(PI_BASE + 22, "PI22")
+#define SUNXI_PINCTRL_PIN_PI23 PINCTRL_PIN(PI_BASE + 23, "PI23")
+#define SUNXI_PINCTRL_PIN_PI24 PINCTRL_PIN(PI_BASE + 24, "PI24")
+#define SUNXI_PINCTRL_PIN_PI25 PINCTRL_PIN(PI_BASE + 25, "PI25")
+#define SUNXI_PINCTRL_PIN_PI26 PINCTRL_PIN(PI_BASE + 26, "PI26")
+#define SUNXI_PINCTRL_PIN_PI27 PINCTRL_PIN(PI_BASE + 27, "PI27")
+#define SUNXI_PINCTRL_PIN_PI28 PINCTRL_PIN(PI_BASE + 28, "PI28")
+#define SUNXI_PINCTRL_PIN_PI29 PINCTRL_PIN(PI_BASE + 29, "PI29")
+#define SUNXI_PINCTRL_PIN_PI30 PINCTRL_PIN(PI_BASE + 30, "PI30")
+#define SUNXI_PINCTRL_PIN_PI31 PINCTRL_PIN(PI_BASE + 31, "PI31")
+
+#define SUNXI_PIN_NAME_MAX_LEN 5
+
+#define BANK_MEM_SIZE          0x24
+#define MUX_REGS_OFFSET                0x0
+#define DATA_REGS_OFFSET       0x10
+#define DLEVEL_REGS_OFFSET     0x14
+#define PULL_REGS_OFFSET       0x1c
+
+#define PINS_PER_BANK          32
+#define MUX_PINS_PER_REG       8
+#define MUX_PINS_BITS          4
+#define MUX_PINS_MASK          0x0f
+#define DATA_PINS_PER_REG      32
+#define DATA_PINS_BITS         1
+#define DATA_PINS_MASK         0x01
+#define DLEVEL_PINS_PER_REG    16
+#define DLEVEL_PINS_BITS       2
+#define DLEVEL_PINS_MASK       0x03
+#define PULL_PINS_PER_REG      16
+#define PULL_PINS_BITS         2
+#define PULL_PINS_MASK         0x03
+
+struct sunxi_desc_function {
+       const char      *name;
+       u8              muxval;
+};
+
+struct sunxi_desc_pin {
+       struct pinctrl_pin_desc         pin;
+       struct sunxi_desc_function      *functions;
+};
+
+struct sunxi_pinctrl_desc {
+       const struct sunxi_desc_pin     *pins;
+       int                             npins;
+       struct pinctrl_gpio_range       *ranges;
+       int                             nranges;
+};
+
+struct sunxi_pinctrl_function {
+       const char      *name;
+       const char      **groups;
+       unsigned        ngroups;
+};
+
+struct sunxi_pinctrl_group {
+       const char      *name;
+       unsigned long   config;
+       unsigned        pin;
+};
+
+struct sunxi_pinctrl {
+       void __iomem                    *membase;
+       struct gpio_chip                *chip;
+       struct sunxi_pinctrl_desc       *desc;
+       struct device                   *dev;
+       struct sunxi_pinctrl_function   *functions;
+       unsigned                        nfunctions;
+       struct sunxi_pinctrl_group      *groups;
+       unsigned                        ngroups;
+       struct pinctrl_dev              *pctl_dev;
+};
+
+#define SUNXI_PIN(_pin, ...)                                   \
+       {                                                       \
+               .pin = _pin,                                    \
+               .functions = (struct sunxi_desc_function[]){    \
+                       __VA_ARGS__, { } },                     \
+       }
+
+#define SUNXI_FUNCTION(_val, _name)                            \
+       {                                                       \
+               .name = _name,                                  \
+               .muxval = _val,                                 \
+       }
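+
+/*
+ * As an illustration, the pin table entry
+ *
+ *     SUNXI_PIN(SUNXI_PINCTRL_PIN_PE10,
+ *               SUNXI_FUNCTION(0x0, "gpio_in"),
+ *               SUNXI_FUNCTION(0x1, "gpio_out"),
+ *               SUNXI_FUNCTION(0x4, "uart1"))
+ *
+ * describes pin 138 ("PE10") with three selectable functions; the
+ * function array is terminated by an empty sentinel entry.
+ */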
+
+/*
+ * The sunXi PIO registers are organized as follows:
+ * 0x00 - 0x0c Muxing values.
+ *             8 pins per register, each pin having a 4-bit value
+ * 0x10                Pin values
+ *             32 bits per register, each pin corresponding to one bit
+ * 0x14 - 0x18 Drive level
+ *             16 pins per register, each pin having a 2-bit value
+ * 0x1c - 0x20 Pull-up values
+ *             16 pins per register, each pin having a 2-bit value
+ *
+ * This is for the first bank. Each bank will have the same layout,
+ * with an offset being a multiple of 0x24.
+ *
+ * The following functions calculate from the pin number the register
+ * and the bit offset that we should access.
+ */
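+
+/*
+ * Worked example: pin 138 ("PE10") is in bank 4, whose registers start
+ * at 4 * 0x24 = 0x90. Its mux value lives in the register at
+ * 0x90 + (10 / 8) * 4 = 0x94, bit offset (10 % 8) * 4 = 8, and its pull
+ * setting lives at 0x90 + 0x1c + (10 / 16) * 4 = 0xac, bit offset
+ * (10 % 16) * 2 = 20.
+ */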
+static inline u32 sunxi_mux_reg(u16 pin)
+{
+       u8 bank = pin / PINS_PER_BANK;
+       u32 offset = bank * BANK_MEM_SIZE;
+       offset += MUX_REGS_OFFSET;
+       offset += pin % PINS_PER_BANK / MUX_PINS_PER_REG * 0x04;
+       return round_down(offset, 4);
+}
+
+static inline u32 sunxi_mux_offset(u16 pin)
+{
+       u32 pin_num = pin % MUX_PINS_PER_REG;
+       return pin_num * MUX_PINS_BITS;
+}
+
+static inline u32 sunxi_data_reg(u16 pin)
+{
+       u8 bank = pin / PINS_PER_BANK;
+       u32 offset = bank * BANK_MEM_SIZE;
+       offset += DATA_REGS_OFFSET;
+       offset += pin % PINS_PER_BANK / DATA_PINS_PER_REG * 0x04;
+       return round_down(offset, 4);
+}
+
+static inline u32 sunxi_data_offset(u16 pin)
+{
+       u32 pin_num = pin % DATA_PINS_PER_REG;
+       return pin_num * DATA_PINS_BITS;
+}
+
+static inline u32 sunxi_dlevel_reg(u16 pin)
+{
+       u8 bank = pin / PINS_PER_BANK;
+       u32 offset = bank * BANK_MEM_SIZE;
+       offset += DLEVEL_REGS_OFFSET;
+       offset += pin % PINS_PER_BANK / DLEVEL_PINS_PER_REG * 0x04;
+       return round_down(offset, 4);
+}
+
+static inline u32 sunxi_dlevel_offset(u16 pin)
+{
+       u32 pin_num = pin % DLEVEL_PINS_PER_REG;
+       return pin_num * DLEVEL_PINS_BITS;
+}
+
+static inline u32 sunxi_pull_reg(u16 pin)
+{
+       u8 bank = pin / PINS_PER_BANK;
+       u32 offset = bank * BANK_MEM_SIZE;
+       offset += PULL_REGS_OFFSET;
+       offset += pin % PINS_PER_BANK / PULL_PINS_PER_REG * 0x04;
+       return round_down(offset, 4);
+}
+
+static inline u32 sunxi_pull_offset(u16 pin)
+{
+       u32 pin_num = pin % PULL_PINS_PER_REG;
+       return pin_num * PULL_PINS_BITS;
+}
+
+#endif /* __PINCTRL_SUNXI_H */
index ae1e4bb3259da953b448df78d31df92b79fa8a9b..f195d77a35729f6496914be6d91a7236f136084d 100644 (file)
@@ -201,6 +201,7 @@ static const struct cfg_param {
        {"nvidia,open-drain",           TEGRA_PINCONF_PARAM_OPEN_DRAIN},
        {"nvidia,lock",                 TEGRA_PINCONF_PARAM_LOCK},
        {"nvidia,io-reset",             TEGRA_PINCONF_PARAM_IORESET},
+       {"nvidia,rcv-sel",              TEGRA_PINCONF_PARAM_RCV_SEL},
        {"nvidia,high-speed-mode",      TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE},
        {"nvidia,schmitt",              TEGRA_PINCONF_PARAM_SCHMITT},
        {"nvidia,low-power-mode",       TEGRA_PINCONF_PARAM_LOW_POWER_MODE},
@@ -208,6 +209,7 @@ static const struct cfg_param {
        {"nvidia,pull-up-strength",     TEGRA_PINCONF_PARAM_DRIVE_UP_STRENGTH},
        {"nvidia,slew-rate-falling",    TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING},
        {"nvidia,slew-rate-rising",     TEGRA_PINCONF_PARAM_SLEW_RATE_RISING},
+       {"nvidia,drive-type",           TEGRA_PINCONF_PARAM_DRIVE_TYPE},
 };
 
 static int tegra_pinctrl_dt_subnode_to_map(struct device *dev,
@@ -450,6 +452,12 @@ static int tegra_pinconf_reg(struct tegra_pmx *pmx,
                *bit = g->ioreset_bit;
                *width = 1;
                break;
+       case TEGRA_PINCONF_PARAM_RCV_SEL:
+               *bank = g->rcv_sel_bank;
+               *reg = g->rcv_sel_reg;
+               *bit = g->rcv_sel_bit;
+               *width = 1;
+               break;
        case TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE:
                *bank = g->drv_bank;
                *reg = g->drv_reg;
@@ -492,6 +500,12 @@ static int tegra_pinconf_reg(struct tegra_pmx *pmx,
                *bit = g->slwr_bit;
                *width = g->slwr_width;
                break;
+       case TEGRA_PINCONF_PARAM_DRIVE_TYPE:
+               *bank = g->drvtype_bank;
+               *reg = g->drvtype_reg;
+               *bit = g->drvtype_bit;
+               *width = 2;
+               break;
        default:
                dev_err(pmx->dev, "Invalid config param %04x\n", param);
                return -ENOTSUPP;
index 62e380965c68f3a6db0820a91373b664d8789054..817f7061dc4cd0b332488dc92c2c647cd19bc149 100644 (file)
@@ -30,6 +30,8 @@ enum tegra_pinconf_param {
        /* argument: Boolean */
        TEGRA_PINCONF_PARAM_IORESET,
        /* argument: Boolean */
+       TEGRA_PINCONF_PARAM_RCV_SEL,
+       /* argument: Boolean */
        TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE,
        /* argument: Boolean */
        TEGRA_PINCONF_PARAM_SCHMITT,
@@ -43,6 +45,8 @@ enum tegra_pinconf_param {
        TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING,
        /* argument: Integer, range is HW-dependant */
        TEGRA_PINCONF_PARAM_SLEW_RATE_RISING,
+       /* argument: Integer, range is HW-dependant */
+       TEGRA_PINCONF_PARAM_DRIVE_TYPE,
 };
 
 enum tegra_pinconf_pull {
@@ -95,6 +99,9 @@ struct tegra_function {
  * @ioreset_reg:       IO reset register offset. -1 if unsupported.
  * @ioreset_bank:      IO reset register bank. 0 if unsupported.
  * @ioreset_bit:       IO reset register bit. 0 if unsupported.
+ * @rcv_sel_reg:       Receiver select register offset. -1 if unsupported.
+ * @rcv_sel_bank:      Receiver select bank. 0 if unsupported.
+ * @rcv_sel_bit:       Receiver select bit. 0 if unsupported.
  * @drv_reg:           Drive fields register offset. -1 if unsupported.
  *                     This register contains the hsm, schmitt, lpmd, drvdn,
  *                     drvup, slwr, and slwf parameters.
@@ -110,6 +117,9 @@ struct tegra_function {
  * @slwr_width:                Slew Rising field width. 0 if unsupported.
  * @slwf_bit:          Slew Falling register bit. 0 if unsupported.
  * @slwf_width:                Slew Falling field width. 0 if unsupported.
+ * @drvtype_reg:       Drive type fields register offset. -1 if unsupported.
+ * @drvtype_bank:      Drive type fields register bank. 0 if unsupported.
+ * @drvtype_bit:       Drive type register bit. 0 if unsupported.
  *
  * A representation of a group of pins (possibly just one pin) in the Tegra
  * pin controller. Each group allows some parameter or parameters to be
@@ -131,15 +141,19 @@ struct tegra_pingroup {
        s16 odrain_reg;
        s16 lock_reg;
        s16 ioreset_reg;
+       s16 rcv_sel_reg;
        s16 drv_reg;
+       s16 drvtype_reg;
        u32 mux_bank:2;
        u32 pupd_bank:2;
        u32 tri_bank:2;
        u32 einput_bank:2;
        u32 odrain_bank:2;
        u32 ioreset_bank:2;
+       u32 rcv_sel_bank:2;
        u32 lock_bank:2;
        u32 drv_bank:2;
+       u32 drvtype_bank:2;
        u32 mux_bit:5;
        u32 pupd_bit:5;
        u32 tri_bit:5;
@@ -147,6 +161,7 @@ struct tegra_pingroup {
        u32 odrain_bit:5;
        u32 lock_bit:5;
        u32 ioreset_bit:5;
+       u32 rcv_sel_bit:5;
        u32 hsm_bit:5;
        u32 schmitt_bit:5;
        u32 lpmd_bit:5;
@@ -154,6 +169,7 @@ struct tegra_pingroup {
        u32 drvup_bit:5;
        u32 slwr_bit:5;
        u32 slwf_bit:5;
+       u32 drvtype_bit:5;
        u32 drvdn_width:6;
        u32 drvup_width:6;
        u32 slwr_width:6;
diff --git a/drivers/pinctrl/pinctrl-tegra114.c b/drivers/pinctrl/pinctrl-tegra114.c
new file mode 100644 (file)
index 0000000..622c485
--- /dev/null
@@ -0,0 +1,2769 @@
+/*
+ * Pinctrl data and driver for the NVIDIA Tegra114 pinmux
+ *
+ * Copyright (c) 2012-2013, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * Author: Pritesh Raithatha <praithatha@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "pinctrl-tegra.h"
+
+/*
+ * Most pins affected by the pinmux can also be GPIOs. Define these first.
+ * These must match how the GPIO driver names/numbers its pins.
+ */
+#define _GPIO(offset)                          (offset)
+
+#define TEGRA_PIN_CLK_32K_OUT_PA0              _GPIO(0)
+#define TEGRA_PIN_UART3_CTS_N_PA1              _GPIO(1)
+#define TEGRA_PIN_DAP2_FS_PA2                  _GPIO(2)
+#define TEGRA_PIN_DAP2_SCLK_PA3                        _GPIO(3)
+#define TEGRA_PIN_DAP2_DIN_PA4                 _GPIO(4)
+#define TEGRA_PIN_DAP2_DOUT_PA5                        _GPIO(5)
+#define TEGRA_PIN_SDMMC3_CLK_PA6               _GPIO(6)
+#define TEGRA_PIN_SDMMC3_CMD_PA7               _GPIO(7)
+#define TEGRA_PIN_GMI_A17_PB0                  _GPIO(8)
+#define TEGRA_PIN_GMI_A18_PB1                  _GPIO(9)
+#define TEGRA_PIN_SDMMC3_DAT3_PB4              _GPIO(12)
+#define TEGRA_PIN_SDMMC3_DAT2_PB5              _GPIO(13)
+#define TEGRA_PIN_SDMMC3_DAT1_PB6              _GPIO(14)
+#define TEGRA_PIN_SDMMC3_DAT0_PB7              _GPIO(15)
+#define TEGRA_PIN_UART3_RTS_N_PC0              _GPIO(16)
+#define TEGRA_PIN_UART2_TXD_PC2                        _GPIO(18)
+#define TEGRA_PIN_UART2_RXD_PC3                        _GPIO(19)
+#define TEGRA_PIN_GEN1_I2C_SCL_PC4             _GPIO(20)
+#define TEGRA_PIN_GEN1_I2C_SDA_PC5             _GPIO(21)
+#define TEGRA_PIN_GMI_WP_N_PC7                 _GPIO(23)
+#define TEGRA_PIN_GMI_AD0_PG0                  _GPIO(48)
+#define TEGRA_PIN_GMI_AD1_PG1                  _GPIO(49)
+#define TEGRA_PIN_GMI_AD2_PG2                  _GPIO(50)
+#define TEGRA_PIN_GMI_AD3_PG3                  _GPIO(51)
+#define TEGRA_PIN_GMI_AD4_PG4                  _GPIO(52)
+#define TEGRA_PIN_GMI_AD5_PG5                  _GPIO(53)
+#define TEGRA_PIN_GMI_AD6_PG6                  _GPIO(54)
+#define TEGRA_PIN_GMI_AD7_PG7                  _GPIO(55)
+#define TEGRA_PIN_GMI_AD8_PH0                  _GPIO(56)
+#define TEGRA_PIN_GMI_AD9_PH1                  _GPIO(57)
+#define TEGRA_PIN_GMI_AD10_PH2                 _GPIO(58)
+#define TEGRA_PIN_GMI_AD11_PH3                 _GPIO(59)
+#define TEGRA_PIN_GMI_AD12_PH4                 _GPIO(60)
+#define TEGRA_PIN_GMI_AD13_PH5                 _GPIO(61)
+#define TEGRA_PIN_GMI_AD14_PH6                 _GPIO(62)
+#define TEGRA_PIN_GMI_AD15_PH7                 _GPIO(63)
+#define TEGRA_PIN_GMI_WR_N_PI0                 _GPIO(64)
+#define TEGRA_PIN_GMI_OE_N_PI1                 _GPIO(65)
+#define TEGRA_PIN_GMI_CS6_N_PI3                        _GPIO(67)
+#define TEGRA_PIN_GMI_RST_N_PI4                        _GPIO(68)
+#define TEGRA_PIN_GMI_IORDY_PI5                        _GPIO(69)
+#define TEGRA_PIN_GMI_CS7_N_PI6                        _GPIO(70)
+#define TEGRA_PIN_GMI_WAIT_PI7                 _GPIO(71)
+#define TEGRA_PIN_GMI_CS0_N_PJ0                        _GPIO(72)
+#define TEGRA_PIN_GMI_CS1_N_PJ2                        _GPIO(74)
+#define TEGRA_PIN_GMI_DQS_P_PJ3                        _GPIO(75)
+#define TEGRA_PIN_UART2_CTS_N_PJ5              _GPIO(77)
+#define TEGRA_PIN_UART2_RTS_N_PJ6              _GPIO(78)
+#define TEGRA_PIN_GMI_A16_PJ7                  _GPIO(79)
+#define TEGRA_PIN_GMI_ADV_N_PK0                        _GPIO(80)
+#define TEGRA_PIN_GMI_CLK_PK1                  _GPIO(81)
+#define TEGRA_PIN_GMI_CS4_N_PK2                        _GPIO(82)
+#define TEGRA_PIN_GMI_CS2_N_PK3                        _GPIO(83)
+#define TEGRA_PIN_GMI_CS3_N_PK4                        _GPIO(84)
+#define TEGRA_PIN_SPDIF_OUT_PK5                        _GPIO(85)
+#define TEGRA_PIN_SPDIF_IN_PK6                 _GPIO(86)
+#define TEGRA_PIN_GMI_A19_PK7                  _GPIO(87)
+#define TEGRA_PIN_DAP1_FS_PN0                  _GPIO(104)
+#define TEGRA_PIN_DAP1_DIN_PN1                 _GPIO(105)
+#define TEGRA_PIN_DAP1_DOUT_PN2                        _GPIO(106)
+#define TEGRA_PIN_DAP1_SCLK_PN3                        _GPIO(107)
+#define TEGRA_PIN_USB_VBUS_EN0_PN4             _GPIO(108)
+#define TEGRA_PIN_USB_VBUS_EN1_PN5             _GPIO(109)
+#define TEGRA_PIN_HDMI_INT_PN7                 _GPIO(111)
+#define TEGRA_PIN_ULPI_DATA7_PO0               _GPIO(112)
+#define TEGRA_PIN_ULPI_DATA0_PO1               _GPIO(113)
+#define TEGRA_PIN_ULPI_DATA1_PO2               _GPIO(114)
+#define TEGRA_PIN_ULPI_DATA2_PO3               _GPIO(115)
+#define TEGRA_PIN_ULPI_DATA3_PO4               _GPIO(116)
+#define TEGRA_PIN_ULPI_DATA4_PO5               _GPIO(117)
+#define TEGRA_PIN_ULPI_DATA5_PO6               _GPIO(118)
+#define TEGRA_PIN_ULPI_DATA6_PO7               _GPIO(119)
+#define TEGRA_PIN_DAP3_FS_PP0                  _GPIO(120)
+#define TEGRA_PIN_DAP3_DIN_PP1                 _GPIO(121)
+#define TEGRA_PIN_DAP3_DOUT_PP2                        _GPIO(122)
+#define TEGRA_PIN_DAP3_SCLK_PP3                        _GPIO(123)
+#define TEGRA_PIN_DAP4_FS_PP4                  _GPIO(124)
+#define TEGRA_PIN_DAP4_DIN_PP5                 _GPIO(125)
+#define TEGRA_PIN_DAP4_DOUT_PP6                        _GPIO(126)
+#define TEGRA_PIN_DAP4_SCLK_PP7                        _GPIO(127)
+#define TEGRA_PIN_KB_COL0_PQ0                  _GPIO(128)
+#define TEGRA_PIN_KB_COL1_PQ1                  _GPIO(129)
+#define TEGRA_PIN_KB_COL2_PQ2                  _GPIO(130)
+#define TEGRA_PIN_KB_COL3_PQ3                  _GPIO(131)
+#define TEGRA_PIN_KB_COL4_PQ4                  _GPIO(132)
+#define TEGRA_PIN_KB_COL5_PQ5                  _GPIO(133)
+#define TEGRA_PIN_KB_COL6_PQ6                  _GPIO(134)
+#define TEGRA_PIN_KB_COL7_PQ7                  _GPIO(135)
+#define TEGRA_PIN_KB_ROW0_PR0                  _GPIO(136)
+#define TEGRA_PIN_KB_ROW1_PR1                  _GPIO(137)
+#define TEGRA_PIN_KB_ROW2_PR2                  _GPIO(138)
+#define TEGRA_PIN_KB_ROW3_PR3                  _GPIO(139)
+#define TEGRA_PIN_KB_ROW4_PR4                  _GPIO(140)
+#define TEGRA_PIN_KB_ROW5_PR5                  _GPIO(141)
+#define TEGRA_PIN_KB_ROW6_PR6                  _GPIO(142)
+#define TEGRA_PIN_KB_ROW7_PR7                  _GPIO(143)
+#define TEGRA_PIN_KB_ROW8_PS0                  _GPIO(144)
+#define TEGRA_PIN_KB_ROW9_PS1                  _GPIO(145)
+#define TEGRA_PIN_KB_ROW10_PS2                 _GPIO(146)
+#define TEGRA_PIN_GEN2_I2C_SCL_PT5             _GPIO(157)
+#define TEGRA_PIN_GEN2_I2C_SDA_PT6             _GPIO(158)
+#define TEGRA_PIN_SDMMC4_CMD_PT7               _GPIO(159)
+#define TEGRA_PIN_PU0                          _GPIO(160)
+#define TEGRA_PIN_PU1                          _GPIO(161)
+#define TEGRA_PIN_PU2                          _GPIO(162)
+#define TEGRA_PIN_PU3                          _GPIO(163)
+#define TEGRA_PIN_PU4                          _GPIO(164)
+#define TEGRA_PIN_PU5                          _GPIO(165)
+#define TEGRA_PIN_PU6                          _GPIO(166)
+#define TEGRA_PIN_PV0                          _GPIO(168)
+#define TEGRA_PIN_PV1                          _GPIO(169)
+#define TEGRA_PIN_SDMMC3_CD_N_PV2              _GPIO(170)
+#define TEGRA_PIN_SDMMC1_WP_N_PV3              _GPIO(171)
+#define TEGRA_PIN_DDC_SCL_PV4                  _GPIO(172)
+#define TEGRA_PIN_DDC_SDA_PV5                  _GPIO(173)
+#define TEGRA_PIN_GPIO_W2_AUD_PW2              _GPIO(178)
+#define TEGRA_PIN_GPIO_W3_AUD_PW3              _GPIO(179)
+#define TEGRA_PIN_CLK1_OUT_PW4                 _GPIO(180)
+#define TEGRA_PIN_CLK2_OUT_PW5                 _GPIO(181)
+#define TEGRA_PIN_UART3_TXD_PW6                        _GPIO(182)
+#define TEGRA_PIN_UART3_RXD_PW7                        _GPIO(183)
+#define TEGRA_PIN_DVFS_PWM_PX0                 _GPIO(184)
+#define TEGRA_PIN_GPIO_X1_AUD_PX1              _GPIO(185)
+#define TEGRA_PIN_DVFS_CLK_PX2                 _GPIO(186)
+#define TEGRA_PIN_GPIO_X3_AUD_PX3              _GPIO(187)
+#define TEGRA_PIN_GPIO_X4_AUD_PX4              _GPIO(188)
+#define TEGRA_PIN_GPIO_X5_AUD_PX5              _GPIO(189)
+#define TEGRA_PIN_GPIO_X6_AUD_PX6              _GPIO(190)
+#define TEGRA_PIN_GPIO_X7_AUD_PX7              _GPIO(191)
+#define TEGRA_PIN_ULPI_CLK_PY0                 _GPIO(192)
+#define TEGRA_PIN_ULPI_DIR_PY1                 _GPIO(193)
+#define TEGRA_PIN_ULPI_NXT_PY2                 _GPIO(194)
+#define TEGRA_PIN_ULPI_STP_PY3                 _GPIO(195)
+#define TEGRA_PIN_SDMMC1_DAT3_PY4              _GPIO(196)
+#define TEGRA_PIN_SDMMC1_DAT2_PY5              _GPIO(197)
+#define TEGRA_PIN_SDMMC1_DAT1_PY6              _GPIO(198)
+#define TEGRA_PIN_SDMMC1_DAT0_PY7              _GPIO(199)
+#define TEGRA_PIN_SDMMC1_CLK_PZ0               _GPIO(200)
+#define TEGRA_PIN_SDMMC1_CMD_PZ1               _GPIO(201)
+#define TEGRA_PIN_SYS_CLK_REQ_PZ5              _GPIO(205)
+#define TEGRA_PIN_PWR_I2C_SCL_PZ6              _GPIO(206)
+#define TEGRA_PIN_PWR_I2C_SDA_PZ7              _GPIO(207)
+#define TEGRA_PIN_SDMMC4_DAT0_PAA0             _GPIO(208)
+#define TEGRA_PIN_SDMMC4_DAT1_PAA1             _GPIO(209)
+#define TEGRA_PIN_SDMMC4_DAT2_PAA2             _GPIO(210)
+#define TEGRA_PIN_SDMMC4_DAT3_PAA3             _GPIO(211)
+#define TEGRA_PIN_SDMMC4_DAT4_PAA4             _GPIO(212)
+#define TEGRA_PIN_SDMMC4_DAT5_PAA5             _GPIO(213)
+#define TEGRA_PIN_SDMMC4_DAT6_PAA6             _GPIO(214)
+#define TEGRA_PIN_SDMMC4_DAT7_PAA7             _GPIO(215)
+#define TEGRA_PIN_PBB0                         _GPIO(216)
+#define TEGRA_PIN_CAM_I2C_SCL_PBB1             _GPIO(217)
+#define TEGRA_PIN_CAM_I2C_SDA_PBB2             _GPIO(218)
+#define TEGRA_PIN_PBB3                         _GPIO(219)
+#define TEGRA_PIN_PBB4                         _GPIO(220)
+#define TEGRA_PIN_PBB5                         _GPIO(221)
+#define TEGRA_PIN_PBB6                         _GPIO(222)
+#define TEGRA_PIN_PBB7                         _GPIO(223)
+#define TEGRA_PIN_CAM_MCLK_PCC0                        _GPIO(224)
+#define TEGRA_PIN_PCC1                         _GPIO(225)
+#define TEGRA_PIN_PCC2                         _GPIO(226)
+#define TEGRA_PIN_SDMMC4_CLK_PCC4              _GPIO(228)
+#define TEGRA_PIN_CLK2_REQ_PCC5                        _GPIO(229)
+#define TEGRA_PIN_CLK3_OUT_PEE0                        _GPIO(240)
+#define TEGRA_PIN_CLK3_REQ_PEE1                        _GPIO(241)
+#define TEGRA_PIN_CLK1_REQ_PEE2                        _GPIO(242)
+#define TEGRA_PIN_HDMI_CEC_PEE3                        _GPIO(243)
+#define TEGRA_PIN_SDMMC3_CLK_LB_OUT_PEE4       _GPIO(244)
+#define TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5                _GPIO(245)
+
+/* All non-GPIO pins follow */
+#define NUM_GPIOS      (TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5 + 1)
+#define _PIN(offset)   (NUM_GPIOS + (offset))
+
+/* Non-GPIO pins */
+#define TEGRA_PIN_CORE_PWR_REQ                 _PIN(0)
+#define TEGRA_PIN_CPU_PWR_REQ                  _PIN(1)
+#define TEGRA_PIN_PWR_INT_N                    _PIN(2)
+#define TEGRA_PIN_RESET_OUT_N                  _PIN(3)
+#define TEGRA_PIN_OWR                          _PIN(4)
+
+static const struct pinctrl_pin_desc  tegra114_pins[] = {
+       PINCTRL_PIN(TEGRA_PIN_CLK_32K_OUT_PA0, "CLK_32K_OUT PA0"),
+       PINCTRL_PIN(TEGRA_PIN_UART3_CTS_N_PA1, "UART3_CTS_N PA1"),
+       PINCTRL_PIN(TEGRA_PIN_DAP2_FS_PA2, "DAP2_FS PA2"),
+       PINCTRL_PIN(TEGRA_PIN_DAP2_SCLK_PA3, "DAP2_SCLK PA3"),
+       PINCTRL_PIN(TEGRA_PIN_DAP2_DIN_PA4, "DAP2_DIN PA4"),
+       PINCTRL_PIN(TEGRA_PIN_DAP2_DOUT_PA5, "DAP2_DOUT PA5"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_PA6, "SDMMC3_CLK PA6"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC3_CMD_PA7, "SDMMC3_CMD PA7"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_A17_PB0, "GMI_A17 PB0"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_A18_PB1, "GMI_A18 PB1"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT3_PB4, "SDMMC3_DAT3 PB4"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT2_PB5, "SDMMC3_DAT2 PB5"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT1_PB6, "SDMMC3_DAT1 PB6"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC3_DAT0_PB7, "SDMMC3_DAT0 PB7"),
+       PINCTRL_PIN(TEGRA_PIN_UART3_RTS_N_PC0, "UART3_RTS_N PC0"),
+       PINCTRL_PIN(TEGRA_PIN_UART2_TXD_PC2, "UART2_TXD PC2"),
+       PINCTRL_PIN(TEGRA_PIN_UART2_RXD_PC3, "UART2_RXD PC3"),
+       PINCTRL_PIN(TEGRA_PIN_GEN1_I2C_SCL_PC4, "GEN1_I2C_SCL PC4"),
+       PINCTRL_PIN(TEGRA_PIN_GEN1_I2C_SDA_PC5, "GEN1_I2C_SDA PC5"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_WP_N_PC7, "GMI_WP_N PC7"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_AD0_PG0, "GMI_AD0 PG0"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_AD1_PG1, "GMI_AD1 PG1"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_AD2_PG2, "GMI_AD2 PG2"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_AD3_PG3, "GMI_AD3 PG3"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_AD4_PG4, "GMI_AD4 PG4"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_AD5_PG5, "GMI_AD5 PG5"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_AD6_PG6, "GMI_AD6 PG6"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_AD7_PG7, "GMI_AD7 PG7"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_AD8_PH0, "GMI_AD8 PH0"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_AD9_PH1, "GMI_AD9 PH1"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_AD10_PH2, "GMI_AD10 PH2"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_AD11_PH3, "GMI_AD11 PH3"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_AD12_PH4, "GMI_AD12 PH4"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_AD13_PH5, "GMI_AD13 PH5"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_AD14_PH6, "GMI_AD14 PH6"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_AD15_PH7, "GMI_AD15 PH7"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_WR_N_PI0, "GMI_WR_N PI0"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_OE_N_PI1, "GMI_OE_N PI1"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_CS6_N_PI3, "GMI_CS6_N PI3"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_RST_N_PI4, "GMI_RST_N PI4"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_IORDY_PI5, "GMI_IORDY PI5"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_CS7_N_PI6, "GMI_CS7_N PI6"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_WAIT_PI7, "GMI_WAIT PI7"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_CS0_N_PJ0, "GMI_CS0_N PJ0"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_CS1_N_PJ2, "GMI_CS1_N PJ2"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_DQS_P_PJ3, "GMI_DQS_P PJ3"),
+       PINCTRL_PIN(TEGRA_PIN_UART2_CTS_N_PJ5, "UART2_CTS_N PJ5"),
+       PINCTRL_PIN(TEGRA_PIN_UART2_RTS_N_PJ6, "UART2_RTS_N PJ6"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_A16_PJ7, "GMI_A16 PJ7"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_ADV_N_PK0, "GMI_ADV_N PK0"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_CLK_PK1, "GMI_CLK PK1"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_CS4_N_PK2, "GMI_CS4_N PK2"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_CS2_N_PK3, "GMI_CS2_N PK3"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_CS3_N_PK4, "GMI_CS3_N PK4"),
+       PINCTRL_PIN(TEGRA_PIN_SPDIF_OUT_PK5, "SPDIF_OUT PK5"),
+       PINCTRL_PIN(TEGRA_PIN_SPDIF_IN_PK6, "SPDIF_IN PK6"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_A19_PK7, "GMI_A19 PK7"),
+       PINCTRL_PIN(TEGRA_PIN_DAP1_FS_PN0, "DAP1_FS PN0"),
+       PINCTRL_PIN(TEGRA_PIN_DAP1_DIN_PN1, "DAP1_DIN PN1"),
+       PINCTRL_PIN(TEGRA_PIN_DAP1_DOUT_PN2, "DAP1_DOUT PN2"),
+       PINCTRL_PIN(TEGRA_PIN_DAP1_SCLK_PN3, "DAP1_SCLK PN3"),
+       PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN0_PN4, "USB_VBUS_EN0 PN4"),
+       PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN1_PN5, "USB_VBUS_EN1 PN5"),
+       PINCTRL_PIN(TEGRA_PIN_HDMI_INT_PN7, "HDMI_INT PN7"),
+       PINCTRL_PIN(TEGRA_PIN_ULPI_DATA7_PO0, "ULPI_DATA7 PO0"),
+       PINCTRL_PIN(TEGRA_PIN_ULPI_DATA0_PO1, "ULPI_DATA0 PO1"),
+       PINCTRL_PIN(TEGRA_PIN_ULPI_DATA1_PO2, "ULPI_DATA1 PO2"),
+       PINCTRL_PIN(TEGRA_PIN_ULPI_DATA2_PO3, "ULPI_DATA2 PO3"),
+       PINCTRL_PIN(TEGRA_PIN_ULPI_DATA3_PO4, "ULPI_DATA3 PO4"),
+       PINCTRL_PIN(TEGRA_PIN_ULPI_DATA4_PO5, "ULPI_DATA4 PO5"),
+       PINCTRL_PIN(TEGRA_PIN_ULPI_DATA5_PO6, "ULPI_DATA5 PO6"),
+       PINCTRL_PIN(TEGRA_PIN_ULPI_DATA6_PO7, "ULPI_DATA6 PO7"),
+       PINCTRL_PIN(TEGRA_PIN_DAP3_FS_PP0, "DAP3_FS PP0"),
+       PINCTRL_PIN(TEGRA_PIN_DAP3_DIN_PP1, "DAP3_DIN PP1"),
+       PINCTRL_PIN(TEGRA_PIN_DAP3_DOUT_PP2, "DAP3_DOUT PP2"),
+       PINCTRL_PIN(TEGRA_PIN_DAP3_SCLK_PP3, "DAP3_SCLK PP3"),
+       PINCTRL_PIN(TEGRA_PIN_DAP4_FS_PP4, "DAP4_FS PP4"),
+       PINCTRL_PIN(TEGRA_PIN_DAP4_DIN_PP5, "DAP4_DIN PP5"),
+       PINCTRL_PIN(TEGRA_PIN_DAP4_DOUT_PP6, "DAP4_DOUT PP6"),
+       PINCTRL_PIN(TEGRA_PIN_DAP4_SCLK_PP7, "DAP4_SCLK PP7"),
+       PINCTRL_PIN(TEGRA_PIN_KB_COL0_PQ0, "KB_COL0 PQ0"),
+       PINCTRL_PIN(TEGRA_PIN_KB_COL1_PQ1, "KB_COL1 PQ1"),
+       PINCTRL_PIN(TEGRA_PIN_KB_COL2_PQ2, "KB_COL2 PQ2"),
+       PINCTRL_PIN(TEGRA_PIN_KB_COL3_PQ3, "KB_COL3 PQ3"),
+       PINCTRL_PIN(TEGRA_PIN_KB_COL4_PQ4, "KB_COL4 PQ4"),
+       PINCTRL_PIN(TEGRA_PIN_KB_COL5_PQ5, "KB_COL5 PQ5"),
+       PINCTRL_PIN(TEGRA_PIN_KB_COL6_PQ6, "KB_COL6 PQ6"),
+       PINCTRL_PIN(TEGRA_PIN_KB_COL7_PQ7, "KB_COL7 PQ7"),
+       PINCTRL_PIN(TEGRA_PIN_KB_ROW0_PR0, "KB_ROW0 PR0"),
+       PINCTRL_PIN(TEGRA_PIN_KB_ROW1_PR1, "KB_ROW1 PR1"),
+       PINCTRL_PIN(TEGRA_PIN_KB_ROW2_PR2, "KB_ROW2 PR2"),
+       PINCTRL_PIN(TEGRA_PIN_KB_ROW3_PR3, "KB_ROW3 PR3"),
+       PINCTRL_PIN(TEGRA_PIN_KB_ROW4_PR4, "KB_ROW4 PR4"),
+       PINCTRL_PIN(TEGRA_PIN_KB_ROW5_PR5, "KB_ROW5 PR5"),
+       PINCTRL_PIN(TEGRA_PIN_KB_ROW6_PR6, "KB_ROW6 PR6"),
+       PINCTRL_PIN(TEGRA_PIN_KB_ROW7_PR7, "KB_ROW7 PR7"),
+       PINCTRL_PIN(TEGRA_PIN_KB_ROW8_PS0, "KB_ROW8 PS0"),
+       PINCTRL_PIN(TEGRA_PIN_KB_ROW9_PS1, "KB_ROW9 PS1"),
+       PINCTRL_PIN(TEGRA_PIN_KB_ROW10_PS2, "KB_ROW10 PS2"),
+       PINCTRL_PIN(TEGRA_PIN_GEN2_I2C_SCL_PT5, "GEN2_I2C_SCL PT5"),
+       PINCTRL_PIN(TEGRA_PIN_GEN2_I2C_SDA_PT6, "GEN2_I2C_SDA PT6"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC4_CMD_PT7, "SDMMC4_CMD PT7"),
+       PINCTRL_PIN(TEGRA_PIN_PU0, "PU0"),
+       PINCTRL_PIN(TEGRA_PIN_PU1, "PU1"),
+       PINCTRL_PIN(TEGRA_PIN_PU2, "PU2"),
+       PINCTRL_PIN(TEGRA_PIN_PU3, "PU3"),
+       PINCTRL_PIN(TEGRA_PIN_PU4, "PU4"),
+       PINCTRL_PIN(TEGRA_PIN_PU5, "PU5"),
+       PINCTRL_PIN(TEGRA_PIN_PU6, "PU6"),
+       PINCTRL_PIN(TEGRA_PIN_PV0, "PV0"),
+       PINCTRL_PIN(TEGRA_PIN_PV1, "PV1"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC3_CD_N_PV2, "SDMMC3_CD_N PV2"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC1_WP_N_PV3, "SDMMC1_WP_N PV3"),
+       PINCTRL_PIN(TEGRA_PIN_DDC_SCL_PV4, "DDC_SCL PV4"),
+       PINCTRL_PIN(TEGRA_PIN_DDC_SDA_PV5, "DDC_SDA PV5"),
+       PINCTRL_PIN(TEGRA_PIN_GPIO_W2_AUD_PW2, "GPIO_W2_AUD PW2"),
+       PINCTRL_PIN(TEGRA_PIN_GPIO_W3_AUD_PW3, "GPIO_W3_AUD PW3"),
+       PINCTRL_PIN(TEGRA_PIN_CLK1_OUT_PW4, "CLK1_OUT PW4"),
+       PINCTRL_PIN(TEGRA_PIN_CLK2_OUT_PW5, "CLK2_OUT PW5"),
+       PINCTRL_PIN(TEGRA_PIN_UART3_TXD_PW6, "UART3_TXD PW6"),
+       PINCTRL_PIN(TEGRA_PIN_UART3_RXD_PW7, "UART3_RXD PW7"),
+       PINCTRL_PIN(TEGRA_PIN_DVFS_PWM_PX0, "DVFS_PWM PX0"),
+       PINCTRL_PIN(TEGRA_PIN_GPIO_X1_AUD_PX1, "GPIO_X1_AUD PX1"),
+       PINCTRL_PIN(TEGRA_PIN_DVFS_CLK_PX2, "DVFS_CLK PX2"),
+       PINCTRL_PIN(TEGRA_PIN_GPIO_X3_AUD_PX3, "GPIO_X3_AUD PX3"),
+       PINCTRL_PIN(TEGRA_PIN_GPIO_X4_AUD_PX4, "GPIO_X4_AUD PX4"),
+       PINCTRL_PIN(TEGRA_PIN_GPIO_X5_AUD_PX5, "GPIO_X5_AUD PX5"),
+       PINCTRL_PIN(TEGRA_PIN_GPIO_X6_AUD_PX6, "GPIO_X6_AUD PX6"),
+       PINCTRL_PIN(TEGRA_PIN_GPIO_X7_AUD_PX7, "GPIO_X7_AUD PX7"),
+       PINCTRL_PIN(TEGRA_PIN_ULPI_CLK_PY0, "ULPI_CLK PY0"),
+       PINCTRL_PIN(TEGRA_PIN_ULPI_DIR_PY1, "ULPI_DIR PY1"),
+       PINCTRL_PIN(TEGRA_PIN_ULPI_NXT_PY2, "ULPI_NXT PY2"),
+       PINCTRL_PIN(TEGRA_PIN_ULPI_STP_PY3, "ULPI_STP PY3"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT3_PY4, "SDMMC1_DAT3 PY4"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT2_PY5, "SDMMC1_DAT2 PY5"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT1_PY6, "SDMMC1_DAT1 PY6"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC1_DAT0_PY7, "SDMMC1_DAT0 PY7"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC1_CLK_PZ0, "SDMMC1_CLK PZ0"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC1_CMD_PZ1, "SDMMC1_CMD PZ1"),
+       PINCTRL_PIN(TEGRA_PIN_SYS_CLK_REQ_PZ5, "SYS_CLK_REQ PZ5"),
+       PINCTRL_PIN(TEGRA_PIN_PWR_I2C_SCL_PZ6, "PWR_I2C_SCL PZ6"),
+       PINCTRL_PIN(TEGRA_PIN_PWR_I2C_SDA_PZ7, "PWR_I2C_SDA PZ7"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT0_PAA0, "SDMMC4_DAT0 PAA0"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT1_PAA1, "SDMMC4_DAT1 PAA1"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT2_PAA2, "SDMMC4_DAT2 PAA2"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT3_PAA3, "SDMMC4_DAT3 PAA3"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT4_PAA4, "SDMMC4_DAT4 PAA4"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT5_PAA5, "SDMMC4_DAT5 PAA5"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT6_PAA6, "SDMMC4_DAT6 PAA6"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC4_DAT7_PAA7, "SDMMC4_DAT7 PAA7"),
+       PINCTRL_PIN(TEGRA_PIN_PBB0, "PBB0"),
+       PINCTRL_PIN(TEGRA_PIN_CAM_I2C_SCL_PBB1, "CAM_I2C_SCL PBB1"),
+       PINCTRL_PIN(TEGRA_PIN_CAM_I2C_SDA_PBB2, "CAM_I2C_SDA PBB2"),
+       PINCTRL_PIN(TEGRA_PIN_PBB3, "PBB3"),
+       PINCTRL_PIN(TEGRA_PIN_PBB4, "PBB4"),
+       PINCTRL_PIN(TEGRA_PIN_PBB5, "PBB5"),
+       PINCTRL_PIN(TEGRA_PIN_PBB6, "PBB6"),
+       PINCTRL_PIN(TEGRA_PIN_PBB7, "PBB7"),
+       PINCTRL_PIN(TEGRA_PIN_CAM_MCLK_PCC0, "CAM_MCLK PCC0"),
+       PINCTRL_PIN(TEGRA_PIN_PCC1, "PCC1"),
+       PINCTRL_PIN(TEGRA_PIN_PCC2, "PCC2"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC4_CLK_PCC4, "SDMMC4_CLK PCC4"),
+       PINCTRL_PIN(TEGRA_PIN_CLK2_REQ_PCC5, "CLK2_REQ PCC5"),
+       PINCTRL_PIN(TEGRA_PIN_CLK3_OUT_PEE0, "CLK3_OUT PEE0"),
+       PINCTRL_PIN(TEGRA_PIN_CLK3_REQ_PEE1, "CLK3_REQ PEE1"),
+       PINCTRL_PIN(TEGRA_PIN_CLK1_REQ_PEE2, "CLK1_REQ PEE2"),
+       PINCTRL_PIN(TEGRA_PIN_HDMI_CEC_PEE3, "HDMI_CEC PEE3"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_LB_OUT_PEE4, "SDMMC3_CLK_LB_OUT PEE4"),
+       PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5, "SDMMC3_CLK_LB_IN PEE5"),
+       PINCTRL_PIN(TEGRA_PIN_CORE_PWR_REQ, "CORE_PWR_REQ"),
+       PINCTRL_PIN(TEGRA_PIN_CPU_PWR_REQ, "CPU_PWR_REQ"),
+       PINCTRL_PIN(TEGRA_PIN_OWR, "OWR"),
+       PINCTRL_PIN(TEGRA_PIN_PWR_INT_N, "PWR_INT_N"),
+       PINCTRL_PIN(TEGRA_PIN_RESET_OUT_N, "RESET_OUT_N"),
+};
+
+static const unsigned clk_32k_out_pa0_pins[] = {
+       TEGRA_PIN_CLK_32K_OUT_PA0,
+};
+
+static const unsigned uart3_cts_n_pa1_pins[] = {
+       TEGRA_PIN_UART3_CTS_N_PA1,
+};
+
+static const unsigned dap2_fs_pa2_pins[] = {
+       TEGRA_PIN_DAP2_FS_PA2,
+};
+
+static const unsigned dap2_sclk_pa3_pins[] = {
+       TEGRA_PIN_DAP2_SCLK_PA3,
+};
+
+static const unsigned dap2_din_pa4_pins[] = {
+       TEGRA_PIN_DAP2_DIN_PA4,
+};
+
+static const unsigned dap2_dout_pa5_pins[] = {
+       TEGRA_PIN_DAP2_DOUT_PA5,
+};
+
+static const unsigned sdmmc3_clk_pa6_pins[] = {
+       TEGRA_PIN_SDMMC3_CLK_PA6,
+};
+
+static const unsigned sdmmc3_cmd_pa7_pins[] = {
+       TEGRA_PIN_SDMMC3_CMD_PA7,
+};
+
+static const unsigned gmi_a17_pb0_pins[] = {
+       TEGRA_PIN_GMI_A17_PB0,
+};
+
+static const unsigned gmi_a18_pb1_pins[] = {
+       TEGRA_PIN_GMI_A18_PB1,
+};
+
+static const unsigned sdmmc3_dat3_pb4_pins[] = {
+       TEGRA_PIN_SDMMC3_DAT3_PB4,
+};
+
+static const unsigned sdmmc3_dat2_pb5_pins[] = {
+       TEGRA_PIN_SDMMC3_DAT2_PB5,
+};
+
+static const unsigned sdmmc3_dat1_pb6_pins[] = {
+       TEGRA_PIN_SDMMC3_DAT1_PB6,
+};
+
+static const unsigned sdmmc3_dat0_pb7_pins[] = {
+       TEGRA_PIN_SDMMC3_DAT0_PB7,
+};
+
+static const unsigned uart3_rts_n_pc0_pins[] = {
+       TEGRA_PIN_UART3_RTS_N_PC0,
+};
+
+static const unsigned uart2_txd_pc2_pins[] = {
+       TEGRA_PIN_UART2_TXD_PC2,
+};
+
+static const unsigned uart2_rxd_pc3_pins[] = {
+       TEGRA_PIN_UART2_RXD_PC3,
+};
+
+static const unsigned gen1_i2c_scl_pc4_pins[] = {
+       TEGRA_PIN_GEN1_I2C_SCL_PC4,
+};
+
+static const unsigned gen1_i2c_sda_pc5_pins[] = {
+       TEGRA_PIN_GEN1_I2C_SDA_PC5,
+};
+
+static const unsigned gmi_wp_n_pc7_pins[] = {
+       TEGRA_PIN_GMI_WP_N_PC7,
+};
+
+static const unsigned gmi_ad0_pg0_pins[] = {
+       TEGRA_PIN_GMI_AD0_PG0,
+};
+
+static const unsigned gmi_ad1_pg1_pins[] = {
+       TEGRA_PIN_GMI_AD1_PG1,
+};
+
+static const unsigned gmi_ad2_pg2_pins[] = {
+       TEGRA_PIN_GMI_AD2_PG2,
+};
+
+static const unsigned gmi_ad3_pg3_pins[] = {
+       TEGRA_PIN_GMI_AD3_PG3,
+};
+
+static const unsigned gmi_ad4_pg4_pins[] = {
+       TEGRA_PIN_GMI_AD4_PG4,
+};
+
+static const unsigned gmi_ad5_pg5_pins[] = {
+       TEGRA_PIN_GMI_AD5_PG5,
+};
+
+static const unsigned gmi_ad6_pg6_pins[] = {
+       TEGRA_PIN_GMI_AD6_PG6,
+};
+
+static const unsigned gmi_ad7_pg7_pins[] = {
+       TEGRA_PIN_GMI_AD7_PG7,
+};
+
+static const unsigned gmi_ad8_ph0_pins[] = {
+       TEGRA_PIN_GMI_AD8_PH0,
+};
+
+static const unsigned gmi_ad9_ph1_pins[] = {
+       TEGRA_PIN_GMI_AD9_PH1,
+};
+
+static const unsigned gmi_ad10_ph2_pins[] = {
+       TEGRA_PIN_GMI_AD10_PH2,
+};
+
+static const unsigned gmi_ad11_ph3_pins[] = {
+       TEGRA_PIN_GMI_AD11_PH3,
+};
+
+static const unsigned gmi_ad12_ph4_pins[] = {
+       TEGRA_PIN_GMI_AD12_PH4,
+};
+
+static const unsigned gmi_ad13_ph5_pins[] = {
+       TEGRA_PIN_GMI_AD13_PH5,
+};
+
+static const unsigned gmi_ad14_ph6_pins[] = {
+       TEGRA_PIN_GMI_AD14_PH6,
+};
+
+static const unsigned gmi_ad15_ph7_pins[] = {
+       TEGRA_PIN_GMI_AD15_PH7,
+};
+
+static const unsigned gmi_wr_n_pi0_pins[] = {
+       TEGRA_PIN_GMI_WR_N_PI0,
+};
+
+static const unsigned gmi_oe_n_pi1_pins[] = {
+       TEGRA_PIN_GMI_OE_N_PI1,
+};
+
+static const unsigned gmi_cs6_n_pi3_pins[] = {
+       TEGRA_PIN_GMI_CS6_N_PI3,
+};
+
+static const unsigned gmi_rst_n_pi4_pins[] = {
+       TEGRA_PIN_GMI_RST_N_PI4,
+};
+
+static const unsigned gmi_iordy_pi5_pins[] = {
+       TEGRA_PIN_GMI_IORDY_PI5,
+};
+
+static const unsigned gmi_cs7_n_pi6_pins[] = {
+       TEGRA_PIN_GMI_CS7_N_PI6,
+};
+
+static const unsigned gmi_wait_pi7_pins[] = {
+       TEGRA_PIN_GMI_WAIT_PI7,
+};
+
+static const unsigned gmi_cs0_n_pj0_pins[] = {
+       TEGRA_PIN_GMI_CS0_N_PJ0,
+};
+
+static const unsigned gmi_cs1_n_pj2_pins[] = {
+       TEGRA_PIN_GMI_CS1_N_PJ2,
+};
+
+static const unsigned gmi_dqs_p_pj3_pins[] = {
+       TEGRA_PIN_GMI_DQS_P_PJ3,
+};
+
+static const unsigned uart2_cts_n_pj5_pins[] = {
+       TEGRA_PIN_UART2_CTS_N_PJ5,
+};
+
+static const unsigned uart2_rts_n_pj6_pins[] = {
+       TEGRA_PIN_UART2_RTS_N_PJ6,
+};
+
+static const unsigned gmi_a16_pj7_pins[] = {
+       TEGRA_PIN_GMI_A16_PJ7,
+};
+
+static const unsigned gmi_adv_n_pk0_pins[] = {
+       TEGRA_PIN_GMI_ADV_N_PK0,
+};
+
+static const unsigned gmi_clk_pk1_pins[] = {
+       TEGRA_PIN_GMI_CLK_PK1,
+};
+
+static const unsigned gmi_cs4_n_pk2_pins[] = {
+       TEGRA_PIN_GMI_CS4_N_PK2,
+};
+
+static const unsigned gmi_cs2_n_pk3_pins[] = {
+       TEGRA_PIN_GMI_CS2_N_PK3,
+};
+
+static const unsigned gmi_cs3_n_pk4_pins[] = {
+       TEGRA_PIN_GMI_CS3_N_PK4,
+};
+
+static const unsigned spdif_out_pk5_pins[] = {
+       TEGRA_PIN_SPDIF_OUT_PK5,
+};
+
+static const unsigned spdif_in_pk6_pins[] = {
+       TEGRA_PIN_SPDIF_IN_PK6,
+};
+
+static const unsigned gmi_a19_pk7_pins[] = {
+       TEGRA_PIN_GMI_A19_PK7,
+};
+
+static const unsigned dap1_fs_pn0_pins[] = {
+       TEGRA_PIN_DAP1_FS_PN0,
+};
+
+static const unsigned dap1_din_pn1_pins[] = {
+       TEGRA_PIN_DAP1_DIN_PN1,
+};
+
+static const unsigned dap1_dout_pn2_pins[] = {
+       TEGRA_PIN_DAP1_DOUT_PN2,
+};
+
+static const unsigned dap1_sclk_pn3_pins[] = {
+       TEGRA_PIN_DAP1_SCLK_PN3,
+};
+
+static const unsigned usb_vbus_en0_pn4_pins[] = {
+       TEGRA_PIN_USB_VBUS_EN0_PN4,
+};
+
+static const unsigned usb_vbus_en1_pn5_pins[] = {
+       TEGRA_PIN_USB_VBUS_EN1_PN5,
+};
+
+static const unsigned hdmi_int_pn7_pins[] = {
+       TEGRA_PIN_HDMI_INT_PN7,
+};
+
+static const unsigned ulpi_data7_po0_pins[] = {
+       TEGRA_PIN_ULPI_DATA7_PO0,
+};
+
+static const unsigned ulpi_data0_po1_pins[] = {
+       TEGRA_PIN_ULPI_DATA0_PO1,
+};
+
+static const unsigned ulpi_data1_po2_pins[] = {
+       TEGRA_PIN_ULPI_DATA1_PO2,
+};
+
+static const unsigned ulpi_data2_po3_pins[] = {
+       TEGRA_PIN_ULPI_DATA2_PO3,
+};
+
+static const unsigned ulpi_data3_po4_pins[] = {
+       TEGRA_PIN_ULPI_DATA3_PO4,
+};
+
+static const unsigned ulpi_data4_po5_pins[] = {
+       TEGRA_PIN_ULPI_DATA4_PO5,
+};
+
+static const unsigned ulpi_data5_po6_pins[] = {
+       TEGRA_PIN_ULPI_DATA5_PO6,
+};
+
+static const unsigned ulpi_data6_po7_pins[] = {
+       TEGRA_PIN_ULPI_DATA6_PO7,
+};
+
+static const unsigned dap3_fs_pp0_pins[] = {
+       TEGRA_PIN_DAP3_FS_PP0,
+};
+
+static const unsigned dap3_din_pp1_pins[] = {
+       TEGRA_PIN_DAP3_DIN_PP1,
+};
+
+static const unsigned dap3_dout_pp2_pins[] = {
+       TEGRA_PIN_DAP3_DOUT_PP2,
+};
+
+static const unsigned dap3_sclk_pp3_pins[] = {
+       TEGRA_PIN_DAP3_SCLK_PP3,
+};
+
+static const unsigned dap4_fs_pp4_pins[] = {
+       TEGRA_PIN_DAP4_FS_PP4,
+};
+
+static const unsigned dap4_din_pp5_pins[] = {
+       TEGRA_PIN_DAP4_DIN_PP5,
+};
+
+static const unsigned dap4_dout_pp6_pins[] = {
+       TEGRA_PIN_DAP4_DOUT_PP6,
+};
+
+static const unsigned dap4_sclk_pp7_pins[] = {
+       TEGRA_PIN_DAP4_SCLK_PP7,
+};
+
+static const unsigned kb_col0_pq0_pins[] = {
+       TEGRA_PIN_KB_COL0_PQ0,
+};
+
+static const unsigned kb_col1_pq1_pins[] = {
+       TEGRA_PIN_KB_COL1_PQ1,
+};
+
+static const unsigned kb_col2_pq2_pins[] = {
+       TEGRA_PIN_KB_COL2_PQ2,
+};
+
+static const unsigned kb_col3_pq3_pins[] = {
+       TEGRA_PIN_KB_COL3_PQ3,
+};
+
+static const unsigned kb_col4_pq4_pins[] = {
+       TEGRA_PIN_KB_COL4_PQ4,
+};
+
+static const unsigned kb_col5_pq5_pins[] = {
+       TEGRA_PIN_KB_COL5_PQ5,
+};
+
+static const unsigned kb_col6_pq6_pins[] = {
+       TEGRA_PIN_KB_COL6_PQ6,
+};
+
+static const unsigned kb_col7_pq7_pins[] = {
+       TEGRA_PIN_KB_COL7_PQ7,
+};
+
+static const unsigned kb_row0_pr0_pins[] = {
+       TEGRA_PIN_KB_ROW0_PR0,
+};
+
+static const unsigned kb_row1_pr1_pins[] = {
+       TEGRA_PIN_KB_ROW1_PR1,
+};
+
+static const unsigned kb_row2_pr2_pins[] = {
+       TEGRA_PIN_KB_ROW2_PR2,
+};
+
+static const unsigned kb_row3_pr3_pins[] = {
+       TEGRA_PIN_KB_ROW3_PR3,
+};
+
+static const unsigned kb_row4_pr4_pins[] = {
+       TEGRA_PIN_KB_ROW4_PR4,
+};
+
+static const unsigned kb_row5_pr5_pins[] = {
+       TEGRA_PIN_KB_ROW5_PR5,
+};
+
+static const unsigned kb_row6_pr6_pins[] = {
+       TEGRA_PIN_KB_ROW6_PR6,
+};
+
+static const unsigned kb_row7_pr7_pins[] = {
+       TEGRA_PIN_KB_ROW7_PR7,
+};
+
+static const unsigned kb_row8_ps0_pins[] = {
+       TEGRA_PIN_KB_ROW8_PS0,
+};
+
+static const unsigned kb_row9_ps1_pins[] = {
+       TEGRA_PIN_KB_ROW9_PS1,
+};
+
+static const unsigned kb_row10_ps2_pins[] = {
+       TEGRA_PIN_KB_ROW10_PS2,
+};
+
+static const unsigned gen2_i2c_scl_pt5_pins[] = {
+       TEGRA_PIN_GEN2_I2C_SCL_PT5,
+};
+
+static const unsigned gen2_i2c_sda_pt6_pins[] = {
+       TEGRA_PIN_GEN2_I2C_SDA_PT6,
+};
+
+static const unsigned sdmmc4_cmd_pt7_pins[] = {
+       TEGRA_PIN_SDMMC4_CMD_PT7,
+};
+
+static const unsigned pu0_pins[] = {
+       TEGRA_PIN_PU0,
+};
+
+static const unsigned pu1_pins[] = {
+       TEGRA_PIN_PU1,
+};
+
+static const unsigned pu2_pins[] = {
+       TEGRA_PIN_PU2,
+};
+
+static const unsigned pu3_pins[] = {
+       TEGRA_PIN_PU3,
+};
+
+static const unsigned pu4_pins[] = {
+       TEGRA_PIN_PU4,
+};
+
+static const unsigned pu5_pins[] = {
+       TEGRA_PIN_PU5,
+};
+
+static const unsigned pu6_pins[] = {
+       TEGRA_PIN_PU6,
+};
+
+static const unsigned pv0_pins[] = {
+       TEGRA_PIN_PV0,
+};
+
+static const unsigned pv1_pins[] = {
+       TEGRA_PIN_PV1,
+};
+
+static const unsigned sdmmc3_cd_n_pv2_pins[] = {
+       TEGRA_PIN_SDMMC3_CD_N_PV2,
+};
+
+static const unsigned sdmmc1_wp_n_pv3_pins[] = {
+       TEGRA_PIN_SDMMC1_WP_N_PV3,
+};
+
+static const unsigned ddc_scl_pv4_pins[] = {
+       TEGRA_PIN_DDC_SCL_PV4,
+};
+
+static const unsigned ddc_sda_pv5_pins[] = {
+       TEGRA_PIN_DDC_SDA_PV5,
+};
+
+static const unsigned gpio_w2_aud_pw2_pins[] = {
+       TEGRA_PIN_GPIO_W2_AUD_PW2,
+};
+
+static const unsigned gpio_w3_aud_pw3_pins[] = {
+       TEGRA_PIN_GPIO_W3_AUD_PW3,
+};
+
+static const unsigned clk1_out_pw4_pins[] = {
+       TEGRA_PIN_CLK1_OUT_PW4,
+};
+
+static const unsigned clk2_out_pw5_pins[] = {
+       TEGRA_PIN_CLK2_OUT_PW5,
+};
+
+static const unsigned uart3_txd_pw6_pins[] = {
+       TEGRA_PIN_UART3_TXD_PW6,
+};
+
+static const unsigned uart3_rxd_pw7_pins[] = {
+       TEGRA_PIN_UART3_RXD_PW7,
+};
+
+static const unsigned dvfs_pwm_px0_pins[] = {
+       TEGRA_PIN_DVFS_PWM_PX0,
+};
+
+static const unsigned gpio_x1_aud_px1_pins[] = {
+       TEGRA_PIN_GPIO_X1_AUD_PX1,
+};
+
+static const unsigned dvfs_clk_px2_pins[] = {
+       TEGRA_PIN_DVFS_CLK_PX2,
+};
+
+static const unsigned gpio_x3_aud_px3_pins[] = {
+       TEGRA_PIN_GPIO_X3_AUD_PX3,
+};
+
+static const unsigned gpio_x4_aud_px4_pins[] = {
+       TEGRA_PIN_GPIO_X4_AUD_PX4,
+};
+
+static const unsigned gpio_x5_aud_px5_pins[] = {
+       TEGRA_PIN_GPIO_X5_AUD_PX5,
+};
+
+static const unsigned gpio_x6_aud_px6_pins[] = {
+       TEGRA_PIN_GPIO_X6_AUD_PX6,
+};
+
+static const unsigned gpio_x7_aud_px7_pins[] = {
+       TEGRA_PIN_GPIO_X7_AUD_PX7,
+};
+
+static const unsigned ulpi_clk_py0_pins[] = {
+       TEGRA_PIN_ULPI_CLK_PY0,
+};
+
+static const unsigned ulpi_dir_py1_pins[] = {
+       TEGRA_PIN_ULPI_DIR_PY1,
+};
+
+static const unsigned ulpi_nxt_py2_pins[] = {
+       TEGRA_PIN_ULPI_NXT_PY2,
+};
+
+static const unsigned ulpi_stp_py3_pins[] = {
+       TEGRA_PIN_ULPI_STP_PY3,
+};
+
+static const unsigned sdmmc1_dat3_py4_pins[] = {
+       TEGRA_PIN_SDMMC1_DAT3_PY4,
+};
+
+static const unsigned sdmmc1_dat2_py5_pins[] = {
+       TEGRA_PIN_SDMMC1_DAT2_PY5,
+};
+
+static const unsigned sdmmc1_dat1_py6_pins[] = {
+       TEGRA_PIN_SDMMC1_DAT1_PY6,
+};
+
+static const unsigned sdmmc1_dat0_py7_pins[] = {
+       TEGRA_PIN_SDMMC1_DAT0_PY7,
+};
+
+static const unsigned sdmmc1_clk_pz0_pins[] = {
+       TEGRA_PIN_SDMMC1_CLK_PZ0,
+};
+
+static const unsigned sdmmc1_cmd_pz1_pins[] = {
+       TEGRA_PIN_SDMMC1_CMD_PZ1,
+};
+
+static const unsigned sys_clk_req_pz5_pins[] = {
+       TEGRA_PIN_SYS_CLK_REQ_PZ5,
+};
+
+static const unsigned pwr_i2c_scl_pz6_pins[] = {
+       TEGRA_PIN_PWR_I2C_SCL_PZ6,
+};
+
+static const unsigned pwr_i2c_sda_pz7_pins[] = {
+       TEGRA_PIN_PWR_I2C_SDA_PZ7,
+};
+
+static const unsigned sdmmc4_dat0_paa0_pins[] = {
+       TEGRA_PIN_SDMMC4_DAT0_PAA0,
+};
+
+static const unsigned sdmmc4_dat1_paa1_pins[] = {
+       TEGRA_PIN_SDMMC4_DAT1_PAA1,
+};
+
+static const unsigned sdmmc4_dat2_paa2_pins[] = {
+       TEGRA_PIN_SDMMC4_DAT2_PAA2,
+};
+
+static const unsigned sdmmc4_dat3_paa3_pins[] = {
+       TEGRA_PIN_SDMMC4_DAT3_PAA3,
+};
+
+static const unsigned sdmmc4_dat4_paa4_pins[] = {
+       TEGRA_PIN_SDMMC4_DAT4_PAA4,
+};
+
+static const unsigned sdmmc4_dat5_paa5_pins[] = {
+       TEGRA_PIN_SDMMC4_DAT5_PAA5,
+};
+
+static const unsigned sdmmc4_dat6_paa6_pins[] = {
+       TEGRA_PIN_SDMMC4_DAT6_PAA6,
+};
+
+static const unsigned sdmmc4_dat7_paa7_pins[] = {
+       TEGRA_PIN_SDMMC4_DAT7_PAA7,
+};
+
+static const unsigned pbb0_pins[] = {
+       TEGRA_PIN_PBB0,
+};
+
+static const unsigned cam_i2c_scl_pbb1_pins[] = {
+       TEGRA_PIN_CAM_I2C_SCL_PBB1,
+};
+
+static const unsigned cam_i2c_sda_pbb2_pins[] = {
+       TEGRA_PIN_CAM_I2C_SDA_PBB2,
+};
+
+static const unsigned pbb3_pins[] = {
+       TEGRA_PIN_PBB3,
+};
+
+static const unsigned pbb4_pins[] = {
+       TEGRA_PIN_PBB4,
+};
+
+static const unsigned pbb5_pins[] = {
+       TEGRA_PIN_PBB5,
+};
+
+static const unsigned pbb6_pins[] = {
+       TEGRA_PIN_PBB6,
+};
+
+static const unsigned pbb7_pins[] = {
+       TEGRA_PIN_PBB7,
+};
+
+static const unsigned cam_mclk_pcc0_pins[] = {
+       TEGRA_PIN_CAM_MCLK_PCC0,
+};
+
+static const unsigned pcc1_pins[] = {
+       TEGRA_PIN_PCC1,
+};
+
+static const unsigned pcc2_pins[] = {
+       TEGRA_PIN_PCC2,
+};
+
+static const unsigned sdmmc4_clk_pcc4_pins[] = {
+       TEGRA_PIN_SDMMC4_CLK_PCC4,
+};
+
+static const unsigned clk2_req_pcc5_pins[] = {
+       TEGRA_PIN_CLK2_REQ_PCC5,
+};
+
+static const unsigned clk3_out_pee0_pins[] = {
+       TEGRA_PIN_CLK3_OUT_PEE0,
+};
+
+static const unsigned clk3_req_pee1_pins[] = {
+       TEGRA_PIN_CLK3_REQ_PEE1,
+};
+
+static const unsigned clk1_req_pee2_pins[] = {
+       TEGRA_PIN_CLK1_REQ_PEE2,
+};
+
+static const unsigned hdmi_cec_pee3_pins[] = {
+       TEGRA_PIN_HDMI_CEC_PEE3,
+};
+
+static const unsigned sdmmc3_clk_lb_out_pee4_pins[] = {
+       TEGRA_PIN_SDMMC3_CLK_LB_OUT_PEE4,
+};
+
+static const unsigned sdmmc3_clk_lb_in_pee5_pins[] = {
+       TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5,
+};
+
+static const unsigned core_pwr_req_pins[] = {
+       TEGRA_PIN_CORE_PWR_REQ,
+};
+
+static const unsigned cpu_pwr_req_pins[] = {
+       TEGRA_PIN_CPU_PWR_REQ,
+};
+
+static const unsigned owr_pins[] = {
+       TEGRA_PIN_OWR,
+};
+
+static const unsigned pwr_int_n_pins[] = {
+       TEGRA_PIN_PWR_INT_N,
+};
+
+static const unsigned reset_out_n_pins[] = {
+       TEGRA_PIN_RESET_OUT_N,
+};
+
+static const unsigned drive_ao1_pins[] = {
+       TEGRA_PIN_KB_ROW0_PR0,
+       TEGRA_PIN_KB_ROW1_PR1,
+       TEGRA_PIN_KB_ROW2_PR2,
+       TEGRA_PIN_KB_ROW3_PR3,
+       TEGRA_PIN_KB_ROW4_PR4,
+       TEGRA_PIN_KB_ROW5_PR5,
+       TEGRA_PIN_KB_ROW6_PR6,
+       TEGRA_PIN_KB_ROW7_PR7,
+       TEGRA_PIN_PWR_I2C_SCL_PZ6,
+       TEGRA_PIN_PWR_I2C_SDA_PZ7,
+};
+
+static const unsigned drive_ao2_pins[] = {
+       TEGRA_PIN_CLK_32K_OUT_PA0,
+       TEGRA_PIN_KB_COL0_PQ0,
+       TEGRA_PIN_KB_COL1_PQ1,
+       TEGRA_PIN_KB_COL2_PQ2,
+       TEGRA_PIN_KB_COL3_PQ3,
+       TEGRA_PIN_KB_COL4_PQ4,
+       TEGRA_PIN_KB_COL5_PQ5,
+       TEGRA_PIN_KB_COL6_PQ6,
+       TEGRA_PIN_KB_COL7_PQ7,
+       TEGRA_PIN_KB_ROW8_PS0,
+       TEGRA_PIN_KB_ROW9_PS1,
+       TEGRA_PIN_KB_ROW10_PS2,
+       TEGRA_PIN_SYS_CLK_REQ_PZ5,
+       TEGRA_PIN_CORE_PWR_REQ,
+       TEGRA_PIN_CPU_PWR_REQ,
+       TEGRA_PIN_RESET_OUT_N,
+};
+
+static const unsigned drive_at1_pins[] = {
+       TEGRA_PIN_GMI_AD8_PH0,
+       TEGRA_PIN_GMI_AD9_PH1,
+       TEGRA_PIN_GMI_AD10_PH2,
+       TEGRA_PIN_GMI_AD11_PH3,
+       TEGRA_PIN_GMI_AD12_PH4,
+       TEGRA_PIN_GMI_AD13_PH5,
+       TEGRA_PIN_GMI_AD14_PH6,
+       TEGRA_PIN_GMI_AD15_PH7,
+
+       TEGRA_PIN_GMI_IORDY_PI5,
+       TEGRA_PIN_GMI_CS7_N_PI6,
+};
+
+static const unsigned drive_at2_pins[] = {
+       TEGRA_PIN_GMI_AD0_PG0,
+       TEGRA_PIN_GMI_AD1_PG1,
+       TEGRA_PIN_GMI_AD2_PG2,
+       TEGRA_PIN_GMI_AD3_PG3,
+       TEGRA_PIN_GMI_AD4_PG4,
+       TEGRA_PIN_GMI_AD5_PG5,
+       TEGRA_PIN_GMI_AD6_PG6,
+       TEGRA_PIN_GMI_AD7_PG7,
+
+       TEGRA_PIN_GMI_WR_N_PI0,
+       TEGRA_PIN_GMI_OE_N_PI1,
+       TEGRA_PIN_GMI_CS6_N_PI3,
+       TEGRA_PIN_GMI_RST_N_PI4,
+       TEGRA_PIN_GMI_WAIT_PI7,
+
+       TEGRA_PIN_GMI_DQS_P_PJ3,
+
+       TEGRA_PIN_GMI_ADV_N_PK0,
+       TEGRA_PIN_GMI_CLK_PK1,
+       TEGRA_PIN_GMI_CS4_N_PK2,
+       TEGRA_PIN_GMI_CS2_N_PK3,
+       TEGRA_PIN_GMI_CS3_N_PK4,
+};
+
+static const unsigned drive_at3_pins[] = {
+       TEGRA_PIN_GMI_WP_N_PC7,
+       TEGRA_PIN_GMI_CS0_N_PJ0,
+};
+
+static const unsigned drive_at4_pins[] = {
+       TEGRA_PIN_GMI_A17_PB0,
+       TEGRA_PIN_GMI_A18_PB1,
+       TEGRA_PIN_GMI_CS1_N_PJ2,
+       TEGRA_PIN_GMI_A16_PJ7,
+       TEGRA_PIN_GMI_A19_PK7,
+};
+
+static const unsigned drive_at5_pins[] = {
+       TEGRA_PIN_GEN2_I2C_SCL_PT5,
+       TEGRA_PIN_GEN2_I2C_SDA_PT6,
+};
+
+static const unsigned drive_cdev1_pins[] = {
+       TEGRA_PIN_CLK1_OUT_PW4,
+       TEGRA_PIN_CLK1_REQ_PEE2,
+};
+
+static const unsigned drive_cdev2_pins[] = {
+       TEGRA_PIN_CLK2_OUT_PW5,
+       TEGRA_PIN_CLK2_REQ_PCC5,
+       TEGRA_PIN_SDMMC1_WP_N_PV3,
+};
+
+static const unsigned drive_dap1_pins[] = {
+       TEGRA_PIN_DAP1_FS_PN0,
+       TEGRA_PIN_DAP1_DIN_PN1,
+       TEGRA_PIN_DAP1_DOUT_PN2,
+       TEGRA_PIN_DAP1_SCLK_PN3,
+};
+
+static const unsigned drive_dap2_pins[] = {
+       TEGRA_PIN_DAP2_FS_PA2,
+       TEGRA_PIN_DAP2_SCLK_PA3,
+       TEGRA_PIN_DAP2_DIN_PA4,
+       TEGRA_PIN_DAP2_DOUT_PA5,
+};
+
+static const unsigned drive_dap3_pins[] = {
+       TEGRA_PIN_DAP3_FS_PP0,
+       TEGRA_PIN_DAP3_DIN_PP1,
+       TEGRA_PIN_DAP3_DOUT_PP2,
+       TEGRA_PIN_DAP3_SCLK_PP3,
+};
+
+static const unsigned drive_dap4_pins[] = {
+       TEGRA_PIN_DAP4_FS_PP4,
+       TEGRA_PIN_DAP4_DIN_PP5,
+       TEGRA_PIN_DAP4_DOUT_PP6,
+       TEGRA_PIN_DAP4_SCLK_PP7,
+};
+
+static const unsigned drive_dbg_pins[] = {
+       TEGRA_PIN_GEN1_I2C_SCL_PC4,
+       TEGRA_PIN_GEN1_I2C_SDA_PC5,
+       TEGRA_PIN_PU0,
+       TEGRA_PIN_PU1,
+       TEGRA_PIN_PU2,
+       TEGRA_PIN_PU3,
+       TEGRA_PIN_PU4,
+       TEGRA_PIN_PU5,
+       TEGRA_PIN_PU6,
+};
+
+static const unsigned drive_sdio3_pins[] = {
+       TEGRA_PIN_SDMMC3_CLK_PA6,
+       TEGRA_PIN_SDMMC3_CMD_PA7,
+       TEGRA_PIN_SDMMC3_DAT3_PB4,
+       TEGRA_PIN_SDMMC3_DAT2_PB5,
+       TEGRA_PIN_SDMMC3_DAT1_PB6,
+       TEGRA_PIN_SDMMC3_DAT0_PB7,
+       TEGRA_PIN_SDMMC3_CLK_LB_OUT_PEE4,
+       TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5,
+};
+
+static const unsigned drive_spi_pins[] = {
+       TEGRA_PIN_DVFS_PWM_PX0,
+       TEGRA_PIN_GPIO_X1_AUD_PX1,
+       TEGRA_PIN_DVFS_CLK_PX2,
+       TEGRA_PIN_GPIO_X3_AUD_PX3,
+       TEGRA_PIN_GPIO_X4_AUD_PX4,
+       TEGRA_PIN_GPIO_X5_AUD_PX5,
+       TEGRA_PIN_GPIO_X6_AUD_PX6,
+       TEGRA_PIN_GPIO_X7_AUD_PX7,
+       TEGRA_PIN_GPIO_W2_AUD_PW2,
+       TEGRA_PIN_GPIO_W3_AUD_PW3,
+};
+
+static const unsigned drive_uaa_pins[] = {
+       TEGRA_PIN_ULPI_DATA0_PO1,
+       TEGRA_PIN_ULPI_DATA1_PO2,
+       TEGRA_PIN_ULPI_DATA2_PO3,
+       TEGRA_PIN_ULPI_DATA3_PO4,
+};
+
+static const unsigned drive_uab_pins[] = {
+       TEGRA_PIN_ULPI_DATA7_PO0,
+       TEGRA_PIN_ULPI_DATA4_PO5,
+       TEGRA_PIN_ULPI_DATA5_PO6,
+       TEGRA_PIN_ULPI_DATA6_PO7,
+       TEGRA_PIN_PV0,
+       TEGRA_PIN_PV1,
+};
+
+static const unsigned drive_uart2_pins[] = {
+       TEGRA_PIN_UART2_TXD_PC2,
+       TEGRA_PIN_UART2_RXD_PC3,
+       TEGRA_PIN_UART2_CTS_N_PJ5,
+       TEGRA_PIN_UART2_RTS_N_PJ6,
+};
+
+static const unsigned drive_uart3_pins[] = {
+       TEGRA_PIN_UART3_CTS_N_PA1,
+       TEGRA_PIN_UART3_RTS_N_PC0,
+       TEGRA_PIN_UART3_TXD_PW6,
+       TEGRA_PIN_UART3_RXD_PW7,
+};
+
+static const unsigned drive_sdio1_pins[] = {
+       TEGRA_PIN_SDMMC1_DAT3_PY4,
+       TEGRA_PIN_SDMMC1_DAT2_PY5,
+       TEGRA_PIN_SDMMC1_DAT1_PY6,
+       TEGRA_PIN_SDMMC1_DAT0_PY7,
+       TEGRA_PIN_SDMMC1_CLK_PZ0,
+       TEGRA_PIN_SDMMC1_CMD_PZ1,
+};
+
+static const unsigned drive_ddc_pins[] = {
+       TEGRA_PIN_DDC_SCL_PV4,
+       TEGRA_PIN_DDC_SDA_PV5,
+};
+
+static const unsigned drive_gma_pins[] = {
+       TEGRA_PIN_SDMMC4_CLK_PCC4,
+       TEGRA_PIN_SDMMC4_CMD_PT7,
+       TEGRA_PIN_SDMMC4_DAT0_PAA0,
+       TEGRA_PIN_SDMMC4_DAT1_PAA1,
+       TEGRA_PIN_SDMMC4_DAT2_PAA2,
+       TEGRA_PIN_SDMMC4_DAT3_PAA3,
+       TEGRA_PIN_SDMMC4_DAT4_PAA4,
+       TEGRA_PIN_SDMMC4_DAT5_PAA5,
+       TEGRA_PIN_SDMMC4_DAT6_PAA6,
+       TEGRA_PIN_SDMMC4_DAT7_PAA7,
+};
+
+static const unsigned drive_gme_pins[] = {
+       TEGRA_PIN_PBB0,
+       TEGRA_PIN_CAM_I2C_SCL_PBB1,
+       TEGRA_PIN_CAM_I2C_SDA_PBB2,
+       TEGRA_PIN_PBB3,
+       TEGRA_PIN_PCC2,
+};
+
+static const unsigned drive_gmf_pins[] = {
+       TEGRA_PIN_PBB4,
+       TEGRA_PIN_PBB5,
+       TEGRA_PIN_PBB6,
+       TEGRA_PIN_PBB7,
+};
+
+static const unsigned drive_gmg_pins[] = {
+       TEGRA_PIN_CAM_MCLK_PCC0,
+};
+
+static const unsigned drive_gmh_pins[] = {
+       TEGRA_PIN_PCC1,
+};
+
+static const unsigned drive_owr_pins[] = {
+       TEGRA_PIN_SDMMC3_CD_N_PV2,
+};
+
+static const unsigned drive_uda_pins[] = {
+       TEGRA_PIN_ULPI_CLK_PY0,
+       TEGRA_PIN_ULPI_DIR_PY1,
+       TEGRA_PIN_ULPI_NXT_PY2,
+       TEGRA_PIN_ULPI_STP_PY3,
+};
+
+static const unsigned drive_dev3_pins[] = {
+       TEGRA_PIN_CLK3_OUT_PEE0,
+       TEGRA_PIN_CLK3_REQ_PEE1,
+};
+
+enum tegra_mux {
+       TEGRA_MUX_BLINK,
+       TEGRA_MUX_CEC,
+       TEGRA_MUX_CLDVFS,
+       TEGRA_MUX_CLK12,
+       TEGRA_MUX_CPU,
+       TEGRA_MUX_DAP,
+       TEGRA_MUX_DAP1,
+       TEGRA_MUX_DAP2,
+       TEGRA_MUX_DEV3,
+       TEGRA_MUX_DISPLAYA,
+       TEGRA_MUX_DISPLAYA_ALT,
+       TEGRA_MUX_DISPLAYB,
+       TEGRA_MUX_DTV,
+       TEGRA_MUX_EMC_DLL,
+       TEGRA_MUX_EXTPERIPH1,
+       TEGRA_MUX_EXTPERIPH2,
+       TEGRA_MUX_EXTPERIPH3,
+       TEGRA_MUX_GMI,
+       TEGRA_MUX_GMI_ALT,
+       TEGRA_MUX_HDA,
+       TEGRA_MUX_HSI,
+       TEGRA_MUX_I2C1,
+       TEGRA_MUX_I2C2,
+       TEGRA_MUX_I2C3,
+       TEGRA_MUX_I2C4,
+       TEGRA_MUX_I2CPWR,
+       TEGRA_MUX_I2S0,
+       TEGRA_MUX_I2S1,
+       TEGRA_MUX_I2S2,
+       TEGRA_MUX_I2S3,
+       TEGRA_MUX_I2S4,
+       TEGRA_MUX_IRDA,
+       TEGRA_MUX_KBC,
+       TEGRA_MUX_NAND,
+       TEGRA_MUX_NAND_ALT,
+       TEGRA_MUX_OWR,
+       TEGRA_MUX_PMI,
+       TEGRA_MUX_PWM0,
+       TEGRA_MUX_PWM1,
+       TEGRA_MUX_PWM2,
+       TEGRA_MUX_PWM3,
+       TEGRA_MUX_PWRON,
+       TEGRA_MUX_RESET_OUT_N,
+       TEGRA_MUX_RSVD1,
+       TEGRA_MUX_RSVD2,
+       TEGRA_MUX_RSVD3,
+       TEGRA_MUX_RSVD4,
+       TEGRA_MUX_SDMMC1,
+       TEGRA_MUX_SDMMC2,
+       TEGRA_MUX_SDMMC3,
+       TEGRA_MUX_SDMMC4,
+       TEGRA_MUX_SOC,
+       TEGRA_MUX_SPDIF,
+       TEGRA_MUX_SPI1,
+       TEGRA_MUX_SPI2,
+       TEGRA_MUX_SPI3,
+       TEGRA_MUX_SPI4,
+       TEGRA_MUX_SPI5,
+       TEGRA_MUX_SPI6,
+       TEGRA_MUX_SYSCLK,
+       TEGRA_MUX_TRACE,
+       TEGRA_MUX_UARTA,
+       TEGRA_MUX_UARTB,
+       TEGRA_MUX_UARTC,
+       TEGRA_MUX_UARTD,
+       TEGRA_MUX_ULPI,
+       TEGRA_MUX_USB,
+       TEGRA_MUX_VGP1,
+       TEGRA_MUX_VGP2,
+       TEGRA_MUX_VGP3,
+       TEGRA_MUX_VGP4,
+       TEGRA_MUX_VGP5,
+       TEGRA_MUX_VGP6,
+       TEGRA_MUX_VI,
+       TEGRA_MUX_VI_ALT1,
+       TEGRA_MUX_VI_ALT3,
+};
+
+static const char * const blink_groups[] = {
+       "clk_32k_out_pa0",
+};
+
+static const char * const cec_groups[] = {
+       "hdmi_cec_pee3",
+};
+
+static const char * const cldvfs_groups[] = {
+       "gmi_ad9_ph1",
+       "gmi_ad10_ph2",
+       "kb_row7_pr7",
+       "kb_row8_ps0",
+       "dvfs_pwm_px0",
+       "dvfs_clk_px2",
+};
+
+static const char * const clk12_groups[] = {
+       "sdmmc1_wp_n_pv3",
+       "sdmmc1_clk_pz0",
+};
+
+static const char * const cpu_groups[] = {
+       "cpu_pwr_req",
+};
+
+static const char * const dap_groups[] = {
+       "clk1_req_pee2",
+       "clk2_req_pcc5",
+};
+
+static const char * const dap1_groups[] = {
+       "clk1_req_pee2",
+};
+
+static const char * const dap2_groups[] = {
+       "clk1_out_pw4",
+       "gpio_x4_aud_px4",
+};
+
+static const char * const dev3_groups[] = {
+       "clk3_req_pee1",
+};
+
+static const char * const displaya_groups[] = {
+       "dap3_fs_pp0",
+       "dap3_din_pp1",
+       "dap3_dout_pp2",
+       "dap3_sclk_pp3",
+       "uart3_rts_n_pc0",
+       "pu3",
+       "pu4",
+       "pu5",
+       "pbb3",
+       "pbb4",
+       "pbb5",
+       "pbb6",
+       "kb_row3_pr3",
+       "kb_row4_pr4",
+       "kb_row5_pr5",
+       "kb_row6_pr6",
+       "kb_col3_pq3",
+       "sdmmc3_dat2_pb5",
+};
+
+static const char * const displaya_alt_groups[] = {
+       "kb_row6_pr6",
+};
+
+static const char * const displayb_groups[] = {
+       "dap3_fs_pp0",
+       "dap3_din_pp1",
+       "dap3_dout_pp2",
+       "dap3_sclk_pp3",
+       "pu3",
+       "pu4",
+       "pu5",
+       "pu6",
+       "pbb3",
+       "pbb4",
+       "pbb5",
+       "pbb6",
+       "kb_row3_pr3",
+       "kb_row4_pr4",
+       "kb_row5_pr5",
+       "kb_row6_pr6",
+       "sdmmc3_dat3_pb4",
+};
+
+static const char * const dtv_groups[] = {
+       "uart3_cts_n_pa1",
+       "uart3_rts_n_pc0",
+       "dap4_fs_pp4",
+       "dap4_dout_pp6",
+       "gmi_wait_pi7",
+       "gmi_ad8_ph0",
+       "gmi_ad14_ph6",
+       "gmi_ad15_ph7",
+};
+
+static const char * const emc_dll_groups[] = {
+       "kb_col0_pq0",
+       "kb_col1_pq1",
+};
+
+static const char * const extperiph1_groups[] = {
+       "clk1_out_pw4",
+};
+
+static const char * const extperiph2_groups[] = {
+       "clk2_out_pw5",
+};
+
+static const char * const extperiph3_groups[] = {
+       "clk3_out_pee0",
+};
+
+static const char * const gmi_groups[] = {
+       "gmi_wp_n_pc7",
+
+       "gmi_ad0_pg0",
+       "gmi_ad1_pg1",
+       "gmi_ad2_pg2",
+       "gmi_ad3_pg3",
+       "gmi_ad4_pg4",
+       "gmi_ad5_pg5",
+       "gmi_ad6_pg6",
+       "gmi_ad7_pg7",
+       "gmi_ad8_ph0",
+       "gmi_ad9_ph1",
+       "gmi_ad10_ph2",
+       "gmi_ad11_ph3",
+       "gmi_ad12_ph4",
+       "gmi_ad13_ph5",
+       "gmi_ad14_ph6",
+       "gmi_ad15_ph7",
+       "gmi_wr_n_pi0",
+       "gmi_oe_n_pi1",
+       "gmi_cs6_n_pi3",
+       "gmi_rst_n_pi4",
+       "gmi_iordy_pi5",
+       "gmi_cs7_n_pi6",
+       "gmi_wait_pi7",
+       "gmi_cs0_n_pj0",
+       "gmi_cs1_n_pj2",
+       "gmi_dqs_p_pj3",
+       "gmi_adv_n_pk0",
+       "gmi_clk_pk1",
+       "gmi_cs4_n_pk2",
+       "gmi_cs2_n_pk3",
+       "gmi_cs3_n_pk4",
+       "gmi_a16_pj7",
+       "gmi_a17_pb0",
+       "gmi_a18_pb1",
+       "gmi_a19_pk7",
+       "gen2_i2c_scl_pt5",
+       "gen2_i2c_sda_pt6",
+       "sdmmc4_dat0_paa0",
+       "sdmmc4_dat1_paa1",
+       "sdmmc4_dat2_paa2",
+       "sdmmc4_dat3_paa3",
+       "sdmmc4_dat4_paa4",
+       "sdmmc4_dat5_paa5",
+       "sdmmc4_dat6_paa6",
+       "sdmmc4_dat7_paa7",
+       "sdmmc4_clk_pcc4",
+       "sdmmc4_cmd_pt7",
+       "dap1_fs_pn0",
+       "dap1_din_pn1",
+       "dap1_dout_pn2",
+       "dap1_sclk_pn3",
+};
+
+static const char * const gmi_alt_groups[] = {
+       "gmi_wp_n_pc7",
+       "gmi_cs3_n_pk4",
+       "gmi_a16_pj7",
+};
+
+static const char * const hda_groups[] = {
+       "dap1_fs_pn0",
+       "dap1_din_pn1",
+       "dap1_dout_pn2",
+       "dap1_sclk_pn3",
+       "dap2_fs_pa2",
+       "dap2_sclk_pa3",
+       "dap2_din_pa4",
+       "dap2_dout_pa5",
+};
+
+static const char * const hsi_groups[] = {
+       "ulpi_data0_po1",
+       "ulpi_data1_po2",
+       "ulpi_data2_po3",
+       "ulpi_data3_po4",
+       "ulpi_data4_po5",
+       "ulpi_data5_po6",
+       "ulpi_data6_po7",
+       "ulpi_data7_po0",
+};
+
+static const char * const i2c1_groups[] = {
+       "gen1_i2c_scl_pc4",
+       "gen1_i2c_sda_pc5",
+       "gpio_w2_aud_pw2",
+       "gpio_w3_aud_pw3",
+};
+
+static const char * const i2c2_groups[] = {
+       "gen2_i2c_scl_pt5",
+       "gen2_i2c_sda_pt6",
+};
+
+static const char * const i2c3_groups[] = {
+       "cam_i2c_scl_pbb1",
+       "cam_i2c_sda_pbb2",
+};
+
+static const char * const i2c4_groups[] = {
+       "ddc_scl_pv4",
+       "ddc_sda_pv5",
+};
+
+static const char * const i2cpwr_groups[] = {
+       "pwr_i2c_scl_pz6",
+       "pwr_i2c_sda_pz7",
+};
+
+static const char * const i2s0_groups[] = {
+       "dap1_fs_pn0",
+       "dap1_din_pn1",
+       "dap1_dout_pn2",
+       "dap1_sclk_pn3",
+};
+
+static const char * const i2s1_groups[] = {
+       "dap2_fs_pa2",
+       "dap2_sclk_pa3",
+       "dap2_din_pa4",
+       "dap2_dout_pa5",
+};
+
+static const char * const i2s2_groups[] = {
+       "dap3_fs_pp0",
+       "dap3_din_pp1",
+       "dap3_dout_pp2",
+       "dap3_sclk_pp3",
+};
+
+static const char * const i2s3_groups[] = {
+       "dap4_fs_pp4",
+       "dap4_din_pp5",
+       "dap4_dout_pp6",
+       "dap4_sclk_pp7",
+};
+
+static const char * const i2s4_groups[] = {
+       "pcc1",
+       "pbb0",
+       "pbb7",
+       "pcc2",
+};
+
+static const char * const irda_groups[] = {
+       "uart2_rxd_pc3",
+       "uart2_txd_pc2",
+};
+
+static const char * const kbc_groups[] = {
+       "kb_row0_pr0",
+       "kb_row1_pr1",
+       "kb_row2_pr2",
+       "kb_row3_pr3",
+       "kb_row4_pr4",
+       "kb_row5_pr5",
+       "kb_row6_pr6",
+       "kb_row7_pr7",
+       "kb_row8_ps0",
+       "kb_row9_ps1",
+       "kb_row10_ps2",
+       "kb_col0_pq0",
+       "kb_col1_pq1",
+       "kb_col2_pq2",
+       "kb_col3_pq3",
+       "kb_col4_pq4",
+       "kb_col5_pq5",
+       "kb_col6_pq6",
+       "kb_col7_pq7",
+};
+
+static const char * const nand_groups[] = {
+       "gmi_wp_n_pc7",
+       "gmi_wait_pi7",
+       "gmi_adv_n_pk0",
+       "gmi_clk_pk1",
+       "gmi_cs0_n_pj0",
+       "gmi_cs1_n_pj2",
+       "gmi_cs2_n_pk3",
+       "gmi_cs3_n_pk4",
+       "gmi_cs4_n_pk2",
+       "gmi_cs6_n_pi3",
+       "gmi_cs7_n_pi6",
+       "gmi_ad0_pg0",
+       "gmi_ad1_pg1",
+       "gmi_ad2_pg2",
+       "gmi_ad3_pg3",
+       "gmi_ad4_pg4",
+       "gmi_ad5_pg5",
+       "gmi_ad6_pg6",
+       "gmi_ad7_pg7",
+       "gmi_ad8_ph0",
+       "gmi_ad9_ph1",
+       "gmi_ad10_ph2",
+       "gmi_ad11_ph3",
+       "gmi_ad12_ph4",
+       "gmi_ad13_ph5",
+       "gmi_ad14_ph6",
+       "gmi_ad15_ph7",
+       "gmi_wr_n_pi0",
+       "gmi_oe_n_pi1",
+       "gmi_dqs_p_pj3",
+       "gmi_rst_n_pi4",
+};
+
+static const char * const nand_alt_groups[] = {
+       "gmi_cs6_n_pi3",
+       "gmi_cs7_n_pi6",
+       "gmi_rst_n_pi4",
+};
+
+static const char * const owr_groups[] = {
+       "pu0",
+       "kb_col4_pq4",
+       "owr",
+       "sdmmc3_cd_n_pv2",
+};
+
+static const char * const pmi_groups[] = {
+       "pwr_int_n",
+};
+
+static const char * const pwm0_groups[] = {
+       "sdmmc1_dat2_py5",
+       "uart3_rts_n_pc0",
+       "pu3",
+       "gmi_ad8_ph0",
+       "sdmmc3_dat3_pb4",
+};
+
+static const char * const pwm1_groups[] = {
+       "sdmmc1_dat1_py6",
+       "pu4",
+       "gmi_ad9_ph1",
+       "sdmmc3_dat2_pb5",
+};
+
+static const char * const pwm2_groups[] = {
+       "pu5",
+       "gmi_ad10_ph2",
+       "kb_col3_pq3",
+       "sdmmc3_dat1_pb6",
+};
+
+static const char * const pwm3_groups[] = {
+       "pu6",
+       "gmi_ad11_ph3",
+       "sdmmc3_cmd_pa7",
+};
+
+static const char * const pwron_groups[] = {
+       "core_pwr_req",
+};
+
+static const char * const reset_out_n_groups[] = {
+       "reset_out_n",
+};
+
+static const char * const rsvd1_groups[] = {
+       "pv1",
+       "hdmi_int_pn7",
+       "pu1",
+       "pu2",
+       "gmi_wp_n_pc7",
+       "gmi_adv_n_pk0",
+       "gmi_cs0_n_pj0",
+       "gmi_cs1_n_pj2",
+       "gmi_ad0_pg0",
+       "gmi_ad1_pg1",
+       "gmi_ad2_pg2",
+       "gmi_ad3_pg3",
+       "gmi_ad4_pg4",
+       "gmi_ad5_pg5",
+       "gmi_ad6_pg6",
+       "gmi_ad7_pg7",
+       "gmi_wr_n_pi0",
+       "gmi_oe_n_pi1",
+       "gpio_x4_aud_px4",
+       "gpio_x5_aud_px5",
+       "gpio_x7_aud_px7",
+
+       "reset_out_n",
+};
+
+static const char * const rsvd2_groups[] = {
+       "pv0",
+       "pv1",
+       "sdmmc1_dat0_py7",
+       "clk2_out_pw5",
+       "clk2_req_pcc5",
+       "hdmi_int_pn7",
+       "ddc_scl_pv4",
+       "ddc_sda_pv5",
+       "uart3_txd_pw6",
+       "uart3_rxd_pw7",
+       "gen1_i2c_scl_pc4",
+       "gen1_i2c_sda_pc5",
+       "dap4_fs_pp4",
+       "dap4_din_pp5",
+       "dap4_dout_pp6",
+       "dap4_sclk_pp7",
+       "clk3_out_pee0",
+       "clk3_req_pee1",
+       "gmi_iordy_pi5",
+       "gmi_a17_pb0",
+       "gmi_a18_pb1",
+       "gen2_i2c_scl_pt5",
+       "gen2_i2c_sda_pt6",
+       "sdmmc4_clk_pcc4",
+       "sdmmc4_cmd_pt7",
+       "sdmmc4_dat7_paa7",
+       "pcc1",
+       "pbb7",
+       "pcc2",
+       "pwr_i2c_scl_pz6",
+       "pwr_i2c_sda_pz7",
+       "kb_row0_pr0",
+       "kb_row1_pr1",
+       "kb_row2_pr2",
+       "kb_row7_pr7",
+       "kb_row8_ps0",
+       "kb_row9_ps1",
+       "kb_row10_ps2",
+       "kb_col1_pq1",
+       "kb_col2_pq2",
+       "kb_col5_pq5",
+       "kb_col6_pq6",
+       "kb_col7_pq7",
+       "sys_clk_req_pz5",
+       "core_pwr_req",
+       "cpu_pwr_req",
+       "pwr_int_n",
+       "owr",
+       "spdif_out_pk5",
+       "gpio_x1_aud_px1",
+       "sdmmc3_clk_pa6",
+       "sdmmc3_dat0_pb7",
+       "gpio_w2_aud_pw2",
+       "usb_vbus_en0_pn4",
+       "usb_vbus_en1_pn5",
+       "sdmmc3_clk_lb_out_pee4",
+       "sdmmc3_clk_lb_in_pee5",
+       "reset_out_n",
+};
+
+static const char * const rsvd3_groups[] = {
+       "pv0",
+       "pv1",
+       "sdmmc1_clk_pz0",
+       "clk2_out_pw5",
+       "clk2_req_pcc5",
+       "hdmi_int_pn7",
+       "ddc_scl_pv4",
+       "ddc_sda_pv5",
+       "uart2_rts_n_pj6",
+       "uart2_cts_n_pj5",
+       "uart3_txd_pw6",
+       "uart3_rxd_pw7",
+       "pu0",
+       "pu1",
+       "pu2",
+       "gen1_i2c_scl_pc4",
+       "gen1_i2c_sda_pc5",
+       "dap4_din_pp5",
+       "dap4_sclk_pp7",
+       "clk3_out_pee0",
+       "clk3_req_pee1",
+       "pcc1",
+       "cam_i2c_scl_pbb1",
+       "cam_i2c_sda_pbb2",
+       "pbb7",
+       "pcc2",
+       "pwr_i2c_scl_pz6",
+       "pwr_i2c_sda_pz7",
+       "kb_row0_pr0",
+       "kb_row1_pr1",
+       "kb_row2_pr2",
+       "kb_row3_pr3",
+       "kb_row9_ps1",
+       "kb_row10_ps2",
+       "clk_32k_out_pa0",
+       "sys_clk_req_pz5",
+       "core_pwr_req",
+       "cpu_pwr_req",
+       "pwr_int_n",
+       "owr",
+       "clk1_req_pee2",
+       "clk1_out_pw4",
+       "spdif_out_pk5",
+       "spdif_in_pk6",
+       "dap2_fs_pa2",
+       "dap2_sclk_pa3",
+       "dap2_din_pa4",
+       "dap2_dout_pa5",
+       "dvfs_pwm_px0",
+       "gpio_x1_aud_px1",
+       "gpio_x3_aud_px3",
+       "dvfs_clk_px2",
+       "sdmmc3_clk_pa6",
+       "sdmmc3_dat0_pb7",
+       "hdmi_cec_pee3",
+       "sdmmc3_cd_n_pv2",
+       "usb_vbus_en0_pn4",
+       "usb_vbus_en1_pn5",
+       "sdmmc3_clk_lb_out_pee4",
+       "sdmmc3_clk_lb_in_pee5",
+       "reset_out_n",
+};
+
+static const char * const rsvd4_groups[] = {
+       "pv0",
+       "pv1",
+       "sdmmc1_clk_pz0",
+       "clk2_out_pw5",
+       "clk2_req_pcc5",
+       "hdmi_int_pn7",
+       "ddc_scl_pv4",
+       "ddc_sda_pv5",
+       "pu0",
+       "pu1",
+       "pu2",
+       "gen1_i2c_scl_pc4",
+       "gen1_i2c_sda_pc5",
+       "dap4_fs_pp4",
+       "dap4_din_pp5",
+       "dap4_dout_pp6",
+       "dap4_sclk_pp7",
+       "clk3_out_pee0",
+       "clk3_req_pee1",
+       "gmi_ad0_pg0",
+       "gmi_ad1_pg1",
+       "gmi_ad2_pg2",
+       "gmi_ad3_pg3",
+       "gmi_ad4_pg4",
+       "gmi_ad12_ph4",
+       "gmi_ad13_ph5",
+       "gmi_rst_n_pi4",
+       "gen2_i2c_scl_pt5",
+       "gen2_i2c_sda_pt6",
+       "sdmmc4_clk_pcc4",
+       "sdmmc4_cmd_pt7",
+       "sdmmc4_dat0_paa0",
+       "sdmmc4_dat1_paa1",
+       "sdmmc4_dat2_paa2",
+       "sdmmc4_dat3_paa3",
+       "sdmmc4_dat4_paa4",
+       "sdmmc4_dat5_paa5",
+       "sdmmc4_dat6_paa6",
+       "sdmmc4_dat7_paa7",
+       "cam_mclk_pcc0",
+       "pcc1",
+       "cam_i2c_scl_pbb1",
+       "cam_i2c_sda_pbb2",
+       "pbb3",
+       "pbb4",
+       "pbb5",
+       "pbb6",
+       "pbb7",
+       "pcc2",
+       "pwr_i2c_scl_pz6",
+       "pwr_i2c_sda_pz7",
+       "kb_row0_pr0",
+       "kb_row1_pr1",
+       "kb_row2_pr2",
+       "kb_col2_pq2",
+       "kb_col5_pq5",
+       "kb_col6_pq6",
+       "kb_col7_pq7",
+       "clk_32k_out_pa0",
+       "sys_clk_req_pz5",
+       "core_pwr_req",
+       "cpu_pwr_req",
+       "pwr_int_n",
+       "owr",
+       "dap1_fs_pn0",
+       "dap1_din_pn1",
+       "dap1_dout_pn2",
+       "dap1_sclk_pn3",
+       "clk1_req_pee2",
+       "clk1_out_pw4",
+       "spdif_in_pk6",
+       "spdif_out_pk5",
+       "dap2_fs_pa2",
+       "dap2_sclk_pa3",
+       "dap2_din_pa4",
+       "dap2_dout_pa5",
+       "dvfs_pwm_px0",
+       "gpio_x1_aud_px1",
+       "gpio_x3_aud_px3",
+       "dvfs_clk_px2",
+       "gpio_x5_aud_px5",
+       "gpio_x6_aud_px6",
+       "gpio_x7_aud_px7",
+       "sdmmc3_cd_n_pv2",
+       "usb_vbus_en0_pn4",
+       "usb_vbus_en1_pn5",
+       "sdmmc3_clk_lb_in_pee5",
+       "sdmmc3_clk_lb_out_pee4",
+};
+
+static const char * const sdmmc1_groups[] = {
+       "sdmmc1_clk_pz0",
+       "sdmmc1_cmd_pz1",
+       "sdmmc1_dat3_py4",
+       "sdmmc1_dat2_py5",
+       "sdmmc1_dat1_py6",
+       "sdmmc1_dat0_py7",
+       "uart3_cts_n_pa1",
+       "kb_col5_pq5",
+       "sdmmc1_wp_n_pv3",
+};
+
+static const char * const sdmmc2_groups[] = {
+       "gmi_iordy_pi5",
+       "gmi_clk_pk1",
+       "gmi_cs2_n_pk3",
+       "gmi_cs3_n_pk4",
+       "gmi_cs7_n_pi6",
+       "gmi_ad12_ph4",
+       "gmi_ad13_ph5",
+       "gmi_ad14_ph6",
+       "gmi_ad15_ph7",
+       "gmi_dqs_p_pj3",
+};
+
+static const char * const sdmmc3_groups[] = {
+       "kb_col4_pq4",
+       "sdmmc3_clk_pa6",
+       "sdmmc3_cmd_pa7",
+       "sdmmc3_dat0_pb7",
+       "sdmmc3_dat1_pb6",
+       "sdmmc3_dat2_pb5",
+       "sdmmc3_dat3_pb4",
+       "hdmi_cec_pee3",
+       "sdmmc3_cd_n_pv2",
+       "sdmmc3_clk_lb_in_pee5",
+       "sdmmc3_clk_lb_out_pee4",
+};
+
+static const char * const sdmmc4_groups[] = {
+       "sdmmc4_clk_pcc4",
+       "sdmmc4_cmd_pt7",
+       "sdmmc4_dat0_paa0",
+       "sdmmc4_dat1_paa1",
+       "sdmmc4_dat2_paa2",
+       "sdmmc4_dat3_paa3",
+       "sdmmc4_dat4_paa4",
+       "sdmmc4_dat5_paa5",
+       "sdmmc4_dat6_paa6",
+       "sdmmc4_dat7_paa7",
+};
+
+static const char * const soc_groups[] = {
+       "gmi_cs1_n_pj2",
+       "gmi_oe_n_pi1",
+       "clk_32k_out_pa0",
+       "hdmi_cec_pee3",
+};
+
+static const char * const spdif_groups[] = {
+       "sdmmc1_cmd_pz1",
+       "sdmmc1_dat3_py4",
+       "uart2_rxd_pc3",
+       "uart2_txd_pc2",
+       "spdif_in_pk6",
+       "spdif_out_pk5",
+};
+
+static const char * const spi1_groups[] = {
+       "ulpi_clk_py0",
+       "ulpi_dir_py1",
+       "ulpi_nxt_py2",
+       "ulpi_stp_py3",
+       "gpio_x3_aud_px3",
+       "gpio_x4_aud_px4",
+       "gpio_x5_aud_px5",
+       "gpio_x6_aud_px6",
+       "gpio_x7_aud_px7",
+       "gpio_w3_aud_pw3",
+};
+
+static const char * const spi2_groups[] = {
+       "ulpi_data4_po5",
+       "ulpi_data5_po6",
+       "ulpi_data6_po7",
+       "ulpi_data7_po0",
+       "kb_row4_pr4",
+       "kb_row5_pr5",
+       "kb_col0_pq0",
+       "kb_col1_pq1",
+       "kb_col2_pq2",
+       "kb_col6_pq6",
+       "kb_col7_pq7",
+       "gpio_x4_aud_px4",
+       "gpio_x5_aud_px5",
+       "gpio_x6_aud_px6",
+       "gpio_x7_aud_px7",
+       "gpio_w2_aud_pw2",
+       "gpio_w3_aud_pw3",
+};
+
+static const char * const spi3_groups[] = {
+       "ulpi_data0_po1",
+       "ulpi_data1_po2",
+       "ulpi_data2_po3",
+       "ulpi_data3_po4",
+       "sdmmc4_dat0_paa0",
+       "sdmmc4_dat1_paa1",
+       "sdmmc4_dat2_paa2",
+       "sdmmc4_dat3_paa3",
+       "sdmmc4_dat4_paa4",
+       "sdmmc4_dat5_paa5",
+       "sdmmc4_dat6_paa6",
+       "sdmmc3_clk_pa6",
+       "sdmmc3_cmd_pa7",
+       "sdmmc3_dat0_pb7",
+       "sdmmc3_dat1_pb6",
+       "sdmmc3_dat2_pb5",
+       "sdmmc3_dat3_pb4",
+};
+
+static const char * const spi4_groups[] = {
+       "sdmmc1_cmd_pz1",
+       "sdmmc1_dat3_py4",
+       "sdmmc1_dat2_py5",
+       "sdmmc1_dat1_py6",
+       "sdmmc1_dat0_py7",
+       "uart2_rxd_pc3",
+       "uart2_txd_pc2",
+       "uart2_rts_n_pj6",
+       "uart2_cts_n_pj5",
+       "uart3_txd_pw6",
+       "uart3_rxd_pw7",
+       "uart3_cts_n_pa1",
+       "gmi_wait_pi7",
+       "gmi_cs6_n_pi3",
+       "gmi_ad5_pg5",
+       "gmi_ad6_pg6",
+       "gmi_ad7_pg7",
+       "gmi_a19_pk7",
+       "gmi_wr_n_pi0",
+       "sdmmc1_wp_n_pv3",
+};
+
+static const char * const spi5_groups[] = {
+       "ulpi_clk_py0",
+       "ulpi_dir_py1",
+       "ulpi_nxt_py2",
+       "ulpi_stp_py3",
+       "dap3_fs_pp0",
+       "dap3_din_pp1",
+       "dap3_dout_pp2",
+       "dap3_sclk_pp3",
+};
+
+static const char * const spi6_groups[] = {
+       "dvfs_pwm_px0",
+       "gpio_x1_aud_px1",
+       "gpio_x3_aud_px3",
+       "dvfs_clk_px2",
+       "gpio_x6_aud_px6",
+       "gpio_w2_aud_pw2",
+       "gpio_w3_aud_pw3",
+};
+
+static const char * const sysclk_groups[] = {
+       "sys_clk_req_pz5",
+};
+
+static const char * const trace_groups[] = {
+       "gmi_iordy_pi5",
+       "gmi_adv_n_pk0",
+       "gmi_clk_pk1",
+       "gmi_cs2_n_pk3",
+       "gmi_cs4_n_pk2",
+       "gmi_a16_pj7",
+       "gmi_a17_pb0",
+       "gmi_a18_pb1",
+       "gmi_a19_pk7",
+       "gmi_dqs_p_pj3",
+};
+
+static const char * const uarta_groups[] = {
+       "ulpi_data0_po1",
+       "ulpi_data1_po2",
+       "ulpi_data2_po3",
+       "ulpi_data3_po4",
+       "ulpi_data4_po5",
+       "ulpi_data5_po6",
+       "ulpi_data6_po7",
+       "ulpi_data7_po0",
+       "sdmmc1_cmd_pz1",
+       "sdmmc1_dat3_py4",
+       "sdmmc1_dat2_py5",
+       "sdmmc1_dat1_py6",
+       "sdmmc1_dat0_py7",
+       "uart2_rxd_pc3",
+       "uart2_txd_pc2",
+       "uart2_rts_n_pj6",
+       "uart2_cts_n_pj5",
+       "pu0",
+       "pu1",
+       "pu2",
+       "pu3",
+       "pu4",
+       "pu5",
+       "pu6",
+       "kb_row7_pr7",
+       "kb_row8_ps0",
+       "kb_row9_ps1",
+       "kb_row10_ps2",
+       "kb_col3_pq3",
+       "kb_col4_pq4",
+       "sdmmc3_cmd_pa7",
+       "sdmmc3_dat1_pb6",
+       "sdmmc1_wp_n_pv3",
+};
+
+static const char * const uartb_groups[] = {
+       "uart2_rts_n_pj6",
+       "uart2_cts_n_pj5",
+};
+
+static const char * const uartc_groups[] = {
+       "uart3_txd_pw6",
+       "uart3_rxd_pw7",
+       "uart3_cts_n_pa1",
+       "uart3_rts_n_pc0",
+};
+
+static const char * const uartd_groups[] = {
+       "ulpi_clk_py0",
+       "ulpi_dir_py1",
+       "ulpi_nxt_py2",
+       "ulpi_stp_py3",
+       "gmi_a16_pj7",
+       "gmi_a17_pb0",
+       "gmi_a18_pb1",
+       "gmi_a19_pk7",
+};
+
+static const char * const ulpi_groups[] = {
+       "ulpi_data0_po1",
+       "ulpi_data1_po2",
+       "ulpi_data2_po3",
+       "ulpi_data3_po4",
+       "ulpi_data4_po5",
+       "ulpi_data5_po6",
+       "ulpi_data6_po7",
+       "ulpi_data7_po0",
+       "ulpi_clk_py0",
+       "ulpi_dir_py1",
+       "ulpi_nxt_py2",
+       "ulpi_stp_py3",
+};
+
+static const char * const usb_groups[] = {
+       "pv0",
+       "pu6",
+       "gmi_cs0_n_pj0",
+       "gmi_cs4_n_pk2",
+       "gmi_ad11_ph3",
+       "kb_col0_pq0",
+       "spdif_in_pk6",
+       "usb_vbus_en0_pn4",
+       "usb_vbus_en1_pn5",
+};
+
+static const char * const vgp1_groups[] = {
+       "cam_i2c_scl_pbb1",
+};
+
+static const char * const vgp2_groups[] = {
+       "cam_i2c_sda_pbb2",
+};
+
+static const char * const vgp3_groups[] = {
+       "pbb3",
+};
+
+static const char * const vgp4_groups[] = {
+       "pbb4",
+};
+
+static const char * const vgp5_groups[] = {
+       "pbb5",
+};
+
+static const char * const vgp6_groups[] = {
+       "pbb6",
+};
+
+static const char * const vi_groups[] = {
+       "cam_mclk_pcc0",
+       "pbb0",
+};
+
+static const char * const vi_alt1_groups[] = {
+       "cam_mclk_pcc0",
+       "pbb0",
+};
+
+static const char * const vi_alt3_groups[] = {
+       "cam_mclk_pcc0",
+       "pbb0",
+};
+
+#define FUNCTION(fname)                                        \
+       {                                               \
+               .name = #fname,                         \
+               .groups = fname##_groups,               \
+               .ngroups = ARRAY_SIZE(fname##_groups),  \
+       }
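(For illustration only — not part of the patch: FUNCTION() builds one tegra_function entry from the function name, picking up the matching <name>_groups array defined above. A sketch of what FUNCTION(blink) expands to:)

	{
		.name = "blink",			/* illustrative expansion, not in the patch */
		.groups = blink_groups,
		.ngroups = ARRAY_SIZE(blink_groups),
	},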
+
+static const struct tegra_function tegra114_functions[] = {
+       FUNCTION(blink),
+       FUNCTION(cec),
+       FUNCTION(cldvfs),
+       FUNCTION(clk12),
+       FUNCTION(cpu),
+       FUNCTION(dap),
+       FUNCTION(dap1),
+       FUNCTION(dap2),
+       FUNCTION(dev3),
+       FUNCTION(displaya),
+       FUNCTION(displaya_alt),
+       FUNCTION(displayb),
+       FUNCTION(dtv),
+       FUNCTION(emc_dll),
+       FUNCTION(extperiph1),
+       FUNCTION(extperiph2),
+       FUNCTION(extperiph3),
+       FUNCTION(gmi),
+       FUNCTION(gmi_alt),
+       FUNCTION(hda),
+       FUNCTION(hsi),
+       FUNCTION(i2c1),
+       FUNCTION(i2c2),
+       FUNCTION(i2c3),
+       FUNCTION(i2c4),
+       FUNCTION(i2cpwr),
+       FUNCTION(i2s0),
+       FUNCTION(i2s1),
+       FUNCTION(i2s2),
+       FUNCTION(i2s3),
+       FUNCTION(i2s4),
+       FUNCTION(irda),
+       FUNCTION(kbc),
+       FUNCTION(nand),
+       FUNCTION(nand_alt),
+       FUNCTION(owr),
+       FUNCTION(pmi),
+       FUNCTION(pwm0),
+       FUNCTION(pwm1),
+       FUNCTION(pwm2),
+       FUNCTION(pwm3),
+       FUNCTION(pwron),
+       FUNCTION(reset_out_n),
+       FUNCTION(rsvd1),
+       FUNCTION(rsvd2),
+       FUNCTION(rsvd3),
+       FUNCTION(rsvd4),
+       FUNCTION(sdmmc1),
+       FUNCTION(sdmmc2),
+       FUNCTION(sdmmc3),
+       FUNCTION(sdmmc4),
+       FUNCTION(soc),
+       FUNCTION(spdif),
+       FUNCTION(spi1),
+       FUNCTION(spi2),
+       FUNCTION(spi3),
+       FUNCTION(spi4),
+       FUNCTION(spi5),
+       FUNCTION(spi6),
+       FUNCTION(sysclk),
+       FUNCTION(trace),
+       FUNCTION(uarta),
+       FUNCTION(uartb),
+       FUNCTION(uartc),
+       FUNCTION(uartd),
+       FUNCTION(ulpi),
+       FUNCTION(usb),
+       FUNCTION(vgp1),
+       FUNCTION(vgp2),
+       FUNCTION(vgp3),
+       FUNCTION(vgp4),
+       FUNCTION(vgp5),
+       FUNCTION(vgp6),
+       FUNCTION(vi),
+       FUNCTION(vi_alt1),
+       FUNCTION(vi_alt3),
+};
+
+#define DRV_PINGROUP_REG_START                 0x868   /* bank 0 */
+#define PINGROUP_REG_START                     0x3000  /* bank 1 */
+
+#define PINGROUP_REG_Y(r)                      ((r) - PINGROUP_REG_START)
+#define PINGROUP_REG_N(r)                      -1
+
+#define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior, rcv_sel) \
+       {                                                               \
+               .name = #pg_name,                                       \
+               .pins = pg_name##_pins,                                 \
+               .npins = ARRAY_SIZE(pg_name##_pins),                    \
+               .funcs = {                                              \
+                       TEGRA_MUX_##f0,                                 \
+                       TEGRA_MUX_##f1,                                 \
+                       TEGRA_MUX_##f2,                                 \
+                       TEGRA_MUX_##f3,                                 \
+               },                                                      \
+               .func_safe = TEGRA_MUX_##f_safe,                        \
+               .mux_reg = PINGROUP_REG_Y(r),                           \
+               .mux_bank = 1,                                          \
+               .mux_bit = 0,                                           \
+               .pupd_reg = PINGROUP_REG_Y(r),                          \
+               .pupd_bank = 1,                                         \
+               .pupd_bit = 2,                                          \
+               .tri_reg = PINGROUP_REG_Y(r),                           \
+               .tri_bank = 1,                                          \
+               .tri_bit = 4,                                           \
+               .einput_reg = PINGROUP_REG_Y(r),                        \
+               .einput_bank = 1,                                       \
+               .einput_bit = 5,                                        \
+               .odrain_reg = PINGROUP_REG_##od(r),                     \
+               .odrain_bank = 1,                                       \
+               .odrain_bit = 6,                                        \
+               .lock_reg = PINGROUP_REG_Y(r),                          \
+               .lock_bank = 1,                                         \
+               .lock_bit = 7,                                          \
+               .ioreset_reg = PINGROUP_REG_##ior(r),                   \
+               .ioreset_bank = 1,                                      \
+               .ioreset_bit = 8,                                       \
+               .rcv_sel_reg = PINGROUP_REG_##rcv_sel(r),               \
+               .rcv_sel_bank = 1,                                      \
+               .rcv_sel_bit = 9,                                       \
+               .drv_reg = -1,                                          \
+               .drvtype_reg = -1,                                      \
+       }
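(For illustration only — not part of the patch: the od, ior and rcv_sel arguments are token-pasted into PINGROUP_REG_##x(r), so a "Y" resolves to the bank-1 offset (r - PINGROUP_REG_START) and an "N" resolves to -1, meaning "no such register". Taking the hdmi_int_pn7 row below (r = 0x3110, od = N, ior = N, rcv_sel = Y), the relevant fields come out roughly as:)

	.odrain_reg  = -1,		/* od = N: open-drain control not present */
	.ioreset_reg = -1,		/* ior = N */
	.rcv_sel_reg = 0x110,		/* rcv_sel = Y: 0x3110 - 0x3000 */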
+
+#define DRV_PINGROUP_DVRTYPE_Y(r) ((r) - DRV_PINGROUP_REG_START)
+#define DRV_PINGROUP_DVRTYPE_N(r) -1
+
+#define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b,             \
+                       drvdn_b, drvdn_w, drvup_b, drvup_w,             \
+                       slwr_b, slwr_w, slwf_b, slwf_w,                 \
+                       drvtype)                                        \
+       {                                                               \
+               .name = "drive_" #pg_name,                              \
+               .pins = drive_##pg_name##_pins,                         \
+               .npins = ARRAY_SIZE(drive_##pg_name##_pins),            \
+               .mux_reg = -1,                                          \
+               .pupd_reg = -1,                                         \
+               .tri_reg = -1,                                          \
+               .einput_reg = -1,                                       \
+               .odrain_reg = -1,                                       \
+               .lock_reg = -1,                                         \
+               .ioreset_reg = -1,                                      \
+               .rcv_sel_reg = -1,                                      \
+               .drv_reg = DRV_PINGROUP_DVRTYPE_Y(r),                   \
+               .drv_bank = 0,                                          \
+               .hsm_bit = hsm_b,                                       \
+               .schmitt_bit = schmitt_b,                               \
+               .lpmd_bit = lpmd_b,                                     \
+               .drvdn_bit = drvdn_b,                                   \
+               .drvdn_width = drvdn_w,                                 \
+               .drvup_bit = drvup_b,                                   \
+               .drvup_width = drvup_w,                                 \
+               .slwr_bit = slwr_b,                                     \
+               .slwr_width = slwr_w,                                   \
+               .slwf_bit = slwf_b,                                     \
+               .slwf_width = slwf_w,                                   \
+               .drvtype_reg = DRV_PINGROUP_DVRTYPE_##drvtype(r),       \
+               .drvtype_bank = 0,                                      \
+               .drvtype_bit = 6,                                       \
+       }
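(For illustration only — not part of the patch: DRV_PINGROUP() uses the same trick for the optional drive-type register — DRV_PINGROUP_DVRTYPE_Y(r) yields the bank-0 offset (r - DRV_PINGROUP_REG_START) while DRV_PINGROUP_DVRTYPE_N(r) yields -1. For the at1 entry below (r = 0x870, drvtype = Y) that gives roughly:)

	.drv_reg     = 0x8,		/* 0x870 - 0x868 */
	.drvtype_reg = 0x8,		/* drvtype = Y */
	.drvtype_bit = 6,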
+
+static const struct tegra_pingroup tegra114_groups[] = {
+       /*       pg_name,                f0,         f1,         f2,           f3,          safe,     r,      od, ior, rcv_sel */
+       /* FIXME: Fill in correct data in safe column */
+       PINGROUP(ulpi_data0_po1,         SPI3,       HSI,        UARTA,        ULPI,        ULPI,     0x3000,  N,  N,  N),
+       PINGROUP(ulpi_data1_po2,         SPI3,       HSI,        UARTA,        ULPI,        ULPI,     0x3004,  N,  N,  N),
+       PINGROUP(ulpi_data2_po3,         SPI3,       HSI,        UARTA,        ULPI,        ULPI,     0x3008,  N,  N,  N),
+       PINGROUP(ulpi_data3_po4,         SPI3,       HSI,        UARTA,        ULPI,        ULPI,     0x300c,  N,  N,  N),
+       PINGROUP(ulpi_data4_po5,         SPI2,       HSI,        UARTA,        ULPI,        ULPI,     0x3010,  N,  N,  N),
+       PINGROUP(ulpi_data5_po6,         SPI2,       HSI,        UARTA,        ULPI,        ULPI,     0x3014,  N,  N,  N),
+       PINGROUP(ulpi_data6_po7,         SPI2,       HSI,        UARTA,        ULPI,        ULPI,     0x3018,  N,  N,  N),
+       PINGROUP(ulpi_data7_po0,         SPI2,       HSI,        UARTA,        ULPI,        ULPI,     0x301c,  N,  N,  N),
+       PINGROUP(ulpi_clk_py0,           SPI1,       SPI5,       UARTD,        ULPI,        ULPI,     0x3020,  N,  N,  N),
+       PINGROUP(ulpi_dir_py1,           SPI1,       SPI5,       UARTD,        ULPI,        ULPI,     0x3024,  N,  N,  N),
+       PINGROUP(ulpi_nxt_py2,           SPI1,       SPI5,       UARTD,        ULPI,        ULPI,     0x3028,  N,  N,  N),
+       PINGROUP(ulpi_stp_py3,           SPI1,       SPI5,       UARTD,        ULPI,        ULPI,     0x302c,  N,  N,  N),
+       PINGROUP(dap3_fs_pp0,            I2S2,       SPI5,       DISPLAYA,     DISPLAYB,    I2S2,     0x3030,  N,  N,  N),
+       PINGROUP(dap3_din_pp1,           I2S2,       SPI5,       DISPLAYA,     DISPLAYB,    I2S2,     0x3034,  N,  N,  N),
+       PINGROUP(dap3_dout_pp2,          I2S2,       SPI5,       DISPLAYA,     DISPLAYB,    I2S2,     0x3038,  N,  N,  N),
+       PINGROUP(dap3_sclk_pp3,          I2S2,       SPI5,       DISPLAYA,     DISPLAYB,    I2S2,     0x303c,  N,  N,  N),
+       PINGROUP(pv0,                    USB,        RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x3040,  N,  N,  N),
+       PINGROUP(pv1,                    RSVD1,      RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x3044,  N,  N,  N),
+       PINGROUP(sdmmc1_clk_pz0,         SDMMC1,     CLK12,      RSVD3,        RSVD4,       RSVD4,    0x3048,  N,  N,  N),
+       PINGROUP(sdmmc1_cmd_pz1,         SDMMC1,     SPDIF,      SPI4,         UARTA,       SDMMC1,   0x304c,  N,  N,  N),
+       PINGROUP(sdmmc1_dat3_py4,        SDMMC1,     SPDIF,      SPI4,         UARTA,       SDMMC1,   0x3050,  N,  N,  N),
+       PINGROUP(sdmmc1_dat2_py5,        SDMMC1,     PWM0,       SPI4,         UARTA,       SDMMC1,   0x3054,  N,  N,  N),
+       PINGROUP(sdmmc1_dat1_py6,        SDMMC1,     PWM1,       SPI4,         UARTA,       SDMMC1,   0x3058,  N,  N,  N),
+       PINGROUP(sdmmc1_dat0_py7,        SDMMC1,     RSVD2,      SPI4,         UARTA,       RSVD2,    0x305c,  N,  N,  N),
+       PINGROUP(clk2_out_pw5,           EXTPERIPH2, RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x3068,  N,  N,  N),
+       PINGROUP(clk2_req_pcc5,          DAP,        RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x306c,  N,  N,  N),
+       PINGROUP(hdmi_int_pn7,           RSVD1,      RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x3110,  N,  N,  Y),
+       PINGROUP(ddc_scl_pv4,            I2C4,       RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x3114,  N,  N,  Y),
+       PINGROUP(ddc_sda_pv5,            I2C4,       RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x3118,  N,  N,  Y),
+       PINGROUP(uart2_rxd_pc3,          IRDA,       SPDIF,      UARTA,        SPI4,        IRDA,     0x3164,  N,  N,  N),
+       PINGROUP(uart2_txd_pc2,          IRDA,       SPDIF,      UARTA,        SPI4,        IRDA,     0x3168,  N,  N,  N),
+       PINGROUP(uart2_rts_n_pj6,        UARTA,      UARTB,      RSVD3,        SPI4,        RSVD3,    0x316c,  N,  N,  N),
+       PINGROUP(uart2_cts_n_pj5,        UARTA,      UARTB,      RSVD3,        SPI4,        RSVD3,    0x3170,  N,  N,  N),
+       PINGROUP(uart3_txd_pw6,          UARTC,      RSVD2,      RSVD3,        SPI4,        RSVD3,    0x3174,  N,  N,  N),
+       PINGROUP(uart3_rxd_pw7,          UARTC,      RSVD2,      RSVD3,        SPI4,        RSVD3,    0x3178,  N,  N,  N),
+       PINGROUP(uart3_cts_n_pa1,        UARTC,      SDMMC1,     DTV,          SPI4,        UARTC,    0x317c,  N,  N,  N),
+       PINGROUP(uart3_rts_n_pc0,        UARTC,      PWM0,       DTV,          DISPLAYA,    UARTC,    0x3180,  N,  N,  N),
+       PINGROUP(pu0,                    OWR,        UARTA,      RSVD3,        RSVD4,       RSVD4,    0x3184,  N,  N,  N),
+       PINGROUP(pu1,                    RSVD1,      UARTA,      RSVD3,        RSVD4,       RSVD4,    0x3188,  N,  N,  N),
+       PINGROUP(pu2,                    RSVD1,      UARTA,      RSVD3,        RSVD4,       RSVD4,    0x318c,  N,  N,  N),
+       PINGROUP(pu3,                    PWM0,       UARTA,      DISPLAYA,     DISPLAYB,    PWM0,     0x3190,  N,  N,  N),
+       PINGROUP(pu4,                    PWM1,       UARTA,      DISPLAYA,     DISPLAYB,    PWM1,     0x3194,  N,  N,  N),
+       PINGROUP(pu5,                    PWM2,       UARTA,      DISPLAYA,     DISPLAYB,    PWM2,     0x3198,  N,  N,  N),
+       PINGROUP(pu6,                    PWM3,       UARTA,      USB,          DISPLAYB,    PWM3,     0x319c,  N,  N,  N),
+       PINGROUP(gen1_i2c_sda_pc5,       I2C1,       RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x31a0,  Y,  N,  N),
+       PINGROUP(gen1_i2c_scl_pc4,       I2C1,       RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x31a4,  Y,  N,  N),
+       PINGROUP(dap4_fs_pp4,            I2S3,       RSVD2,      DTV,          RSVD4,       RSVD4,    0x31a8,  N,  N,  N),
+       PINGROUP(dap4_din_pp5,           I2S3,       RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x31ac,  N,  N,  N),
+       PINGROUP(dap4_dout_pp6,          I2S3,       RSVD2,      DTV,          RSVD4,       RSVD4,    0x31b0,  N,  N,  N),
+       PINGROUP(dap4_sclk_pp7,          I2S3,       RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x31b4,  N,  N,  N),
+       PINGROUP(clk3_out_pee0,          EXTPERIPH3, RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x31b8,  N,  N,  N),
+       PINGROUP(clk3_req_pee1,          DEV3,       RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x31bc,  N,  N,  N),
+       PINGROUP(gmi_wp_n_pc7,           RSVD1,      NAND,       GMI,          GMI_ALT,     RSVD1,    0x31c0,  N,  N,  N),
+       PINGROUP(gmi_iordy_pi5,          SDMMC2,     RSVD2,      GMI,          TRACE,       RSVD2,    0x31c4,  N,  N,  N),
+       PINGROUP(gmi_wait_pi7,           SPI4,       NAND,       GMI,          DTV,         NAND,     0x31c8,  N,  N,  N),
+       PINGROUP(gmi_adv_n_pk0,          RSVD1,      NAND,       GMI,          TRACE,       RSVD1,    0x31cc,  N,  N,  N),
+       PINGROUP(gmi_clk_pk1,            SDMMC2,     NAND,       GMI,          TRACE,       GMI,      0x31d0,  N,  N,  N),
+       PINGROUP(gmi_cs0_n_pj0,          RSVD1,      NAND,       GMI,          USB,         RSVD1,    0x31d4,  N,  N,  N),
+       PINGROUP(gmi_cs1_n_pj2,          RSVD1,      NAND,       GMI,          SOC,         RSVD1,    0x31d8,  N,  N,  N),
+       PINGROUP(gmi_cs2_n_pk3,          SDMMC2,     NAND,       GMI,          TRACE,       GMI,      0x31dc,  N,  N,  N),
+       PINGROUP(gmi_cs3_n_pk4,          SDMMC2,     NAND,       GMI,          GMI_ALT,     GMI,      0x31e0,  N,  N,  N),
+       PINGROUP(gmi_cs4_n_pk2,          USB,        NAND,       GMI,          TRACE,       GMI,      0x31e4,  N,  N,  N),
+       PINGROUP(gmi_cs6_n_pi3,          NAND,       NAND_ALT,   GMI,          SPI4,        NAND,     0x31e8,  N,  N,  N),
+       PINGROUP(gmi_cs7_n_pi6,          NAND,       NAND_ALT,   GMI,          SDMMC2,      NAND,     0x31ec,  N,  N,  N),
+       PINGROUP(gmi_ad0_pg0,            RSVD1,      NAND,       GMI,          RSVD4,       RSVD4,    0x31f0,  N,  N,  N),
+       PINGROUP(gmi_ad1_pg1,            RSVD1,      NAND,       GMI,          RSVD4,       RSVD4,    0x31f4,  N,  N,  N),
+       PINGROUP(gmi_ad2_pg2,            RSVD1,      NAND,       GMI,          RSVD4,       RSVD4,    0x31f8,  N,  N,  N),
+       PINGROUP(gmi_ad3_pg3,            RSVD1,      NAND,       GMI,          RSVD4,       RSVD4,    0x31fc,  N,  N,  N),
+       PINGROUP(gmi_ad4_pg4,            RSVD1,      NAND,       GMI,          RSVD4,       RSVD4,    0x3200,  N,  N,  N),
+       PINGROUP(gmi_ad5_pg5,            RSVD1,      NAND,       GMI,          SPI4,        RSVD1,    0x3204,  N,  N,  N),
+       PINGROUP(gmi_ad6_pg6,            RSVD1,      NAND,       GMI,          SPI4,        RSVD1,    0x3208,  N,  N,  N),
+       PINGROUP(gmi_ad7_pg7,            RSVD1,      NAND,       GMI,          SPI4,        RSVD1,    0x320c,  N,  N,  N),
+       PINGROUP(gmi_ad8_ph0,            PWM0,       NAND,       GMI,          DTV,         GMI,      0x3210,  N,  N,  N),
+       PINGROUP(gmi_ad9_ph1,            PWM1,       NAND,       GMI,          CLDVFS,      GMI,      0x3214,  N,  N,  N),
+       PINGROUP(gmi_ad10_ph2,           PWM2,       NAND,       GMI,          CLDVFS,      GMI,      0x3218,  N,  N,  N),
+       PINGROUP(gmi_ad11_ph3,           PWM3,       NAND,       GMI,          USB,         GMI,      0x321c,  N,  N,  N),
+       PINGROUP(gmi_ad12_ph4,           SDMMC2,     NAND,       GMI,          RSVD4,       RSVD4,    0x3220,  N,  N,  N),
+       PINGROUP(gmi_ad13_ph5,           SDMMC2,     NAND,       GMI,          RSVD4,       RSVD4,    0x3224,  N,  N,  N),
+       PINGROUP(gmi_ad14_ph6,           SDMMC2,     NAND,       GMI,          DTV,         GMI,      0x3228,  N,  N,  N),
+       PINGROUP(gmi_ad15_ph7,           SDMMC2,     NAND,       GMI,          DTV,         GMI,      0x322c,  N,  N,  N),
+       PINGROUP(gmi_a16_pj7,            UARTD,      TRACE,      GMI,          GMI_ALT,     GMI,      0x3230,  N,  N,  N),
+       PINGROUP(gmi_a17_pb0,            UARTD,      RSVD2,      GMI,          TRACE,       RSVD2,    0x3234,  N,  N,  N),
+       PINGROUP(gmi_a18_pb1,            UARTD,      RSVD2,      GMI,          TRACE,       RSVD2,    0x3238,  N,  N,  N),
+       PINGROUP(gmi_a19_pk7,            UARTD,      SPI4,       GMI,          TRACE,       GMI,      0x323c,  N,  N,  N),
+       PINGROUP(gmi_wr_n_pi0,           RSVD1,      NAND,       GMI,          SPI4,        RSVD1,    0x3240,  N,  N,  N),
+       PINGROUP(gmi_oe_n_pi1,           RSVD1,      NAND,       GMI,          SOC,         RSVD1,    0x3244,  N,  N,  N),
+       PINGROUP(gmi_dqs_p_pj3,          SDMMC2,     NAND,       GMI,          TRACE,       NAND,     0x3248,  N,  N,  N),
+       PINGROUP(gmi_rst_n_pi4,          NAND,       NAND_ALT,   GMI,          RSVD4,       RSVD4,    0x324c,  N,  N,  N),
+       PINGROUP(gen2_i2c_scl_pt5,       I2C2,       RSVD2,      GMI,          RSVD4,       RSVD4,    0x3250,  Y,  N,  N),
+       PINGROUP(gen2_i2c_sda_pt6,       I2C2,       RSVD2,      GMI,          RSVD4,       RSVD4,    0x3254,  Y,  N,  N),
+       PINGROUP(sdmmc4_clk_pcc4,        SDMMC4,     RSVD2,      GMI,          RSVD4,       RSVD4,    0x3258,  N,  Y,  N),
+       PINGROUP(sdmmc4_cmd_pt7,         SDMMC4,     RSVD2,      GMI,          RSVD4,       RSVD4,    0x325c,  N,  Y,  N),
+       PINGROUP(sdmmc4_dat0_paa0,       SDMMC4,     SPI3,       GMI,          RSVD4,       RSVD4,    0x3260,  N,  Y,  N),
+       PINGROUP(sdmmc4_dat1_paa1,       SDMMC4,     SPI3,       GMI,          RSVD4,       RSVD4,    0x3264,  N,  Y,  N),
+       PINGROUP(sdmmc4_dat2_paa2,       SDMMC4,     SPI3,       GMI,          RSVD4,       RSVD4,    0x3268,  N,  Y,  N),
+       PINGROUP(sdmmc4_dat3_paa3,       SDMMC4,     SPI3,       GMI,          RSVD4,       RSVD4,    0x326c,  N,  Y,  N),
+       PINGROUP(sdmmc4_dat4_paa4,       SDMMC4,     SPI3,       GMI,          RSVD4,       RSVD4,    0x3270,  N,  Y,  N),
+       PINGROUP(sdmmc4_dat5_paa5,       SDMMC4,     SPI3,       GMI,          RSVD4,       RSVD4,    0x3274,  N,  Y,  N),
+       PINGROUP(sdmmc4_dat6_paa6,       SDMMC4,     SPI3,       GMI,          RSVD4,       RSVD4,    0x3278,  N,  Y,  N),
+       PINGROUP(sdmmc4_dat7_paa7,       SDMMC4,     RSVD2,      GMI,          RSVD4,       RSVD4,    0x327c,  N,  Y,  N),
+       PINGROUP(cam_mclk_pcc0,          VI,         VI_ALT1,    VI_ALT3,      RSVD4,       RSVD4,    0x3284,  N,  N,  N),
+       PINGROUP(pcc1,                   I2S4,       RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x3288,  N,  N,  N),
+       PINGROUP(pbb0,                   I2S4,       VI,         VI_ALT1,      VI_ALT3,     I2S4,     0x328c,  N,  N,  N),
+       PINGROUP(cam_i2c_scl_pbb1,       VGP1,       I2C3,       RSVD3,        RSVD4,       RSVD4,    0x3290,  Y,  N,  N),
+       PINGROUP(cam_i2c_sda_pbb2,       VGP2,       I2C3,       RSVD3,        RSVD4,       RSVD4,    0x3294,  Y,  N,  N),
+       PINGROUP(pbb3,                   VGP3,       DISPLAYA,   DISPLAYB,     RSVD4,       RSVD4,    0x3298,  N,  N,  N),
+       PINGROUP(pbb4,                   VGP4,       DISPLAYA,   DISPLAYB,     RSVD4,       RSVD4,    0x329c,  N,  N,  N),
+       PINGROUP(pbb5,                   VGP5,       DISPLAYA,   DISPLAYB,     RSVD4,       RSVD4,    0x32a0,  N,  N,  N),
+       PINGROUP(pbb6,                   VGP6,       DISPLAYA,   DISPLAYB,     RSVD4,       RSVD4,    0x32a4,  N,  N,  N),
+       PINGROUP(pbb7,                   I2S4,       RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x32a8,  N,  N,  N),
+       PINGROUP(pcc2,                   I2S4,       RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x32ac,  N,  N,  N),
+       PINGROUP(pwr_i2c_scl_pz6,        I2CPWR,     RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x32b4,  Y,  N,  N),
+       PINGROUP(pwr_i2c_sda_pz7,        I2CPWR,     RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x32b8,  Y,  N,  N),
+       PINGROUP(kb_row0_pr0,            KBC,        RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x32bc,  N,  N,  N),
+       PINGROUP(kb_row1_pr1,            KBC,        RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x32c0,  N,  N,  N),
+       PINGROUP(kb_row2_pr2,            KBC,        RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x32c4,  N,  N,  N),
+       PINGROUP(kb_row3_pr3,            KBC,        DISPLAYA,   RSVD3,        DISPLAYB,    RSVD3,    0x32c8,  N,  N,  N),
+       PINGROUP(kb_row4_pr4,            KBC,        DISPLAYA,   SPI2,         DISPLAYB,    KBC,      0x32cc,  N,  N,  N),
+       PINGROUP(kb_row5_pr5,            KBC,        DISPLAYA,   SPI2,         DISPLAYB,    KBC,      0x32d0,  N,  N,  N),
+       PINGROUP(kb_row6_pr6,            KBC,        DISPLAYA,   DISPLAYA_ALT, DISPLAYB,    KBC,      0x32d4,  N,  N,  N),
+       PINGROUP(kb_row7_pr7,            KBC,        RSVD2,      CLDVFS,       UARTA,       RSVD2,    0x32d8,  N,  N,  N),
+       PINGROUP(kb_row8_ps0,            KBC,        RSVD2,      CLDVFS,       UARTA,       RSVD2,    0x32dc,  N,  N,  N),
+       PINGROUP(kb_row9_ps1,            KBC,        RSVD2,      RSVD3,        UARTA,       RSVD3,    0x32e0,  N,  N,  N),
+       PINGROUP(kb_row10_ps2,           KBC,        RSVD2,      RSVD3,        UARTA,       RSVD3,    0x32e4,  N,  N,  N),
+       PINGROUP(kb_col0_pq0,            KBC,        USB,        SPI2,         EMC_DLL,     KBC,      0x32fc,  N,  N,  N),
+       PINGROUP(kb_col1_pq1,            KBC,        RSVD2,      SPI2,         EMC_DLL,     RSVD2,    0x3300,  N,  N,  N),
+       PINGROUP(kb_col2_pq2,            KBC,        RSVD2,      SPI2,         RSVD4,       RSVD2,    0x3304,  N,  N,  N),
+       PINGROUP(kb_col3_pq3,            KBC,        DISPLAYA,   PWM2,         UARTA,       KBC,      0x3308,  N,  N,  N),
+       PINGROUP(kb_col4_pq4,            KBC,        OWR,        SDMMC3,       UARTA,       KBC,      0x330c,  N,  N,  N),
+       PINGROUP(kb_col5_pq5,            KBC,        RSVD2,      SDMMC1,       RSVD4,       RSVD4,    0x3310,  N,  N,  N),
+       PINGROUP(kb_col6_pq6,            KBC,        RSVD2,      SPI2,         RSVD4,       RSVD4,    0x3314,  N,  N,  N),
+       PINGROUP(kb_col7_pq7,            KBC,        RSVD2,      SPI2,         RSVD4,       RSVD4,    0x3318,  N,  N,  N),
+       PINGROUP(clk_32k_out_pa0,        BLINK,      SOC,        RSVD3,        RSVD4,       RSVD4,    0x331c,  N,  N,  N),
+       PINGROUP(sys_clk_req_pz5,        SYSCLK,     RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x3320,  N,  N,  N),
+       PINGROUP(core_pwr_req,           PWRON,      RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x3324,  N,  N,  N),
+       PINGROUP(cpu_pwr_req,            CPU,        RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x3328,  N,  N,  N),
+       PINGROUP(pwr_int_n,              PMI,        RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x332c,  N,  N,  N),
+       PINGROUP(owr,                    OWR,        RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x3334,  N,  N,  Y),
+       PINGROUP(dap1_fs_pn0,            I2S0,       HDA,        GMI,          RSVD4,       RSVD4,    0x3338,  N,  N,  N),
+       PINGROUP(dap1_din_pn1,           I2S0,       HDA,        GMI,          RSVD4,       RSVD4,    0x333c,  N,  N,  N),
+       PINGROUP(dap1_dout_pn2,          I2S0,       HDA,        GMI,          RSVD4,       RSVD4,    0x3340,  N,  N,  N),
+       PINGROUP(dap1_sclk_pn3,          I2S0,       HDA,        GMI,          RSVD4,       RSVD4,    0x3344,  N,  N,  N),
+       PINGROUP(clk1_req_pee2,          DAP,        DAP1,       RSVD3,        RSVD4,       RSVD4,    0x3348,  N,  N,  N),
+       PINGROUP(clk1_out_pw4,           EXTPERIPH1, DAP2,       RSVD3,        RSVD4,       RSVD4,    0x334c,  N,  N,  N),
+       PINGROUP(spdif_in_pk6,           SPDIF,      USB,        RSVD3,        RSVD4,       RSVD4,    0x3350,  N,  N,  N),
+       PINGROUP(spdif_out_pk5,          SPDIF,      RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x3354,  N,  N,  N),
+       PINGROUP(dap2_fs_pa2,            I2S1,       HDA,        RSVD3,        RSVD4,       RSVD4,    0x3358,  N,  N,  N),
+       PINGROUP(dap2_din_pa4,           I2S1,       HDA,        RSVD3,        RSVD4,       RSVD4,    0x335c,  N,  N,  N),
+       PINGROUP(dap2_dout_pa5,          I2S1,       HDA,        RSVD3,        RSVD4,       RSVD4,    0x3360,  N,  N,  N),
+       PINGROUP(dap2_sclk_pa3,          I2S1,       HDA,        RSVD3,        RSVD4,       RSVD4,    0x3364,  N,  N,  N),
+       PINGROUP(dvfs_pwm_px0,           SPI6,       CLDVFS,     RSVD3,        RSVD4,       RSVD4,    0x3368,  N,  N,  N),
+       PINGROUP(gpio_x1_aud_px1,        SPI6,       RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x336c,  N,  N,  N),
+       PINGROUP(gpio_x3_aud_px3,        SPI6,       SPI1,       RSVD3,        RSVD4,       RSVD4,    0x3370,  N,  N,  N),
+       PINGROUP(dvfs_clk_px2,           SPI6,       CLDVFS,     RSVD3,        RSVD4,       RSVD4,    0x3374,  N,  N,  N),
+       PINGROUP(gpio_x4_aud_px4,        RSVD1,      SPI1,       SPI2,         DAP2,        RSVD1,    0x3378,  N,  N,  N),
+       PINGROUP(gpio_x5_aud_px5,        RSVD1,      SPI1,       SPI2,         RSVD4,       RSVD1,    0x337c,  N,  N,  N),
+       PINGROUP(gpio_x6_aud_px6,        SPI6,       SPI1,       SPI2,         RSVD4,       RSVD4,    0x3380,  N,  N,  N),
+       PINGROUP(gpio_x7_aud_px7,        RSVD1,      SPI1,       SPI2,         RSVD4,       RSVD4,    0x3384,  N,  N,  N),
+       PINGROUP(sdmmc3_clk_pa6,         SDMMC3,     RSVD2,      RSVD3,        SPI3,        RSVD3,    0x3390,  N,  N,  N),
+       PINGROUP(sdmmc3_cmd_pa7,         SDMMC3,     PWM3,       UARTA,        SPI3,        SDMMC3,   0x3394,  N,  N,  N),
+       PINGROUP(sdmmc3_dat0_pb7,        SDMMC3,     RSVD2,      RSVD3,        SPI3,        RSVD3,    0x3398,  N,  N,  N),
+       PINGROUP(sdmmc3_dat1_pb6,        SDMMC3,     PWM2,       UARTA,        SPI3,        SDMMC3,   0x339c,  N,  N,  N),
+       PINGROUP(sdmmc3_dat2_pb5,        SDMMC3,     PWM1,       DISPLAYA,     SPI3,        SDMMC3,   0x33a0,  N,  N,  N),
+       PINGROUP(sdmmc3_dat3_pb4,        SDMMC3,     PWM0,       DISPLAYB,     SPI3,        SDMMC3,   0x33a4,  N,  N,  N),
+       PINGROUP(hdmi_cec_pee3,          CEC,        SDMMC3,     RSVD3,        SOC,         RSVD3,    0x33e0,  Y,  N,  N),
+       PINGROUP(sdmmc1_wp_n_pv3,        SDMMC1,     CLK12,      SPI4,         UARTA,       SDMMC1,   0x33e4,  N,  N,  N),
+       PINGROUP(sdmmc3_cd_n_pv2,        SDMMC3,     OWR,        RSVD3,        RSVD4,       RSVD4,    0x33e8,  N,  N,  N),
+       PINGROUP(gpio_w2_aud_pw2,        SPI6,       RSVD2,      SPI2,         I2C1,        RSVD2,    0x33ec,  N,  N,  N),
+       PINGROUP(gpio_w3_aud_pw3,        SPI6,       SPI1,       SPI2,         I2C1,        SPI6,     0x33f0,  N,  N,  N),
+       PINGROUP(usb_vbus_en0_pn4,       USB,        RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x33f4,  Y,  N,  N),
+       PINGROUP(usb_vbus_en1_pn5,       USB,        RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x33f8,  Y,  N,  N),
+       PINGROUP(sdmmc3_clk_lb_in_pee5,  SDMMC3,     RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x33fc,  N,  N,  N),
+       PINGROUP(sdmmc3_clk_lb_out_pee4, SDMMC3,     RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x3400,  N,  N,  N),
+       PINGROUP(reset_out_n,            RSVD1,      RSVD2,      RSVD3,        RESET_OUT_N, RSVD3,    0x3408,  N,  N,  N),
+
+       /* pg_name, r, hsm_b, schmitt_b, lpmd_b, drvdn_b, drvdn_w, drvup_b, drvup_w, slwr_b, slwr_w, slwf_b, slwf_w, drvtype */
+       DRV_PINGROUP(ao1,   0x868,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(ao2,   0x86c,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(at1,   0x870,  2,  3,  4,  12,  7,  20,  7,  28,  2,  30,  2,  Y),
+       DRV_PINGROUP(at2,   0x874,  2,  3,  4,  12,  7,  20,  7,  28,  2,  30,  2,  Y),
+       DRV_PINGROUP(at3,   0x878,  2,  3,  4,  12,  7,  20,  7,  28,  2,  30,  2,  Y),
+       DRV_PINGROUP(at4,   0x87c,  2,  3,  4,  12,  7,  20,  7,  28,  2,  30,  2,  Y),
+       DRV_PINGROUP(at5,   0x880,  2,  3,  4,  14,  5,  19,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(cdev1, 0x884,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(cdev2, 0x888,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(dap1,  0x890,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(dap2,  0x894,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(dap3,  0x898,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(dap4,  0x89c,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(dbg,   0x8a0,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(sdio3, 0x8b0,  2,  3, -1,  12,  7,  20,  7,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(spi,   0x8b4,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(uaa,   0x8b8,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(uab,   0x8bc,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(uart2, 0x8c0,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(uart3, 0x8c4,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(sdio1, 0x8ec,  2,  3, -1,  12,  7,  20,  7,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(ddc,   0x8fc,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(gma,   0x900,  2,  3,  4,  14,  5,  20,  5,  28,  2,  30,  2,  Y),
+       DRV_PINGROUP(gme,   0x910,  2,  3,  4,  14,  5,  19,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(gmf,   0x914,  2,  3,  4,  14,  5,  19,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(gmg,   0x918,  2,  3,  4,  14,  5,  19,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(gmh,   0x91c,  2,  3,  4,  14,  5,  19,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(owr,   0x920,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(uda,   0x924,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+};
+
+static const struct tegra_pinctrl_soc_data tegra114_pinctrl = {
+       .ngpios = NUM_GPIOS,
+       .pins = tegra114_pins,
+       .npins = ARRAY_SIZE(tegra114_pins),
+       .functions = tegra114_functions,
+       .nfunctions = ARRAY_SIZE(tegra114_functions),
+       .groups = tegra114_groups,
+       .ngroups = ARRAY_SIZE(tegra114_groups),
+};
+
+static int tegra114_pinctrl_probe(struct platform_device *pdev)
+{
+       return tegra_pinctrl_probe(pdev, &tegra114_pinctrl);
+}
+
+static struct of_device_id tegra114_pinctrl_of_match[] = {
+       { .compatible = "nvidia,tegra114-pinmux", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, tegra114_pinctrl_of_match);
+
+static struct platform_driver tegra114_pinctrl_driver = {
+       .driver = {
+               .name = "tegra114-pinctrl",
+               .owner = THIS_MODULE,
+               .of_match_table = tegra114_pinctrl_of_match,
+       },
+       .probe = tegra114_pinctrl_probe,
+       .remove = tegra_pinctrl_remove,
+};
+module_platform_driver(tegra114_pinctrl_driver);
+
+MODULE_ALIAS("platform:tegra114-pinctrl");
+MODULE_AUTHOR("Pritesh Raithatha <praithatha@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra114 pincontrol driver");
+MODULE_LICENSE("GPL v2");
index e848189038f0654dce86ed020075ff3ddddbc107..fcfb7d012c5b68bda551917f89c657956f2af6dd 100644 (file)
@@ -2624,7 +2624,9 @@ static const struct tegra_function tegra20_functions[] = {
                .odrain_reg = -1,                               \
                .lock_reg = -1,                                 \
                .ioreset_reg = -1,                              \
+               .rcv_sel_reg = -1,                              \
                .drv_reg = -1,                                  \
+               .drvtype_reg = -1,                              \
        }
 
 /* Pin groups with only pull up and pull down control */
@@ -2642,7 +2644,9 @@ static const struct tegra_function tegra20_functions[] = {
                .odrain_reg = -1,                               \
                .lock_reg = -1,                                 \
                .ioreset_reg = -1,                              \
+               .rcv_sel_reg = -1,                              \
                .drv_reg = -1,                                  \
+               .drvtype_reg = -1,                              \
        }
 
 /* Pin groups for drive strength registers (configurable version) */
@@ -2660,6 +2664,7 @@ static const struct tegra_function tegra20_functions[] = {
                .odrain_reg = -1,                               \
                .lock_reg = -1,                                 \
                .ioreset_reg = -1,                              \
+               .rcv_sel_reg = -1,                              \
                .drv_reg = ((r) - PINGROUP_REG_A),              \
                .drv_bank = 3,                                  \
                .hsm_bit = hsm_b,                               \
@@ -2673,6 +2678,7 @@ static const struct tegra_function tegra20_functions[] = {
                .slwr_width = slwr_w,                           \
                .slwf_bit = slwf_b,                             \
                .slwf_width = slwf_w,                           \
+               .drvtype_reg = -1,                              \
        }
 
 /* Pin groups for drive strength registers (simple version) */
index 9ad87ea735d4b6240c9702e67bede26f78dacce3..2300deba25bd4b39157a5a8f1e8524f965488bd3 100644 (file)
@@ -3384,7 +3384,9 @@ static const struct tegra_function tegra30_functions[] = {
                .ioreset_reg = PINGROUP_REG_##ior(r),           \
                .ioreset_bank = 1,                              \
                .ioreset_bit = 8,                               \
+               .rcv_sel_reg = -1,                              \
                .drv_reg = -1,                                  \
+               .drvtype_reg = -1,                              \
        }
 
 #define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b,     \
@@ -3401,6 +3403,7 @@ static const struct tegra_function tegra30_functions[] = {
                .odrain_reg = -1,                               \
                .lock_reg = -1,                                 \
                .ioreset_reg = -1,                              \
+               .rcv_sel_reg = -1,                              \
                .drv_reg = ((r) - DRV_PINGROUP_REG_A),          \
                .drv_bank = 0,                                  \
                .hsm_bit = hsm_b,                               \
@@ -3414,6 +3417,7 @@ static const struct tegra_function tegra30_functions[] = {
                .slwr_width = slwr_w,                           \
                .slwf_bit = slwf_b,                             \
                .slwf_width = slwf_w,                           \
+               .drvtype_reg = -1,                              \
        }
 
 static const struct tegra_pingroup tegra30_groups[] = {
index 5f0eb04c23364de9ff8a427e4ee4af9e9cd7b155..53cb6a3a56edd58885985b40800e94604cb2db9c 100644 (file)
@@ -441,17 +441,17 @@ static int xway_pinconf_get(struct pinctrl_dev *pctldev,
                if (port == PORT3)
                        reg = GPIO3_OD;
                else
-                       reg = GPIO_OD(port);
+                       reg = GPIO_OD(pin);
                *config = LTQ_PINCONF_PACK(param,
-                       !!gpio_getbit(info->membase[0], reg, PORT_PIN(port)));
+                       !gpio_getbit(info->membase[0], reg, PORT_PIN(pin)));
                break;
 
        case LTQ_PINCONF_PARAM_PULL:
                if (port == PORT3)
                        reg = GPIO3_PUDEN;
                else
-                       reg = GPIO_PUDEN(port);
-               if (!gpio_getbit(info->membase[0], reg, PORT_PIN(port))) {
+                       reg = GPIO_PUDEN(pin);
+               if (!gpio_getbit(info->membase[0], reg, PORT_PIN(pin))) {
                        *config = LTQ_PINCONF_PACK(param, 0);
                        break;
                }
@@ -459,13 +459,18 @@ static int xway_pinconf_get(struct pinctrl_dev *pctldev,
                if (port == PORT3)
                        reg = GPIO3_PUDSEL;
                else
-                       reg = GPIO_PUDSEL(port);
-               if (!gpio_getbit(info->membase[0], reg, PORT_PIN(port)))
+                       reg = GPIO_PUDSEL(pin);
+               if (!gpio_getbit(info->membase[0], reg, PORT_PIN(pin)))
                        *config = LTQ_PINCONF_PACK(param, 2);
                else
                        *config = LTQ_PINCONF_PACK(param, 1);
                break;
 
+       case LTQ_PINCONF_PARAM_OUTPUT:
+               reg = GPIO_DIR(pin);
+               *config = LTQ_PINCONF_PACK(param,
+                       gpio_getbit(info->membase[0], reg, PORT_PIN(pin)));
+               break;
        default:
                dev_err(pctldev->dev, "Invalid config param %04x\n", param);
                return -ENOTSUPP;
@@ -488,33 +493,44 @@ static int xway_pinconf_set(struct pinctrl_dev *pctldev,
                if (port == PORT3)
                        reg = GPIO3_OD;
                else
-                       reg = GPIO_OD(port);
-               gpio_setbit(info->membase[0], reg, PORT_PIN(port));
+                       reg = GPIO_OD(pin);
+               if (arg == 0)
+                       gpio_setbit(info->membase[0], reg, PORT_PIN(pin));
+               else
+                       gpio_clearbit(info->membase[0], reg, PORT_PIN(pin));
                break;
 
        case LTQ_PINCONF_PARAM_PULL:
                if (port == PORT3)
                        reg = GPIO3_PUDEN;
                else
-                       reg = GPIO_PUDEN(port);
+                       reg = GPIO_PUDEN(pin);
                if (arg == 0) {
-                       gpio_clearbit(info->membase[0], reg, PORT_PIN(port));
+                       gpio_clearbit(info->membase[0], reg, PORT_PIN(pin));
                        break;
                }
-               gpio_setbit(info->membase[0], reg, PORT_PIN(port));
+               gpio_setbit(info->membase[0], reg, PORT_PIN(pin));
 
                if (port == PORT3)
                        reg = GPIO3_PUDSEL;
                else
-                       reg = GPIO_PUDSEL(port);
+                       reg = GPIO_PUDSEL(pin);
                if (arg == 1)
-                       gpio_clearbit(info->membase[0], reg, PORT_PIN(port));
+                       gpio_clearbit(info->membase[0], reg, PORT_PIN(pin));
                else if (arg == 2)
-                       gpio_setbit(info->membase[0], reg, PORT_PIN(port));
+                       gpio_setbit(info->membase[0], reg, PORT_PIN(pin));
                else
                        dev_err(pctldev->dev, "Invalid pull value %d\n", arg);
                break;
 
+       case LTQ_PINCONF_PARAM_OUTPUT:
+               reg = GPIO_DIR(pin);
+               if (arg == 0)
+                       gpio_clearbit(info->membase[0], reg, PORT_PIN(pin));
+               else
+                       gpio_setbit(info->membase[0], reg, PORT_PIN(pin));
+               break;
+
        default:
                dev_err(pctldev->dev, "Invalid config param %04x\n", param);
                return -ENOTSUPP;
@@ -522,9 +538,24 @@ static int xway_pinconf_set(struct pinctrl_dev *pctldev,
        return 0;
 }
 
+int xway_pinconf_group_set(struct pinctrl_dev *pctldev,
+                       unsigned selector,
+                       unsigned long config)
+{
+       struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctldev);
+       int i, ret = 0;
+
+       for (i = 0; i < info->grps[selector].npins && !ret; i++)
+               ret = xway_pinconf_set(pctldev,
+                               info->grps[selector].pins[i], config);
+
+       return ret;
+}
+
 static struct pinconf_ops xway_pinconf_ops = {
        .pin_config_get = xway_pinconf_get,
        .pin_config_set = xway_pinconf_set,
+       .pin_config_group_set = xway_pinconf_group_set,
 };
 
 static struct pinctrl_desc xway_pctrl_desc = {
@@ -558,6 +589,7 @@ static inline int xway_mux_apply(struct pinctrl_dev *pctrldev,
 static const struct ltq_cfg_param xway_cfg_params[] = {
        {"lantiq,pull",         LTQ_PINCONF_PARAM_PULL},
        {"lantiq,open-drain",   LTQ_PINCONF_PARAM_OPEN_DRAIN},
+       {"lantiq,output",       LTQ_PINCONF_PARAM_OUTPUT},
 };
 
 static struct ltq_pinmux_info xway_info = {
index 06f4eb7ab87e89486b96819c54ebe774c0e28601..afed7018a2b5fe9b006de12716814a2dc00502d0 100644 (file)
@@ -125,8 +125,11 @@ static const struct key_entry acer_wmi_keymap[] = {
        {KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} },
        {KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
        {KE_IGNORE, 0x81, {KEY_SLEEP} },
-       {KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad On/Off */
+       {KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad Toggle */
+       {KE_KEY, KEY_TOUCHPAD_ON, {KEY_TOUCHPAD_ON} },
+       {KE_KEY, KEY_TOUCHPAD_OFF, {KEY_TOUCHPAD_OFF} },
        {KE_IGNORE, 0x83, {KEY_TOUCHPAD_TOGGLE} },
+       {KE_KEY, 0x85, {KEY_TOUCHPAD_TOGGLE} },
        {KE_END, 0}
 };
 
@@ -147,6 +150,7 @@ struct event_return_value {
 #define ACER_WMID3_GDS_THREEG          (1<<6)  /* 3G */
 #define ACER_WMID3_GDS_WIMAX           (1<<7)  /* WiMAX */
 #define ACER_WMID3_GDS_BLUETOOTH       (1<<11) /* BT */
+#define ACER_WMID3_GDS_TOUCHPAD                (1<<1)  /* Touchpad */
 
 struct lm_input_params {
        u8 function_num;        /* Function Number */
@@ -875,7 +879,7 @@ WMI_execute_u32(u32 method_id, u32 in, u32 *out)
        struct acpi_buffer input = { (acpi_size) sizeof(u32), (void *)(&in) };
        struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
-       u32 tmp;
+       u32 tmp = 0;
        acpi_status status;
 
        status = wmi_evaluate_method(WMID_GUID1, 1, method_id, &input, &result);
@@ -884,14 +888,14 @@ WMI_execute_u32(u32 method_id, u32 in, u32 *out)
                return status;
 
        obj = (union acpi_object *) result.pointer;
-       if (obj && obj->type == ACPI_TYPE_BUFFER &&
-               (obj->buffer.length == sizeof(u32) ||
-               obj->buffer.length == sizeof(u64))) {
-               tmp = *((u32 *) obj->buffer.pointer);
-       } else if (obj->type == ACPI_TYPE_INTEGER) {
-               tmp = (u32) obj->integer.value;
-       } else {
-               tmp = 0;
+       if (obj) {
+               if (obj->type == ACPI_TYPE_BUFFER &&
+                       (obj->buffer.length == sizeof(u32) ||
+                       obj->buffer.length == sizeof(u64))) {
+                       tmp = *((u32 *) obj->buffer.pointer);
+               } else if (obj->type == ACPI_TYPE_INTEGER) {
+                       tmp = (u32) obj->integer.value;
+               }
        }
 
        if (out)
@@ -1193,12 +1197,14 @@ static acpi_status WMID_set_capabilities(void)
                return status;
 
        obj = (union acpi_object *) out.pointer;
-       if (obj && obj->type == ACPI_TYPE_BUFFER &&
-               (obj->buffer.length == sizeof(u32) ||
-               obj->buffer.length == sizeof(u64))) {
-               devices = *((u32 *) obj->buffer.pointer);
-       } else if (obj->type == ACPI_TYPE_INTEGER) {
-               devices = (u32) obj->integer.value;
+       if (obj) {
+               if (obj->type == ACPI_TYPE_BUFFER &&
+                       (obj->buffer.length == sizeof(u32) ||
+                       obj->buffer.length == sizeof(u64))) {
+                       devices = *((u32 *) obj->buffer.pointer);
+               } else if (obj->type == ACPI_TYPE_INTEGER) {
+                       devices = (u32) obj->integer.value;
+               }
        } else {
                kfree(out.pointer);
                return AE_ERROR;
@@ -1676,6 +1682,7 @@ static void acer_wmi_notify(u32 value, void *context)
        acpi_status status;
        u16 device_state;
        const struct key_entry *key;
+       u32 scancode;
 
        status = wmi_get_event_data(value, &response);
        if (status != AE_OK) {
@@ -1712,6 +1719,7 @@ static void acer_wmi_notify(u32 value, void *context)
                        pr_warn("Unknown key number - 0x%x\n",
                                return_value.key_num);
                } else {
+                       scancode = return_value.key_num;
                        switch (key->keycode) {
                        case KEY_WLAN:
                        case KEY_BLUETOOTH:
@@ -1725,9 +1733,11 @@ static void acer_wmi_notify(u32 value, void *context)
                                        rfkill_set_sw_state(bluetooth_rfkill,
                                                !(device_state & ACER_WMID3_GDS_BLUETOOTH));
                                break;
+                       case KEY_TOUCHPAD_TOGGLE:
+                               scancode = (device_state & ACER_WMID3_GDS_TOUCHPAD) ?
+                                               KEY_TOUCHPAD_ON : KEY_TOUCHPAD_OFF;
                        }
-                       sparse_keymap_report_entry(acer_wmi_input_dev, key,
-                                                  1, true);
+                       sparse_keymap_report_event(acer_wmi_input_dev, scancode, 1, true);
                }
                break;
        case WMID_ACCEL_EVENT:
@@ -1946,12 +1956,14 @@ static u32 get_wmid_devices(void)
                return 0;
 
        obj = (union acpi_object *) out.pointer;
-       if (obj && obj->type == ACPI_TYPE_BUFFER &&
-               (obj->buffer.length == sizeof(u32) ||
-               obj->buffer.length == sizeof(u64))) {
-               devices = *((u32 *) obj->buffer.pointer);
-       } else if (obj->type == ACPI_TYPE_INTEGER) {
-               devices = (u32) obj->integer.value;
+       if (obj) {
+               if (obj->type == ACPI_TYPE_BUFFER &&
+                       (obj->buffer.length == sizeof(u32) ||
+                       obj->buffer.length == sizeof(u64))) {
+                       devices = *((u32 *) obj->buffer.pointer);
+               } else if (obj->type == ACPI_TYPE_INTEGER) {
+                       devices = (u32) obj->integer.value;
+               }
        }
 
        kfree(out.pointer);
index ec1d3bc2dbe2e1a1ac48c7cb6d26bd096e70b984..fcde4e528819c04daf4b919ba00d70a20f7456c3 100644 (file)
@@ -860,8 +860,10 @@ static ssize_t show_infos(struct device *dev,
        /*
         * The HWRS method return informations about the hardware.
         * 0x80 bit is for WLAN, 0x100 for Bluetooth.
+        * 0x40 for WWAN, 0x10 for WIMAX.
         * The significance of others is yet to be found.
-        * If we don't find the method, we assume the device are present.
+        * We don't currently use this for device detection, and it
+        * takes several seconds to run on some systems.
         */
        rv = acpi_evaluate_integer(asus->handle, "HWRS", NULL, &temp);
        if (!ACPI_FAILURE(rv))
@@ -1682,7 +1684,7 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
 {
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *model = NULL;
-       unsigned long long bsts_result, hwrs_result;
+       unsigned long long bsts_result;
        char *string = NULL;
        acpi_status status;
 
@@ -1741,20 +1743,9 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
                return -ENOMEM;
        }
 
-       if (*string)
+       if (string)
                pr_notice("  %s model detected\n", string);
 
-       /*
-        * The HWRS method return informations about the hardware.
-        * 0x80 bit is for WLAN, 0x100 for Bluetooth,
-        * 0x40 for WWAN, 0x10 for WIMAX.
-        * The significance of others is yet to be found.
-        */
-       status =
-           acpi_evaluate_integer(asus->handle, "HWRS", NULL, &hwrs_result);
-       if (!ACPI_FAILURE(status))
-               pr_notice("  HWRS returned %x", (int)hwrs_result);
-
        if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
                asus->have_rsts = true;
 
index 528e9495458d35f3396efeb6a3ce6e78e201bcd9..a5da0b594f71bcfcc88e17d07335beb3cd318d00 100644 (file)
@@ -1007,7 +1007,7 @@ static int eeepc_get_fan_pwm(void)
 
 static void eeepc_set_fan_pwm(int value)
 {
-       value = SENSORS_LIMIT(value, 0, 255);
+       value = clamp_val(value, 0, 255);
        value = value * 100 / 255;
        ec_write(EEEPC_EC_FAN_PWM, value);
 }
index 7481146a5b473c1cab6745a7d6ac1ae76e66257e..97c2be195efc36eeb76bd848f2c1327be7f168c2 100644 (file)
@@ -244,7 +244,7 @@ static int __init ibm_rtl_init(void) {
        if (force)
                pr_warn("module loaded by force\n");
        /* first ensure that we are running on IBM HW */
-       else if (efi_enabled || !dmi_check_system(ibm_rtl_dmi_table))
+       else if (efi_enabled(EFI_BOOT) || !dmi_check_system(ibm_rtl_dmi_table))
                return -ENODEV;
 
        /* Get the address for the Extended BIOS Data Area */
index dd90d15f52101e24296b30523d68056e01bac7df..d1f0300531766f64ab741ccbd4d388db0644d427 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/ctype.h>
+#include <linux/efi.h>
 #include <acpi/video.h>
 
 /*
@@ -1523,6 +1524,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
                },
         .driver_data = &samsung_broken_acpi_video,
        },
+       {
+        .callback = samsung_dmi_matched,
+        .ident = "N250P",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "N250P"),
+               DMI_MATCH(DMI_BOARD_NAME, "N250P"),
+               },
+        .driver_data = &samsung_broken_acpi_video,
+       },
        { },
 };
 MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
@@ -1534,6 +1545,9 @@ static int __init samsung_init(void)
        struct samsung_laptop *samsung;
        int ret;
 
+       if (efi_enabled(EFI_BOOT))
+               return -ENODEV;
+
        quirks = &samsung_unknown;
        if (!force && !dmi_check_system(samsung_dmi_table))
                return -ENODEV;
index daaddec68def7e1c7cb11e4fe6d54e4193943c55..b8ad71f7863fa2b0fa9c741e9605ffdfe5a92fcf 100644 (file)
@@ -786,28 +786,29 @@ static int sony_nc_int_call(acpi_handle handle, char *name, int *value,
 static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
                void *buffer, size_t buflen)
 {
+       int ret = 0;
        size_t len = len;
        union acpi_object *object = __call_snc_method(handle, name, value);
 
        if (!object)
                return -EINVAL;
 
-       if (object->type == ACPI_TYPE_BUFFER)
+       if (object->type == ACPI_TYPE_BUFFER) {
                len = MIN(buflen, object->buffer.length);
+               memcpy(buffer, object->buffer.pointer, len);
 
-       else if (object->type == ACPI_TYPE_INTEGER)
+       } else if (object->type == ACPI_TYPE_INTEGER) {
                len = MIN(buflen, sizeof(object->integer.value));
+               memcpy(buffer, &object->integer.value, len);
 
-       else {
+       } else {
                pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
                                ACPI_TYPE_BUFFER, object->type);
-               kfree(object);
-               return -EINVAL;
+               ret = -EINVAL;
        }
 
-       memcpy(buffer, object->buffer.pointer, len);
        kfree(object);
-       return 0;
+       return ret;
 }
 
 struct sony_nc_handles {
index f946ca7cb76271a321ce900f5c59b3a07538552b..ebcb461bb2b08883e93ddbbd449f49127623e4aa 100644 (file)
@@ -4877,8 +4877,7 @@ static int __init light_init(struct ibm_init_struct *iibm)
 static void light_exit(void)
 {
        led_classdev_unregister(&tpacpi_led_thinklight.led_classdev);
-       if (work_pending(&tpacpi_led_thinklight.work))
-               flush_workqueue(tpacpi_wq);
+       flush_workqueue(tpacpi_wq);
 }
 
 static int light_read(struct seq_file *m)
index 8bc80b05c63c86b17ebe5a9b2c9abbc9ca8e0f9d..d338c1c4e8c8522c88014861a5f93b213e0191d1 100644 (file)
@@ -915,15 +915,13 @@ static int pm860x_battery_probe(struct platform_device *pdev)
        info->irq_cc = platform_get_irq(pdev, 0);
        if (info->irq_cc <= 0) {
                dev_err(&pdev->dev, "No IRQ resource!\n");
-               ret = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
 
        info->irq_batt = platform_get_irq(pdev, 1);
        if (info->irq_batt <= 0) {
                dev_err(&pdev->dev, "No IRQ resource!\n");
-               ret = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
 
        info->chip = chip;
@@ -957,7 +955,7 @@ static int pm860x_battery_probe(struct platform_device *pdev)
 
        ret = power_supply_register(&pdev->dev, &info->battery);
        if (ret)
-               goto out;
+               return ret;
        info->battery.dev->parent = &pdev->dev;
 
        ret = request_threaded_irq(info->irq_cc, NULL,
@@ -984,8 +982,6 @@ out_coulomb:
        free_irq(info->irq_cc, info);
 out_reg:
        power_supply_unregister(&info->battery);
-out:
-       kfree(info);
        return ret;
 }
 
@@ -993,10 +989,9 @@ static int pm860x_battery_remove(struct platform_device *pdev)
 {
        struct pm860x_battery_info *info = platform_get_drvdata(pdev);
 
-       power_supply_unregister(&info->battery);
        free_irq(info->irq_batt, info);
        free_irq(info->irq_cc, info);
-       kfree(info);
+       power_supply_unregister(&info->battery);
        platform_set_drvdata(pdev, NULL);
        return 0;
 }
index 9f45e2f77d531748016db6799a39fcd7465b6ab9..9e00c389e777d95d43abf84836a901f1a7a4e3e2 100644 (file)
@@ -346,6 +346,20 @@ config AB8500_BM
        help
          Say Y to include support for AB8500 battery management.
 
+config BATTERY_GOLDFISH
+       tristate "Goldfish battery driver"
+       depends on GENERIC_HARDIRQS
+       help
+         Say Y to enable support for the battery and AC power in the
+         Goldfish emulator.
+
+config CHARGER_PM2301
+       bool "PM2301 Battery Charger Driver"
+       depends on AB8500_BM
+       help
+         Say Y to include support for PM2301 charger driver.
+         Depends on AB8500 battery management core.
+
 source "drivers/power/reset/Kconfig"
 
 endif # POWER_SUPPLY
index 22c8913382c0839bda10690268fb740e5ea42999..3f66436af45cc4a2b630c9170b5bf0a35ccb49ee 100644 (file)
@@ -20,6 +20,7 @@ obj-$(CONFIG_BATTERY_DS2760)  += ds2760_battery.o
 obj-$(CONFIG_BATTERY_DS2780)   += ds2780_battery.o
 obj-$(CONFIG_BATTERY_DS2781)   += ds2781_battery.o
 obj-$(CONFIG_BATTERY_DS2782)   += ds2782_battery.o
+obj-$(CONFIG_BATTERY_GOLDFISH) += goldfish_battery.o
 obj-$(CONFIG_BATTERY_PMU)      += pmu_battery.o
 obj-$(CONFIG_BATTERY_OLPC)     += olpc_battery.o
 obj-$(CONFIG_BATTERY_TOSA)     += tosa_battery.o
@@ -38,7 +39,7 @@ obj-$(CONFIG_CHARGER_PCF50633)        += pcf50633-charger.o
 obj-$(CONFIG_BATTERY_JZ4740)   += jz4740-battery.o
 obj-$(CONFIG_BATTERY_INTEL_MID)        += intel_mid_battery.o
 obj-$(CONFIG_BATTERY_RX51)     += rx51_battery.o
-obj-$(CONFIG_AB8500_BM)                += ab8500_bmdata.o ab8500_charger.o ab8500_btemp.o ab8500_fg.o abx500_chargalg.o
+obj-$(CONFIG_AB8500_BM)                += ab8500_bmdata.o ab8500_charger.o ab8500_fg.o ab8500_btemp.o abx500_chargalg.o
 obj-$(CONFIG_CHARGER_ISP1704)  += isp1704_charger.o
 obj-$(CONFIG_CHARGER_MAX8903)  += max8903_charger.o
 obj-$(CONFIG_CHARGER_TWL4030)  += twl4030_charger.o
@@ -46,6 +47,7 @@ obj-$(CONFIG_CHARGER_LP8727)  += lp8727_charger.o
 obj-$(CONFIG_CHARGER_LP8788)   += lp8788-charger.o
 obj-$(CONFIG_CHARGER_GPIO)     += gpio-charger.o
 obj-$(CONFIG_CHARGER_MANAGER)  += charger-manager.o
+obj-$(CONFIG_CHARGER_PM2301)   += pm2301_charger.o
 obj-$(CONFIG_CHARGER_MAX8997)  += max8997_charger.o
 obj-$(CONFIG_CHARGER_MAX8998)  += max8998_charger.o
 obj-$(CONFIG_CHARGER_BQ2415X)  += bq2415x_charger.o
index f034ae43e045ac03127adeca8f1db0f4cbe59b79..7a96c0650fbbc1daf8c3fdf1dbc699a66b7d7a32 100644 (file)
@@ -182,206 +182,206 @@ static struct batres_vs_temp temp_to_batres_tbl_9100[] = {
 };
 
 static struct abx500_battery_type bat_type_thermistor[] = {
-[BATTERY_UNKNOWN] = {
-       /* First element always represent the UNKNOWN battery */
-       .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN,
-       .resis_high = 0,
-       .resis_low = 0,
-       .battery_resistance = 300,
-       .charge_full_design = 612,
-       .nominal_voltage = 3700,
-       .termination_vol = 4050,
-       .termination_curr = 200,
-       .recharge_vol = 3990,
-       .normal_cur_lvl = 400,
-       .normal_vol_lvl = 4100,
-       .maint_a_cur_lvl = 400,
-       .maint_a_vol_lvl = 4050,
-       .maint_a_chg_timer_h = 60,
-       .maint_b_cur_lvl = 400,
-       .maint_b_vol_lvl = 4000,
-       .maint_b_chg_timer_h = 200,
-       .low_high_cur_lvl = 300,
-       .low_high_vol_lvl = 4000,
-       .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
-       .r_to_t_tbl = temp_tbl,
-       .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
-       .v_to_cap_tbl = cap_tbl,
-       .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
-       .batres_tbl = temp_to_batres_tbl_thermistor,
-},
-{
-       .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
-       .resis_high = 53407,
-       .resis_low = 12500,
-       .battery_resistance = 300,
-       .charge_full_design = 900,
-       .nominal_voltage = 3600,
-       .termination_vol = 4150,
-       .termination_curr = 80,
-       .recharge_vol = 4130,
-       .normal_cur_lvl = 700,
-       .normal_vol_lvl = 4200,
-       .maint_a_cur_lvl = 600,
-       .maint_a_vol_lvl = 4150,
-       .maint_a_chg_timer_h = 60,
-       .maint_b_cur_lvl = 600,
-       .maint_b_vol_lvl = 4100,
-       .maint_b_chg_timer_h = 200,
-       .low_high_cur_lvl = 300,
-       .low_high_vol_lvl = 4000,
-       .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl_A_thermistor),
-       .r_to_t_tbl = temp_tbl_A_thermistor,
-       .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_A_thermistor),
-       .v_to_cap_tbl = cap_tbl_A_thermistor,
-       .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
-       .batres_tbl = temp_to_batres_tbl_thermistor,
+       [BATTERY_UNKNOWN] = {
+               /* First element always represent the UNKNOWN battery */
+               .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN,
+               .resis_high = 0,
+               .resis_low = 0,
+               .battery_resistance = 300,
+               .charge_full_design = 612,
+               .nominal_voltage = 3700,
+               .termination_vol = 4050,
+               .termination_curr = 200,
+               .recharge_cap = 95,
+               .normal_cur_lvl = 400,
+               .normal_vol_lvl = 4100,
+               .maint_a_cur_lvl = 400,
+               .maint_a_vol_lvl = 4050,
+               .maint_a_chg_timer_h = 60,
+               .maint_b_cur_lvl = 400,
+               .maint_b_vol_lvl = 4000,
+               .maint_b_chg_timer_h = 200,
+               .low_high_cur_lvl = 300,
+               .low_high_vol_lvl = 4000,
+               .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+               .r_to_t_tbl = temp_tbl,
+               .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+               .v_to_cap_tbl = cap_tbl,
+               .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
+               .batres_tbl = temp_to_batres_tbl_thermistor,
+       },
+       {
+               .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
+               .resis_high = 53407,
+               .resis_low = 12500,
+               .battery_resistance = 300,
+               .charge_full_design = 900,
+               .nominal_voltage = 3600,
+               .termination_vol = 4150,
+               .termination_curr = 80,
+               .recharge_cap = 95,
+               .normal_cur_lvl = 700,
+               .normal_vol_lvl = 4200,
+               .maint_a_cur_lvl = 600,
+               .maint_a_vol_lvl = 4150,
+               .maint_a_chg_timer_h = 60,
+               .maint_b_cur_lvl = 600,
+               .maint_b_vol_lvl = 4100,
+               .maint_b_chg_timer_h = 200,
+               .low_high_cur_lvl = 300,
+               .low_high_vol_lvl = 4000,
+               .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl_A_thermistor),
+               .r_to_t_tbl = temp_tbl_A_thermistor,
+               .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_A_thermistor),
+               .v_to_cap_tbl = cap_tbl_A_thermistor,
+               .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
+               .batres_tbl = temp_to_batres_tbl_thermistor,
 
-},
-{
-       .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
-       .resis_high = 200000,
-       .resis_low = 82869,
-       .battery_resistance = 300,
-       .charge_full_design = 900,
-       .nominal_voltage = 3600,
-       .termination_vol = 4150,
-       .termination_curr = 80,
-       .recharge_vol = 4130,
-       .normal_cur_lvl = 700,
-       .normal_vol_lvl = 4200,
-       .maint_a_cur_lvl = 600,
-       .maint_a_vol_lvl = 4150,
-       .maint_a_chg_timer_h = 60,
-       .maint_b_cur_lvl = 600,
-       .maint_b_vol_lvl = 4100,
-       .maint_b_chg_timer_h = 200,
-       .low_high_cur_lvl = 300,
-       .low_high_vol_lvl = 4000,
-       .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl_B_thermistor),
-       .r_to_t_tbl = temp_tbl_B_thermistor,
-       .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_B_thermistor),
-       .v_to_cap_tbl = cap_tbl_B_thermistor,
-       .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
-       .batres_tbl = temp_to_batres_tbl_thermistor,
-},
+       },
+       {
+               .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
+               .resis_high = 200000,
+               .resis_low = 82869,
+               .battery_resistance = 300,
+               .charge_full_design = 900,
+               .nominal_voltage = 3600,
+               .termination_vol = 4150,
+               .termination_curr = 80,
+               .recharge_cap = 95,
+               .normal_cur_lvl = 700,
+               .normal_vol_lvl = 4200,
+               .maint_a_cur_lvl = 600,
+               .maint_a_vol_lvl = 4150,
+               .maint_a_chg_timer_h = 60,
+               .maint_b_cur_lvl = 600,
+               .maint_b_vol_lvl = 4100,
+               .maint_b_chg_timer_h = 200,
+               .low_high_cur_lvl = 300,
+               .low_high_vol_lvl = 4000,
+               .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl_B_thermistor),
+               .r_to_t_tbl = temp_tbl_B_thermistor,
+               .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_B_thermistor),
+               .v_to_cap_tbl = cap_tbl_B_thermistor,
+               .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
+               .batres_tbl = temp_to_batres_tbl_thermistor,
+       },
 };
 
 static struct abx500_battery_type bat_type_ext_thermistor[] = {
-[BATTERY_UNKNOWN] = {
-       /* First element always represent the UNKNOWN battery */
-       .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN,
-       .resis_high = 0,
-       .resis_low = 0,
-       .battery_resistance = 300,
-       .charge_full_design = 612,
-       .nominal_voltage = 3700,
-       .termination_vol = 4050,
-       .termination_curr = 200,
-       .recharge_vol = 3990,
-       .normal_cur_lvl = 400,
-       .normal_vol_lvl = 4100,
-       .maint_a_cur_lvl = 400,
-       .maint_a_vol_lvl = 4050,
-       .maint_a_chg_timer_h = 60,
-       .maint_b_cur_lvl = 400,
-       .maint_b_vol_lvl = 4000,
-       .maint_b_chg_timer_h = 200,
-       .low_high_cur_lvl = 300,
-       .low_high_vol_lvl = 4000,
-       .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
-       .r_to_t_tbl = temp_tbl,
-       .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
-       .v_to_cap_tbl = cap_tbl,
-       .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
-       .batres_tbl = temp_to_batres_tbl_thermistor,
-},
+       [BATTERY_UNKNOWN] = {
+               /* First element always represent the UNKNOWN battery */
+               .name = POWER_SUPPLY_TECHNOLOGY_UNKNOWN,
+               .resis_high = 0,
+               .resis_low = 0,
+               .battery_resistance = 300,
+               .charge_full_design = 612,
+               .nominal_voltage = 3700,
+               .termination_vol = 4050,
+               .termination_curr = 200,
+               .recharge_cap = 95,
+               .normal_cur_lvl = 400,
+               .normal_vol_lvl = 4100,
+               .maint_a_cur_lvl = 400,
+               .maint_a_vol_lvl = 4050,
+               .maint_a_chg_timer_h = 60,
+               .maint_b_cur_lvl = 400,
+               .maint_b_vol_lvl = 4000,
+               .maint_b_chg_timer_h = 200,
+               .low_high_cur_lvl = 300,
+               .low_high_vol_lvl = 4000,
+               .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+               .r_to_t_tbl = temp_tbl,
+               .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+               .v_to_cap_tbl = cap_tbl,
+               .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
+               .batres_tbl = temp_to_batres_tbl_thermistor,
+       },
 /*
  * These are the batteries that doesn't have an internal NTC resistor to measure
  * its temperature. The temperature in this case is measure with a NTC placed
  * near the battery but on the PCB.
  */
-{
-       .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
-       .resis_high = 76000,
-       .resis_low = 53000,
-       .battery_resistance = 300,
-       .charge_full_design = 900,
-       .nominal_voltage = 3700,
-       .termination_vol = 4150,
-       .termination_curr = 100,
-       .recharge_vol = 4130,
-       .normal_cur_lvl = 700,
-       .normal_vol_lvl = 4200,
-       .maint_a_cur_lvl = 600,
-       .maint_a_vol_lvl = 4150,
-       .maint_a_chg_timer_h = 60,
-       .maint_b_cur_lvl = 600,
-       .maint_b_vol_lvl = 4100,
-       .maint_b_chg_timer_h = 200,
-       .low_high_cur_lvl = 300,
-       .low_high_vol_lvl = 4000,
-       .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
-       .r_to_t_tbl = temp_tbl,
-       .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
-       .v_to_cap_tbl = cap_tbl,
-       .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
-       .batres_tbl = temp_to_batres_tbl_thermistor,
-},
-{
-       .name = POWER_SUPPLY_TECHNOLOGY_LION,
-       .resis_high = 30000,
-       .resis_low = 10000,
-       .battery_resistance = 300,
-       .charge_full_design = 950,
-       .nominal_voltage = 3700,
-       .termination_vol = 4150,
-       .termination_curr = 100,
-       .recharge_vol = 4130,
-       .normal_cur_lvl = 700,
-       .normal_vol_lvl = 4200,
-       .maint_a_cur_lvl = 600,
-       .maint_a_vol_lvl = 4150,
-       .maint_a_chg_timer_h = 60,
-       .maint_b_cur_lvl = 600,
-       .maint_b_vol_lvl = 4100,
-       .maint_b_chg_timer_h = 200,
-       .low_high_cur_lvl = 300,
-       .low_high_vol_lvl = 4000,
-       .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
-       .r_to_t_tbl = temp_tbl,
-       .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
-       .v_to_cap_tbl = cap_tbl,
-       .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
-       .batres_tbl = temp_to_batres_tbl_thermistor,
-},
-{
-       .name = POWER_SUPPLY_TECHNOLOGY_LION,
-       .resis_high = 95000,
-       .resis_low = 76001,
-       .battery_resistance = 300,
-       .charge_full_design = 950,
-       .nominal_voltage = 3700,
-       .termination_vol = 4150,
-       .termination_curr = 100,
-       .recharge_vol = 4130,
-       .normal_cur_lvl = 700,
-       .normal_vol_lvl = 4200,
-       .maint_a_cur_lvl = 600,
-       .maint_a_vol_lvl = 4150,
-       .maint_a_chg_timer_h = 60,
-       .maint_b_cur_lvl = 600,
-       .maint_b_vol_lvl = 4100,
-       .maint_b_chg_timer_h = 200,
-       .low_high_cur_lvl = 300,
-       .low_high_vol_lvl = 4000,
-       .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
-       .r_to_t_tbl = temp_tbl,
-       .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
-       .v_to_cap_tbl = cap_tbl,
-       .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
-       .batres_tbl = temp_to_batres_tbl_thermistor,
-},
+       {
+               .name = POWER_SUPPLY_TECHNOLOGY_LIPO,
+               .resis_high = 76000,
+               .resis_low = 53000,
+               .battery_resistance = 300,
+               .charge_full_design = 900,
+               .nominal_voltage = 3700,
+               .termination_vol = 4150,
+               .termination_curr = 100,
+               .recharge_cap = 95,
+               .normal_cur_lvl = 700,
+               .normal_vol_lvl = 4200,
+               .maint_a_cur_lvl = 600,
+               .maint_a_vol_lvl = 4150,
+               .maint_a_chg_timer_h = 60,
+               .maint_b_cur_lvl = 600,
+               .maint_b_vol_lvl = 4100,
+               .maint_b_chg_timer_h = 200,
+               .low_high_cur_lvl = 300,
+               .low_high_vol_lvl = 4000,
+               .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+               .r_to_t_tbl = temp_tbl,
+               .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+               .v_to_cap_tbl = cap_tbl,
+               .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
+               .batres_tbl = temp_to_batres_tbl_thermistor,
+       },
+       {
+               .name = POWER_SUPPLY_TECHNOLOGY_LION,
+               .resis_high = 30000,
+               .resis_low = 10000,
+               .battery_resistance = 300,
+               .charge_full_design = 950,
+               .nominal_voltage = 3700,
+               .termination_vol = 4150,
+               .termination_curr = 100,
+               .recharge_cap = 95,
+               .normal_cur_lvl = 700,
+               .normal_vol_lvl = 4200,
+               .maint_a_cur_lvl = 600,
+               .maint_a_vol_lvl = 4150,
+               .maint_a_chg_timer_h = 60,
+               .maint_b_cur_lvl = 600,
+               .maint_b_vol_lvl = 4100,
+               .maint_b_chg_timer_h = 200,
+               .low_high_cur_lvl = 300,
+               .low_high_vol_lvl = 4000,
+               .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+               .r_to_t_tbl = temp_tbl,
+               .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+               .v_to_cap_tbl = cap_tbl,
+               .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
+               .batres_tbl = temp_to_batres_tbl_thermistor,
+       },
+       {
+               .name = POWER_SUPPLY_TECHNOLOGY_LION,
+               .resis_high = 95000,
+               .resis_low = 76001,
+               .battery_resistance = 300,
+               .charge_full_design = 950,
+               .nominal_voltage = 3700,
+               .termination_vol = 4150,
+               .termination_curr = 100,
+               .recharge_cap = 95,
+               .normal_cur_lvl = 700,
+               .normal_vol_lvl = 4200,
+               .maint_a_cur_lvl = 600,
+               .maint_a_vol_lvl = 4150,
+               .maint_a_chg_timer_h = 60,
+               .maint_b_cur_lvl = 600,
+               .maint_b_vol_lvl = 4100,
+               .maint_b_chg_timer_h = 200,
+               .low_high_cur_lvl = 300,
+               .low_high_vol_lvl = 4000,
+               .n_temp_tbl_elements = ARRAY_SIZE(temp_tbl),
+               .r_to_t_tbl = temp_tbl,
+               .n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl),
+               .v_to_cap_tbl = cap_tbl,
+               .n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
+               .batres_tbl = temp_to_batres_tbl_thermistor,
+       },
 };
 
 static const struct abx500_bm_capacity_levels cap_levels = {
@@ -405,8 +405,8 @@ static const struct abx500_fg_parameters fg = {
        .lowbat_threshold = 3100,
        .battok_falling_th_sel0 = 2860,
        .battok_raising_th_sel1 = 2860,
+       .maint_thres = 95,
        .user_cap_limit = 15,
-       .maint_thres = 97,
 };
 
 static const struct abx500_maxim_parameters maxi_params = {
@@ -424,96 +424,84 @@ static const struct abx500_bm_charger_parameters chg = {
 };
 
 struct abx500_bm_data ab8500_bm_data = {
-       .temp_under             = 3,
-       .temp_low               = 8,
-       .temp_high              = 43,
-       .temp_over              = 48,
-       .main_safety_tmr_h      = 4,
-       .temp_interval_chg      = 20,
-       .temp_interval_nochg    = 120,
-       .usb_safety_tmr_h       = 4,
-       .bkup_bat_v             = BUP_VCH_SEL_2P6V,
-       .bkup_bat_i             = BUP_ICH_SEL_150UA,
-       .no_maintenance         = false,
-       .adc_therm              = ABx500_ADC_THERM_BATCTRL,
-       .chg_unknown_bat        = false,
-       .enable_overshoot       = false,
-       .fg_res                 = 100,
-       .cap_levels             = &cap_levels,
-       .bat_type               = bat_type_thermistor,
-       .n_btypes               = 3,
-       .batt_id                = 0,
-       .interval_charging      = 5,
-       .interval_not_charging  = 120,
-       .temp_hysteresis        = 3,
-       .gnd_lift_resistance    = 34,
-       .maxi                   = &maxi_params,
-       .chg_params             = &chg,
-       .fg_params              = &fg,
+       .temp_under             = 3,
+       .temp_low               = 8,
+       .temp_high              = 43,
+       .temp_over              = 48,
+       .main_safety_tmr_h      = 4,
+       .temp_interval_chg      = 20,
+       .temp_interval_nochg    = 120,
+       .usb_safety_tmr_h       = 4,
+       .bkup_bat_v             = BUP_VCH_SEL_2P6V,
+       .bkup_bat_i             = BUP_ICH_SEL_150UA,
+       .no_maintenance         = false,
+       .capacity_scaling       = false,
+       .adc_therm              = ABx500_ADC_THERM_BATCTRL,
+       .chg_unknown_bat        = false,
+       .enable_overshoot       = false,
+       .fg_res                 = 100,
+       .cap_levels             = &cap_levels,
+       .bat_type               = bat_type_thermistor,
+       .n_btypes               = 3,
+       .batt_id                = 0,
+       .interval_charging      = 5,
+       .interval_not_charging  = 120,
+       .temp_hysteresis        = 3,
+       .gnd_lift_resistance    = 34,
+       .maxi                   = &maxi_params,
+       .chg_params             = &chg,
+       .fg_params              = &fg,
 };
 
-int bmdevs_of_probe(struct device *dev, struct device_node *np,
-                   struct abx500_bm_data **battery)
+int ab8500_bm_of_probe(struct device *dev,
+                      struct device_node *np,
+                      struct abx500_bm_data *bm)
 {
-       struct  abx500_battery_type *btype;
-       struct  device_node *np_bat_supply;
-       struct  abx500_bm_data *bat;
+       struct batres_vs_temp *tmp_batres_tbl;
+       struct device_node *battery_node;
        const char *btech;
-       char bat_tech[8];
-       int i, thermistor;
-
-       *battery = &ab8500_bm_data;
+       int i;
 
        /* get phandle to 'battery-info' node */
-       np_bat_supply = of_parse_phandle(np, "battery", 0);
-       if (!np_bat_supply) {
-               dev_err(dev, "missing property battery\n");
+       battery_node = of_parse_phandle(np, "battery", 0);
+       if (!battery_node) {
+               dev_err(dev, "battery node or reference missing\n");
                return -EINVAL;
        }
-       if (of_property_read_bool(np_bat_supply,
-                       "thermistor-on-batctrl"))
-               thermistor = NTC_INTERNAL;
-       else
-               thermistor = NTC_EXTERNAL;
 
-       bat = *battery;
-       if (thermistor == NTC_EXTERNAL) {
-               bat->n_btypes  = 4;
-               bat->bat_type  = bat_type_ext_thermistor;
-               bat->adc_therm = ABx500_ADC_THERM_BATTEMP;
-       }
-       btech = of_get_property(np_bat_supply,
-               "stericsson,battery-type", NULL);
+       btech = of_get_property(battery_node, "stericsson,battery-type", NULL);
        if (!btech) {
                dev_warn(dev, "missing property battery-name/type\n");
-               strcpy(bat_tech, "UNKNOWN");
-       } else {
-               strcpy(bat_tech, btech);
+               return -EINVAL;
        }
 
-       if (strncmp(bat_tech, "LION", 4) == 0) {
-               bat->no_maintenance  = true;
-               bat->chg_unknown_bat = true;
-               bat->bat_type[BATTERY_UNKNOWN].charge_full_design = 2600;
-               bat->bat_type[BATTERY_UNKNOWN].termination_vol    = 4150;
-               bat->bat_type[BATTERY_UNKNOWN].recharge_vol       = 4130;
-               bat->bat_type[BATTERY_UNKNOWN].normal_cur_lvl     = 520;
-               bat->bat_type[BATTERY_UNKNOWN].normal_vol_lvl     = 4200;
+       if (strncmp(btech, "LION", 4) == 0) {
+               bm->no_maintenance  = true;
+               bm->chg_unknown_bat = true;
+               bm->bat_type[BATTERY_UNKNOWN].charge_full_design = 2600;
+               bm->bat_type[BATTERY_UNKNOWN].termination_vol    = 4150;
+               bm->bat_type[BATTERY_UNKNOWN].recharge_cap       = 95;
+               bm->bat_type[BATTERY_UNKNOWN].normal_cur_lvl     = 520;
+               bm->bat_type[BATTERY_UNKNOWN].normal_vol_lvl     = 4200;
        }
-       /* select the battery resolution table */
-       for (i = 0; i < bat->n_btypes; ++i) {
-               btype = (bat->bat_type + i);
-               if (thermistor == NTC_EXTERNAL) {
-                       btype->batres_tbl =
-                               temp_to_batres_tbl_ext_thermistor;
-               } else if (strncmp(bat_tech, "LION", 4) == 0) {
-                       btype->batres_tbl =
-                               temp_to_batres_tbl_9100;
-               } else {
-                       btype->batres_tbl =
-                               temp_to_batres_tbl_thermistor;
-               }
+
+       if (of_property_read_bool(battery_node, "thermistor-on-batctrl")) {
+               if (strncmp(btech, "LION", 4) == 0)
+                       tmp_batres_tbl = temp_to_batres_tbl_9100;
+               else
+                       tmp_batres_tbl = temp_to_batres_tbl_thermistor;
+       } else {
+               bm->n_btypes   = 4;
+               bm->bat_type   = bat_type_ext_thermistor;
+               bm->adc_therm  = ABx500_ADC_THERM_BATTEMP;
+               tmp_batres_tbl = temp_to_batres_tbl_ext_thermistor;
        }
-       of_node_put(np_bat_supply);
+
+       /* select the battery resolution table */
+       for (i = 0; i < bm->n_btypes; ++i)
+               bm->bat_type[i].batres_tbl = tmp_batres_tbl;
+
+       of_node_put(battery_node);
+
        return 0;
 }
index 20e2a7d3ef43c0f6877aae8e5b3cb9c08fc6a9ae..07689064996ecccba2cfe5196b6826c364c2a687 100644 (file)
@@ -39,6 +39,9 @@
 #define BTEMP_BATCTRL_CURR_SRC_7UA     7
 #define BTEMP_BATCTRL_CURR_SRC_20UA    20
 
+#define BTEMP_BATCTRL_CURR_SRC_16UA    16
+#define BTEMP_BATCTRL_CURR_SRC_18UA    18
+
 #define to_ab8500_btemp_device_info(x) container_of((x), \
        struct ab8500_btemp, btemp_psy);
 
@@ -78,12 +81,13 @@ struct ab8500_btemp_ranges {
  * @parent:            Pointer to the struct ab8500
  * @gpadc:             Pointer to the struct gpadc
  * @fg:                        Pointer to the struct fg
- * @bat:               Pointer to the abx500_bm platform data
+ * @bm:                Platform specific battery management information
  * @btemp_psy:         Structure for BTEMP specific battery properties
  * @events:            Structure for information about events triggered
  * @btemp_ranges:      Battery temperature range structure
  * @btemp_wq:          Work queue for measuring the temperature periodically
  * @btemp_periodic_work:       Work for measuring the temperature periodically
+ * @initialized:       True if battery id read.
  */
 struct ab8500_btemp {
        struct device *dev;
@@ -94,12 +98,13 @@ struct ab8500_btemp {
        struct ab8500 *parent;
        struct ab8500_gpadc *gpadc;
        struct ab8500_fg *fg;
-       struct abx500_bm_data *bat;
+       struct abx500_bm_data *bm;
        struct power_supply btemp_psy;
        struct ab8500_btemp_events events;
        struct ab8500_btemp_ranges btemp_ranges;
        struct workqueue_struct *btemp_wq;
        struct delayed_work btemp_periodic_work;
+       bool initialized;
 };
 
 /* BTEMP power supply properties */
@@ -147,13 +152,13 @@ static int ab8500_btemp_batctrl_volt_to_res(struct ab8500_btemp *di,
                return (450000 * (v_batctrl)) / (1800 - v_batctrl);
        }
 
-       if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL) {
+       if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL) {
                /*
                 * If the battery has internal NTC, we use the current
                 * source to calculate the resistance, 7uA or 20uA
                 */
                rbs = (v_batctrl * 1000
-                      - di->bat->gnd_lift_resistance * inst_curr)
+                      - di->bm->gnd_lift_resistance * inst_curr)
                      / di->curr_source;
        } else {
                /*
@@ -209,11 +214,19 @@ static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
                return 0;
 
        /* Only do this for batteries with internal NTC */
-       if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && enable) {
-               if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_7UA)
-                       curr = BAT_CTRL_7U_ENA;
-               else
-                       curr = BAT_CTRL_20U_ENA;
+       if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL && enable) {
+
+               if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
+                       if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_16UA)
+                               curr = BAT_CTRL_16U_ENA;
+                       else
+                               curr = BAT_CTRL_18U_ENA;
+               } else {
+                       if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_7UA)
+                               curr = BAT_CTRL_7U_ENA;
+                       else
+                               curr = BAT_CTRL_20U_ENA;
+               }
 
                dev_dbg(di->dev, "Set BATCTRL %duA\n", di->curr_source);
 
@@ -241,14 +254,25 @@ static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
                                __func__);
                        goto disable_curr_source;
                }
-       } else if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL && !enable) {
+       } else if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL && !enable) {
                dev_dbg(di->dev, "Disable BATCTRL curr source\n");
 
-               /* Write 0 to the curr bits */
-               ret = abx500_mask_and_set_register_interruptible(di->dev,
-                       AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
-                       BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
-                       ~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
+               if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
+                       /* Write 0 to the curr bits */
+                       ret = abx500_mask_and_set_register_interruptible(
+                               di->dev,
+                               AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+                               BAT_CTRL_16U_ENA | BAT_CTRL_18U_ENA,
+                               ~(BAT_CTRL_16U_ENA | BAT_CTRL_18U_ENA));
+               } else {
+                       /* Write 0 to the curr bits */
+                       ret = abx500_mask_and_set_register_interruptible(
+                               di->dev,
+                               AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+                               BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
+                               ~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
+               }
+
                if (ret) {
                        dev_err(di->dev, "%s failed disabling current source\n",
                                __func__);
@@ -290,11 +314,20 @@ static int ab8500_btemp_curr_source_enable(struct ab8500_btemp *di,
         * if we got an error above
         */
 disable_curr_source:
-       /* Write 0 to the curr bits */
-       ret = abx500_mask_and_set_register_interruptible(di->dev,
+       if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
+               /* Write 0 to the curr bits */
+               ret = abx500_mask_and_set_register_interruptible(di->dev,
+                       AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+                       BAT_CTRL_16U_ENA | BAT_CTRL_18U_ENA,
+                       ~(BAT_CTRL_16U_ENA | BAT_CTRL_18U_ENA));
+       } else {
+               /* Write 0 to the curr bits */
+               ret = abx500_mask_and_set_register_interruptible(di->dev,
                        AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
                        BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA,
                        ~(BAT_CTRL_7U_ENA | BAT_CTRL_20U_ENA));
+       }
+
        if (ret) {
                dev_err(di->dev, "%s failed disabling current source\n",
                        __func__);
@@ -372,13 +405,10 @@ static int ab8500_btemp_get_batctrl_res(struct ab8500_btemp *di)
                return ret;
        }
 
-       /*
-        * Since there is no interrupt when current measurement is done,
-        * loop for over 250ms (250ms is one sample conversion time
-        * with 32.768 Khz RTC clock). Note that a stop time must be set
-        * since the ab8500_btemp_read_batctrl_voltage call can block and
-        * take an unknown amount of time to complete.
-        */
+       do {
+               msleep(20);
+       } while (!ab8500_fg_inst_curr_started(di->fg));
+
        i = 0;
 
        do {
@@ -457,9 +487,9 @@ static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
        int rbat, rntc, vntc;
        u8 id;
 
-       id = di->bat->batt_id;
+       id = di->bm->batt_id;
 
-       if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL &&
+       if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL &&
                        id != BATTERY_UNKNOWN) {
 
                rbat = ab8500_btemp_get_batctrl_res(di);
@@ -474,8 +504,8 @@ static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
                }
 
                temp = ab8500_btemp_res_to_temp(di,
-                       di->bat->bat_type[id].r_to_t_tbl,
-                       di->bat->bat_type[id].n_temp_tbl_elements, rbat);
+                       di->bm->bat_type[id].r_to_t_tbl,
+                       di->bm->bat_type[id].n_temp_tbl_elements, rbat);
        } else {
                vntc = ab8500_gpadc_convert(di->gpadc, BTEMP_BALL);
                if (vntc < 0) {
@@ -491,8 +521,8 @@ static int ab8500_btemp_measure_temp(struct ab8500_btemp *di)
                rntc = 230000 * vntc / (VTVOUT_V - vntc);
 
                temp = ab8500_btemp_res_to_temp(di,
-                       di->bat->bat_type[id].r_to_t_tbl,
-                       di->bat->bat_type[id].n_temp_tbl_elements, rntc);
+                       di->bm->bat_type[id].r_to_t_tbl,
+                       di->bm->bat_type[id].n_temp_tbl_elements, rntc);
                prev = temp;
        }
        dev_dbg(di->dev, "Battery temperature is %d\n", temp);
@@ -511,9 +541,12 @@ static int ab8500_btemp_id(struct ab8500_btemp *di)
 {
        int res;
        u8 i;
+       if (is_ab9540(di->parent) || is_ab8505(di->parent))
+               di->curr_source = BTEMP_BATCTRL_CURR_SRC_16UA;
+       else
+               di->curr_source = BTEMP_BATCTRL_CURR_SRC_7UA;
 
-       di->curr_source = BTEMP_BATCTRL_CURR_SRC_7UA;
-       di->bat->batt_id = BATTERY_UNKNOWN;
+       di->bm->batt_id = BATTERY_UNKNOWN;
 
        res =  ab8500_btemp_get_batctrl_res(di);
        if (res < 0) {
@@ -522,23 +555,23 @@ static int ab8500_btemp_id(struct ab8500_btemp *di)
        }
 
        /* BATTERY_UNKNOWN is defined on position 0, skip it! */
-       for (i = BATTERY_UNKNOWN + 1; i < di->bat->n_btypes; i++) {
-               if ((res <= di->bat->bat_type[i].resis_high) &&
-                       (res >= di->bat->bat_type[i].resis_low)) {
+       for (i = BATTERY_UNKNOWN + 1; i < di->bm->n_btypes; i++) {
+               if ((res <= di->bm->bat_type[i].resis_high) &&
+                       (res >= di->bm->bat_type[i].resis_low)) {
                        dev_dbg(di->dev, "Battery detected on %s"
                                " low %d < res %d < high: %d"
                                " index: %d\n",
-                               di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL ?
+                               di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL ?
                                "BATCTRL" : "BATTEMP",
-                               di->bat->bat_type[i].resis_low, res,
-                               di->bat->bat_type[i].resis_high, i);
+                               di->bm->bat_type[i].resis_low, res,
+                               di->bm->bat_type[i].resis_high, i);
 
-                       di->bat->batt_id = i;
+                       di->bm->batt_id = i;
                        break;
                }
        }
 
-       if (di->bat->batt_id == BATTERY_UNKNOWN) {
+       if (di->bm->batt_id == BATTERY_UNKNOWN) {
                dev_warn(di->dev, "Battery identified as unknown"
                        ", resistance %d Ohm\n", res);
                return -ENXIO;
@@ -548,13 +581,18 @@ static int ab8500_btemp_id(struct ab8500_btemp *di)
         * We only have to change current source if the
         * detected type is Type 1, else we use the 7uA source
         */
-       if (di->bat->adc_therm == ABx500_ADC_THERM_BATCTRL &&
-                       di->bat->batt_id == 1) {
-               dev_dbg(di->dev, "Set BATCTRL current source to 20uA\n");
-               di->curr_source = BTEMP_BATCTRL_CURR_SRC_20UA;
+       if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL &&
+                       di->bm->batt_id == 1) {
+               if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
+                       dev_dbg(di->dev, "Set BATCTRL current source to 16uA\n");
+                       di->curr_source = BTEMP_BATCTRL_CURR_SRC_16UA;
+               } else {
+                       dev_dbg(di->dev, "Set BATCTRL current source to 20uA\n");
+                       di->curr_source = BTEMP_BATCTRL_CURR_SRC_20UA;
+               }
        }
 
-       return di->bat->batt_id;
+       return di->bm->batt_id;
 }
 
 /**
@@ -569,6 +607,13 @@ static void ab8500_btemp_periodic_work(struct work_struct *work)
        struct ab8500_btemp *di = container_of(work,
                struct ab8500_btemp, btemp_periodic_work.work);
 
+       if (!di->initialized) {
+               di->initialized = true;
+               /* Identify the battery */
+               if (ab8500_btemp_id(di) < 0)
+                       dev_warn(di->dev, "failed to identify the battery\n");
+       }
+
        di->bat_temp = ab8500_btemp_measure_temp(di);
 
        if (di->bat_temp != di->prev_bat_temp) {
@@ -577,9 +622,9 @@ static void ab8500_btemp_periodic_work(struct work_struct *work)
        }
 
        if (di->events.ac_conn || di->events.usb_conn)
-               interval = di->bat->temp_interval_chg;
+               interval = di->bm->temp_interval_chg;
        else
-               interval = di->bat->temp_interval_nochg;
+               interval = di->bm->temp_interval_nochg;
 
        /* Schedule a new measurement */
        queue_delayed_work(di->btemp_wq,
@@ -616,9 +661,9 @@ static irqreturn_t ab8500_btemp_templow_handler(int irq, void *_di)
 {
        struct ab8500_btemp *di = _di;
 
-       if (is_ab8500_2p0_or_earlier(di->parent)) {
+       if (is_ab8500_3p3_or_earlier(di->parent)) {
                dev_dbg(di->dev, "Ignore false btemp low irq"
-                       " for ABB cut 1.0, 1.1 and 2.0\n");
+                       " for ABB cut 1.0, 1.1, 2.0 and 3.3\n");
        } else {
                dev_crit(di->dev, "Battery temperature lower than -10deg c\n");
 
@@ -732,30 +777,30 @@ static int ab8500_btemp_get_temp(struct ab8500_btemp *di)
        int temp = 0;
 
        /*
-        * The BTEMP events are not reliabe on AB8500 cut2.0
+        * The BTEMP events are not reliable on AB8500 cut3.3
         * and prior versions
         */
-       if (is_ab8500_2p0_or_earlier(di->parent)) {
+       if (is_ab8500_3p3_or_earlier(di->parent)) {
                temp = di->bat_temp * 10;
        } else {
                if (di->events.btemp_low) {
                        if (temp > di->btemp_ranges.btemp_low_limit)
-                               temp = di->btemp_ranges.btemp_low_limit;
+                               temp = di->btemp_ranges.btemp_low_limit * 10;
                        else
                                temp = di->bat_temp * 10;
                } else if (di->events.btemp_high) {
                        if (temp < di->btemp_ranges.btemp_high_limit)
-                               temp = di->btemp_ranges.btemp_high_limit;
+                               temp = di->btemp_ranges.btemp_high_limit * 10;
                        else
                                temp = di->bat_temp * 10;
                } else if (di->events.btemp_lowmed) {
                        if (temp > di->btemp_ranges.btemp_med_limit)
-                               temp = di->btemp_ranges.btemp_med_limit;
+                               temp = di->btemp_ranges.btemp_med_limit * 10;
                        else
                                temp = di->bat_temp * 10;
                } else if (di->events.btemp_medhigh) {
                        if (temp < di->btemp_ranges.btemp_med_limit)
-                               temp = di->btemp_ranges.btemp_med_limit;
+                               temp = di->btemp_ranges.btemp_med_limit * 10;
                        else
                                temp = di->bat_temp * 10;
                } else
@@ -806,7 +851,7 @@ static int ab8500_btemp_get_property(struct power_supply *psy,
                        val->intval = 1;
                break;
        case POWER_SUPPLY_PROP_TECHNOLOGY:
-               val->intval = di->bat->bat_type[di->bat->batt_id].name;
+               val->intval = di->bm->bat_type[di->bm->batt_id].name;
                break;
        case POWER_SUPPLY_PROP_TEMP:
                val->intval = ab8500_btemp_get_temp(di);
@@ -967,6 +1012,7 @@ static char *supply_interface[] = {
 static int ab8500_btemp_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
+       struct abx500_bm_data *plat = pdev->dev.platform_data;
        struct ab8500_btemp *di;
        int irq, i, ret = 0;
        u8 val;
@@ -976,21 +1022,19 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "%s no mem for ab8500_btemp\n", __func__);
                return -ENOMEM;
        }
-       di->bat = pdev->mfd_cell->platform_data;
-       if (!di->bat) {
-               if (np) {
-                       ret = bmdevs_of_probe(&pdev->dev, np, &di->bat);
-                       if (ret) {
-                               dev_err(&pdev->dev,
-                                       "failed to get battery information\n");
-                               return ret;
-                       }
-               } else {
-                       dev_err(&pdev->dev, "missing dt node for ab8500_btemp\n");
-                       return -EINVAL;
+
+       if (!plat) {
+               dev_err(&pdev->dev, "no battery management data supplied\n");
+               return -EINVAL;
+       }
+       di->bm = plat;
+
+       if (np) {
+               ret = ab8500_bm_of_probe(&pdev->dev, np, di->bm);
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to get battery information\n");
+                       return ret;
                }
-       } else {
-               dev_info(&pdev->dev, "falling back to legacy platform data\n");
        }
 
        /* get parent data */
@@ -998,6 +1042,8 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
        di->parent = dev_get_drvdata(pdev->dev.parent);
        di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
 
+       di->initialized = false;
+
        /* BTEMP supply */
        di->btemp_psy.name = "ab8500_btemp";
        di->btemp_psy.type = POWER_SUPPLY_TYPE_BATTERY;
@@ -1022,10 +1068,6 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
        INIT_DEFERRABLE_WORK(&di->btemp_periodic_work,
                ab8500_btemp_periodic_work);
 
-       /* Identify the battery */
-       if (ab8500_btemp_id(di) < 0)
-               dev_warn(di->dev, "failed to identify the battery\n");
-
        /* Set BTEMP thermal limits. Low and Med are fixed */
        di->btemp_ranges.btemp_low_limit = BTEMP_THERMAL_LOW_LIMIT;
        di->btemp_ranges.btemp_med_limit = BTEMP_THERMAL_MED_LIMIT;
@@ -1123,7 +1165,7 @@ static void __exit ab8500_btemp_exit(void)
        platform_driver_unregister(&ab8500_btemp_driver);
 }
 
-subsys_initcall_sync(ab8500_btemp_init);
+device_initcall(ab8500_btemp_init);
 module_exit(ab8500_btemp_exit);
 
 MODULE_LICENSE("GPL v2");
index 3be9c0ee3fc58a2c016cb9509ceb316c54312b5f..24b30b7ea5ca55219cb52918bb25a31090aeb1c8 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/mfd/abx500/ab8500-gpadc.h>
 #include <linux/mfd/abx500/ux500_chargalg.h>
 #include <linux/usb/otg.h>
+#include <linux/mutex.h>
 
 /* Charger constants */
 #define NO_PW_CONN                     0
@@ -54,6 +55,7 @@
 
 #define MAIN_CH_INPUT_CURR_SHIFT       4
 #define VBUS_IN_CURR_LIM_SHIFT         4
+#define AUTO_VBUS_IN_CURR_LIM_SHIFT    4
 
 #define LED_INDICATOR_PWM_ENA          0x01
 #define LED_INDICATOR_PWM_DIS          0x00
 #define MAIN_CH_NOK                    0x01
 #define VBUS_DET                       0x80
 
+#define MAIN_CH_STATUS2_MAINCHGDROP            0x80
+#define MAIN_CH_STATUS2_MAINCHARGERDETDBNC     0x40
+#define USB_CH_VBUSDROP                                0x40
+#define USB_CH_VBUSDETDBNC                     0x01
+
 /* UsbLineStatus register bit masks */
 #define AB8500_USB_LINK_STATUS         0x78
 #define AB8500_STD_HOST_SUSP           0x18
 /* Lowest charger voltage is 3.39V -> 0x4E */
 #define LOW_VOLT_REG                   0x4E
 
+/* Step up/down delay in us */
+#define STEP_UDELAY                    1000
+
+#define CHARGER_STATUS_POLL 10 /* in ms */
+
+#define CHG_WD_INTERVAL                        (60 * HZ)
+
+#define AB8500_SW_CONTROL_FALLBACK     0x03
+/* Wait for enumeration before charging in us */
+#define WAIT_ACA_RID_ENUMERATION       (5 * 1000)
+
 /* UsbLineStatus register - usb types */
 enum ab8500_charger_link_status {
        USB_STAT_NOT_CONFIGURED,
@@ -97,6 +115,13 @@ enum ab8500_charger_link_status {
        USB_STAT_HM_IDGND,
        USB_STAT_RESERVED,
        USB_STAT_NOT_VALID_LINK,
+       USB_STAT_PHY_EN,
+       USB_STAT_SUP_NO_IDGND_VBUS,
+       USB_STAT_SUP_IDGND_VBUS,
+       USB_STAT_CHARGER_LINE_1,
+       USB_STAT_CARKIT_1,
+       USB_STAT_CARKIT_2,
+       USB_STAT_ACA_DOCK_CHARGER,
 };
 
 enum ab8500_usb_state {
@@ -149,6 +174,7 @@ struct ab8500_charger_info {
        int charger_voltage;
        int cv_active;
        bool wd_expired;
+       int charger_current;
 };
 
 struct ab8500_charger_event_flags {
@@ -159,12 +185,14 @@ struct ab8500_charger_event_flags {
        bool usbchargernotok;
        bool chgwdexp;
        bool vbus_collapse;
+       bool vbus_drop_end;
 };
 
 struct ab8500_charger_usb_state {
-       bool usb_changed;
        int usb_current;
+       int usb_current_tmp;
        enum ab8500_usb_state state;
+       enum ab8500_usb_state state_tmp;
        spinlock_t usb_lock;
 };
 
@@ -182,11 +210,17 @@ struct ab8500_charger_usb_state {
  *                     charger is enabled
  * @vbat               Battery voltage
  * @old_vbat           Previously measured battery voltage
+ * @usb_device_is_unrecognised USB device is unrecognised by the hardware
  * @autopower          Indicate if we should have automatic pwron after pwrloss
  * @autopower_cfg      platform specific power config support for "pwron after pwrloss"
+ * @invalid_charger_detect_state State when forcing AB to use invalid charger
+ * @is_usb_host:       Indicate if last detected USB type is host
+ * @is_aca_rid:                Indicate if accessory is ACA type
+ * @current_stepping_sessions:
+ *                     Counter for current stepping sessions
  * @parent:            Pointer to the struct ab8500
  * @gpadc:             Pointer to the struct gpadc
- * @bat:               Pointer to the abx500_bm platform data
+ * @bm:                Platform specific battery management information
  * @flags:             Structure for information about events triggered
  * @usb_state:         Structure for usb stack information
  * @ac_chg:            AC charger power supply
@@ -195,19 +229,28 @@ struct ab8500_charger_usb_state {
  * @usb:               Structure that holds the USB charger properties
  * @regu:              Pointer to the struct regulator
  * @charger_wq:                Work queue for the IRQs and checking HW state
+ * @usb_ipt_crnt_lock: Lock to protect VBUS input current setting from mutuals
+ * @pm_lock:           Lock to prevent system to suspend
  * @check_vbat_work    Work for checking vbat threshold to adjust vbus current
  * @check_hw_failure_work:     Work for checking HW state
  * @check_usbchgnotok_work:    Work for checking USB charger not ok status
  * @kick_wd_work:              Work for kicking the charger watchdog in case
  *                             of ABB rev 1.* due to the watchog logic bug
+ * @ac_charger_attached_work:  Work for checking if AC charger is still
+ *                             connected
+ * @usb_charger_attached_work: Work for checking if USB charger is still
+ *                             connected
  * @ac_work:                   Work for checking AC charger connection
  * @detect_usb_type_work:      Work for detecting the USB type connected
  * @usb_link_status_work:      Work for checking the new USB link status
  * @usb_state_changed_work:    Work for checking USB state
+ * @attach_work:               Work for detecting USB type
+ * @vbus_drop_end_work:                Work for detecting VBUS drop end
  * @check_main_thermal_prot_work:
  *                             Work for checking Main thermal status
  * @check_usb_thermal_prot_work:
  *                             Work for checking USB thermal status
+ * @charger_attached_mutex:    For controlling the wakelock
  */
 struct ab8500_charger {
        struct device *dev;
@@ -219,11 +262,16 @@ struct ab8500_charger {
        bool vddadc_en_usb;
        int vbat;
        int old_vbat;
+       bool usb_device_is_unrecognised;
        bool autopower;
        bool autopower_cfg;
+       int invalid_charger_detect_state;
+       bool is_usb_host;
+       int is_aca_rid;
+       atomic_t current_stepping_sessions;
        struct ab8500 *parent;
        struct ab8500_gpadc *gpadc;
-       struct abx500_bm_data *bat;
+       struct abx500_bm_data *bm;
        struct ab8500_charger_event_flags flags;
        struct ab8500_charger_usb_state usb_state;
        struct ux500_charger ac_chg;
@@ -232,18 +280,24 @@ struct ab8500_charger {
        struct ab8500_charger_info usb;
        struct regulator *regu;
        struct workqueue_struct *charger_wq;
+       struct mutex usb_ipt_crnt_lock;
        struct delayed_work check_vbat_work;
        struct delayed_work check_hw_failure_work;
        struct delayed_work check_usbchgnotok_work;
        struct delayed_work kick_wd_work;
+       struct delayed_work usb_state_changed_work;
+       struct delayed_work attach_work;
+       struct delayed_work ac_charger_attached_work;
+       struct delayed_work usb_charger_attached_work;
+       struct delayed_work vbus_drop_end_work;
        struct work_struct ac_work;
        struct work_struct detect_usb_type_work;
        struct work_struct usb_link_status_work;
-       struct work_struct usb_state_changed_work;
        struct work_struct check_main_thermal_prot_work;
        struct work_struct check_usb_thermal_prot_work;
        struct usb_phy *usb_phy;
        struct notifier_block nb;
+       struct mutex charger_attached_mutex;
 };
 
 /* AC properties */
@@ -267,50 +321,65 @@ static enum power_supply_property ab8500_charger_usb_props[] = {
        POWER_SUPPLY_PROP_CURRENT_NOW,
 };
 
-/**
- * ab8500_power_loss_handling - set how we handle powerloss.
- * @di:                pointer to the ab8500_charger structure
- *
- * Magic nummbers are from STE HW department.
+/*
+ * Function for enabling and disabling SW fallback mode,
+ * which should always be disabled when no charger is connected.
  */
-static void ab8500_power_loss_handling(struct ab8500_charger *di)
+static void ab8500_enable_disable_sw_fallback(struct ab8500_charger *di,
+               bool fallback)
 {
+       u8 val;
        u8 reg;
+       u8 bank;
+       u8 bit;
        int ret;
 
-       dev_dbg(di->dev, "Autopower : %d\n", di->autopower);
+       dev_dbg(di->dev, "SW Fallback: %d\n", fallback);
 
-       /* read the autopower register */
-       ret = abx500_get_register_interruptible(di->dev, 0x15, 0x00, &reg);
-       if (ret) {
-               dev_err(di->dev, "%d write failed\n", __LINE__);
-               return;
+       if (is_ab8500(di->parent)) {
+               bank = 0x15;
+               reg = 0x0;
+               bit = 3;
+       } else {
+               bank = AB8500_SYS_CTRL1_BLOCK;
+               reg = AB8500_SW_CONTROL_FALLBACK;
+               bit = 0;
        }
 
-       /* enable the OPT emulation registers */
-       ret = abx500_set_register_interruptible(di->dev, 0x11, 0x00, 0x2);
-       if (ret) {
-               dev_err(di->dev, "%d write failed\n", __LINE__);
+       /* read the register containing fallback bit */
+       ret = abx500_get_register_interruptible(di->dev, bank, reg, &val);
+       if (ret < 0) {
+               dev_err(di->dev, "%d read failed\n", __LINE__);
                return;
        }
 
-       if (di->autopower)
-               reg |= 0x8;
+       if (is_ab8500(di->parent)) {
+               /* enable the OPT emulation registers */
+               ret = abx500_set_register_interruptible(di->dev, 0x11, 0x00, 0x2);
+               if (ret) {
+                       dev_err(di->dev, "%d write failed\n", __LINE__);
+                       goto disable_otp;
+               }
+       }
+
+       if (fallback)
+               val |= (1 << bit);
        else
-               reg &= ~0x8;
+               val &= ~(1 << bit);
 
-       /* write back the changed value to autopower reg */
-       ret = abx500_set_register_interruptible(di->dev, 0x15, 0x00, reg);
+       /* write back the changed fallback bit value to register */
+       ret = abx500_set_register_interruptible(di->dev, bank, reg, val);
        if (ret) {
                dev_err(di->dev, "%d write failed\n", __LINE__);
-               return;
        }
 
-       /* disable the set OTP registers again */
-       ret = abx500_set_register_interruptible(di->dev, 0x11, 0x00, 0x0);
-       if (ret) {
-               dev_err(di->dev, "%d write failed\n", __LINE__);
-               return;
+disable_otp:
+       if (is_ab8500(di->parent)) {
+               /* disable the set OTP registers again */
+               ret = abx500_set_register_interruptible(di->dev, 0x11, 0x00, 0x0);
+               if (ret) {
+                       dev_err(di->dev, "%d write failed\n", __LINE__);
+               }
        }
 }
 
@@ -329,12 +398,12 @@ static void ab8500_power_supply_changed(struct ab8500_charger *di,
                    !di->ac.charger_connected &&
                    di->autopower) {
                        di->autopower = false;
-                       ab8500_power_loss_handling(di);
+                       ab8500_enable_disable_sw_fallback(di, false);
                } else if (!di->autopower &&
                           (di->ac.charger_connected ||
                            di->usb.charger_connected)) {
                        di->autopower = true;
-                       ab8500_power_loss_handling(di);
+                       ab8500_enable_disable_sw_fallback(di, true);
                }
        }
        power_supply_changed(psy);
@@ -347,6 +416,19 @@ static void ab8500_charger_set_usb_connected(struct ab8500_charger *di,
                dev_dbg(di->dev, "USB connected:%i\n", connected);
                di->usb.charger_connected = connected;
                sysfs_notify(&di->usb_chg.psy.dev->kobj, NULL, "present");
+
+               if (connected) {
+                       mutex_lock(&di->charger_attached_mutex);
+                       mutex_unlock(&di->charger_attached_mutex);
+
+                       queue_delayed_work(di->charger_wq,
+                                          &di->usb_charger_attached_work,
+                                          HZ);
+               } else {
+                       cancel_delayed_work_sync(&di->usb_charger_attached_work);
+                       mutex_lock(&di->charger_attached_mutex);
+                       mutex_unlock(&di->charger_attached_mutex);
+               }
        }
 }
 
@@ -500,6 +582,7 @@ static int ab8500_charger_usb_cv(struct ab8500_charger *di)
 /**
  * ab8500_charger_detect_chargers() - Detect the connected chargers
  * @di:                pointer to the ab8500_charger structure
+ * @probe:     if true (called from probe), skip the HW debounce delay
  *
  * Returns the type of charger connected.
  * For USB it will not mean we can actually charge from it
@@ -513,7 +596,7 @@ static int ab8500_charger_usb_cv(struct ab8500_charger *di)
  * USB_PW_CONN  if the USB power supply is connected
  * AC_PW_CONN + USB_PW_CONN if USB and AC power supplies are both connected
  */
-static int ab8500_charger_detect_chargers(struct ab8500_charger *di)
+static int ab8500_charger_detect_chargers(struct ab8500_charger *di, bool probe)
 {
        int result = NO_PW_CONN;
        int ret;
@@ -531,13 +614,25 @@ static int ab8500_charger_detect_chargers(struct ab8500_charger *di)
                result = AC_PW_CONN;
 
        /* Check for USB charger */
+
+       if (!probe) {
+               /*
+                * AB8500 says VBUS_DET_DBNC1 & VBUS_DET_DBNC100
+                * when disconnecting ACA even though no
+                * charger was connected. Try waiting a little
+                * longer than the 100 ms of VBUS_DET_DBNC100...
+                */
+               msleep(110);
+       }
        ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
                AB8500_CH_USBCH_STAT1_REG, &val);
        if (ret < 0) {
                dev_err(di->dev, "%s ab8500 read failed\n", __func__);
                return ret;
        }
-
+       dev_dbg(di->dev,
+               "%s AB8500_CH_USBCH_STAT1_REG %x\n", __func__,
+               val);
        if ((val & VBUS_DET_DBNC1) && (val & VBUS_DET_DBNC100))
                result |= USB_PW_CONN;
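A minimal sketch of how a caller decodes the bitmask returned by ab8500_charger_detect_chargers(); the numeric values of NO_PW_CONN, AC_PW_CONN and USB_PW_CONN below are illustrative assumptions for a standalone user-space example, not the driver's definitions.

/* Illustrative user-space sketch, not part of the patch. */
#include <stdio.h>

#define NO_PW_CONN   0          /* assumed value for illustration */
#define AC_PW_CONN   (1 << 0)   /* assumed value for illustration */
#define USB_PW_CONN  (1 << 1)   /* assumed value for illustration */

static void report(int result)
{
	if (result < 0) {
		printf("detection failed: %d\n", result);
		return;
	}
	if (result & AC_PW_CONN)
		printf("AC charger connected\n");
	if (result & USB_PW_CONN)
		printf("USB charger connected\n");
	if (result == NO_PW_CONN)
		printf("no charger connected\n");
}

int main(void)
{
	report(AC_PW_CONN | USB_PW_CONN);
	return 0;
}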
 
@@ -554,31 +649,53 @@ static int ab8500_charger_detect_chargers(struct ab8500_charger *di)
  * Returns error code in case of failure else 0 on success
  */
 static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
-       enum ab8500_charger_link_status link_status)
+               enum ab8500_charger_link_status link_status)
 {
        int ret = 0;
 
+       di->usb_device_is_unrecognised = false;
+
+       /*
+        * Platform only supports USB 2.0.
+        * This means that charging current from a USB source
+        * is at most 500 mA. Every occurrence of USB_STAT_*_HOST_*
+        * should set USB_CH_IP_CUR_LVL_0P5.
+        */
+
        switch (link_status) {
        case USB_STAT_STD_HOST_NC:
        case USB_STAT_STD_HOST_C_NS:
        case USB_STAT_STD_HOST_C_S:
                dev_dbg(di->dev, "USB Type - Standard host is "
                        "detected through USB driver\n");
-               di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P09;
+               di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+               di->is_usb_host = true;
+               di->is_aca_rid = 0;
                break;
        case USB_STAT_HOST_CHG_HS_CHIRP:
                di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+               di->is_usb_host = true;
+               di->is_aca_rid = 0;
                break;
        case USB_STAT_HOST_CHG_HS:
+               di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+               di->is_usb_host = true;
+               di->is_aca_rid = 0;
+               break;
        case USB_STAT_ACA_RID_C_HS:
                di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P9;
+               di->is_usb_host = false;
+               di->is_aca_rid = 0;
                break;
        case USB_STAT_ACA_RID_A:
                /*
                 * Dedicated charger level minus maximum current accessory
-                * can consume (300mA). Closest level is 1100mA
+                * can consume (900mA). Closest level is 500mA
                 */
-               di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P1;
+               dev_dbg(di->dev, "USB_STAT_ACA_RID_A detected\n");
+               di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+               di->is_usb_host = false;
+               di->is_aca_rid = 1;
                break;
        case USB_STAT_ACA_RID_B:
                /*
@@ -586,34 +703,68 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
                 * 100mA for potential accessory). Closest level is 1300mA
                 */
                di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P3;
+               dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d", link_status,
+                               di->max_usb_in_curr);
+               di->is_usb_host = false;
+               di->is_aca_rid = 1;
                break;
-       case USB_STAT_DEDICATED_CHG:
        case USB_STAT_HOST_CHG_NM:
+               di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+               di->is_usb_host = true;
+               di->is_aca_rid = 0;
+               break;
+       case USB_STAT_DEDICATED_CHG:
+               di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P5;
+               di->is_usb_host = false;
+               di->is_aca_rid = 0;
+               break;
        case USB_STAT_ACA_RID_C_HS_CHIRP:
        case USB_STAT_ACA_RID_C_NM:
                di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P5;
+               di->is_usb_host = false;
+               di->is_aca_rid = 1;
                break;
-       case USB_STAT_RESERVED:
-               /*
-                * This state is used to indicate that VBUS has dropped below
-                * the detection level 4 times in a row. This is due to the
-                * charger output current is set to high making the charger
-                * voltage collapse. This have to be propagated through to
-                * chargalg. This is done using the property
-                * POWER_SUPPLY_PROP_CURRENT_AVG = 1
-                */
-               di->flags.vbus_collapse = true;
-               dev_dbg(di->dev, "USB Type - USB_STAT_RESERVED "
-                       "VBUS has collapsed\n");
-               ret = -1;
-               break;
-       case USB_STAT_HM_IDGND:
        case USB_STAT_NOT_CONFIGURED:
-       case USB_STAT_NOT_VALID_LINK:
+               if (di->vbus_detected) {
+                       di->usb_device_is_unrecognised = true;
+                       dev_dbg(di->dev, "USB Type - Legacy charger.\n");
+                       di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P5;
+                       break;
+               }
+       case USB_STAT_HM_IDGND:
                dev_err(di->dev, "USB Type - Charging not allowed\n");
                di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
                ret = -ENXIO;
                break;
+       case USB_STAT_RESERVED:
+               if (is_ab8500(di->parent)) {
+                       di->flags.vbus_collapse = true;
+                       dev_err(di->dev, "USB Type - USB_STAT_RESERVED "
+                                               "VBUS has collapsed\n");
+                       ret = -ENXIO;
+                       break;
+               }
+               if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
+                       dev_dbg(di->dev, "USB Type - Charging not allowed\n");
+                       di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+                       dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d",
+                                       link_status, di->max_usb_in_curr);
+                       ret = -ENXIO;
+                       break;
+               }
+               break;
+       case USB_STAT_CARKIT_1:
+       case USB_STAT_CARKIT_2:
+       case USB_STAT_ACA_DOCK_CHARGER:
+       case USB_STAT_CHARGER_LINE_1:
+               di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+               dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d", link_status,
+                               di->max_usb_in_curr);
+       case USB_STAT_NOT_VALID_LINK:
+               dev_err(di->dev, "USB Type invalid - try charging anyway\n");
+               di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+               break;
+
        default:
                dev_err(di->dev, "USB Type - Unknown\n");
                di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
@@ -645,8 +796,14 @@ static int ab8500_charger_read_usb_type(struct ab8500_charger *di)
                dev_err(di->dev, "%s ab8500 read failed\n", __func__);
                return ret;
        }
-       ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
-               AB8500_USB_LINE_STAT_REG, &val);
+       if (is_ab8500(di->parent)) {
+               ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
+                               AB8500_USB_LINE_STAT_REG, &val);
+       } else {
+               if (is_ab9540(di->parent) || is_ab8505(di->parent))
+                       ret = abx500_get_register_interruptible(di->dev,
+                               AB8500_USB, AB8500_USB_LINK1_STAT_REG, &val);
+       }
        if (ret < 0) {
                dev_err(di->dev, "%s ab8500 read failed\n", __func__);
                return ret;
@@ -682,16 +839,25 @@ static int ab8500_charger_detect_usb_type(struct ab8500_charger *di)
                ret = abx500_get_register_interruptible(di->dev,
                        AB8500_INTERRUPT, AB8500_IT_SOURCE21_REG,
                        &val);
+               dev_dbg(di->dev, "%s AB8500_IT_SOURCE21_REG %x\n",
+                       __func__, val);
                if (ret < 0) {
                        dev_err(di->dev, "%s ab8500 read failed\n", __func__);
                        return ret;
                }
-               ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
-                       AB8500_USB_LINE_STAT_REG, &val);
+
+               if (is_ab8500(di->parent))
+                       ret = abx500_get_register_interruptible(di->dev,
+                               AB8500_USB, AB8500_USB_LINE_STAT_REG, &val);
+               else
+                       ret = abx500_get_register_interruptible(di->dev,
+                               AB8500_USB, AB8500_USB_LINK1_STAT_REG, &val);
                if (ret < 0) {
                        dev_err(di->dev, "%s ab8500 read failed\n", __func__);
                        return ret;
                }
+               dev_dbg(di->dev, "%s AB8500_USB_LINE_STAT_REG %x\n", __func__,
+                       val);
                /*
                 * Until the IT source register is read the UsbLineStatus
                 * register is not updated, hence doing the same
@@ -935,6 +1101,144 @@ static int ab8500_charger_get_usb_cur(struct ab8500_charger *di)
        return 0;
 }
 
+/**
+ * ab8500_charger_set_current() - set charger current
+ * @di:                pointer to the ab8500_charger structure
+ * @ich:       charger current, in mA
+ * @reg:       select what charger register to set
+ *
+ * Set charger current.
+ * There is no state machine in the AB to step up/down the charger
+ * current to avoid dips and spikes on MAIN, VBUS and VBAT when
+ * charging is started. Instead we need to implement
+ * this charger current step-up/down here.
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_set_current(struct ab8500_charger *di,
+       int ich, int reg)
+{
+       int ret = 0;
+       int auto_curr_index, curr_index, prev_curr_index, shift_value, i;
+       u8 reg_value;
+       u32 step_udelay;
+       bool no_stepping = false;
+
+       atomic_inc(&di->current_stepping_sessions);
+
+       ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+               reg, &reg_value);
+       if (ret < 0) {
+               dev_err(di->dev, "%s read failed\n", __func__);
+               goto exit_set_current;
+       }
+
+       switch (reg) {
+       case AB8500_MCH_IPT_CURLVL_REG:
+               shift_value = MAIN_CH_INPUT_CURR_SHIFT;
+               prev_curr_index = (reg_value >> shift_value);
+               curr_index = ab8500_current_to_regval(ich);
+               step_udelay = STEP_UDELAY;
+               if (!di->ac.charger_connected)
+                       no_stepping = true;
+               break;
+       case AB8500_USBCH_IPT_CRNTLVL_REG:
+               shift_value = VBUS_IN_CURR_LIM_SHIFT;
+               prev_curr_index = (reg_value >> shift_value);
+               curr_index = ab8500_vbus_in_curr_to_regval(ich);
+               step_udelay = STEP_UDELAY * 100;
+
+               ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+                                       AB8500_CH_USBCH_STAT2_REG, &reg_value);
+               if (ret < 0) {
+                       dev_err(di->dev, "%s read failed\n", __func__);
+                       goto exit_set_current;
+               }
+               auto_curr_index =
+                       reg_value >> AUTO_VBUS_IN_CURR_LIM_SHIFT;
+
+               dev_dbg(di->dev, "%s Auto VBUS curr is %d mA\n",
+                       __func__,
+                       ab8500_charger_vbus_in_curr_map[auto_curr_index]);
+
+               prev_curr_index = min(prev_curr_index, auto_curr_index);
+
+               if (!di->usb.charger_connected)
+                       no_stepping = true;
+               break;
+       case AB8500_CH_OPT_CRNTLVL_REG:
+               shift_value = 0;
+               prev_curr_index = (reg_value >> shift_value);
+               curr_index = ab8500_current_to_regval(ich);
+               step_udelay = STEP_UDELAY;
+               if (curr_index && (curr_index - prev_curr_index) > 1)
+                       step_udelay *= 100;
+
+               if (!di->usb.charger_connected && !di->ac.charger_connected)
+                       no_stepping = true;
+
+               break;
+       default:
+               dev_err(di->dev, "%s current register not valid\n", __func__);
+               ret = -ENXIO;
+               goto exit_set_current;
+       }
+
+       if (curr_index < 0) {
+               dev_err(di->dev, "requested current limit out-of-range\n");
+               ret = -ENXIO;
+               goto exit_set_current;
+       }
+
+       /* only update current if it's been changed */
+       if (prev_curr_index == curr_index) {
+               dev_dbg(di->dev, "%s current not changed for reg: 0x%02x\n",
+                       __func__, reg);
+               ret = 0;
+               goto exit_set_current;
+       }
+
+       dev_dbg(di->dev, "%s set charger current: %d mA for reg: 0x%02x\n",
+               __func__, ich, reg);
+
+       if (no_stepping) {
+               ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+                                       reg, (u8)curr_index << shift_value);
+               if (ret)
+                       dev_err(di->dev, "%s write failed\n", __func__);
+       } else if (prev_curr_index > curr_index) {
+               for (i = prev_curr_index - 1; i >= curr_index; i--) {
+                       dev_dbg(di->dev, "curr change_1 to: %x for 0x%02x\n",
+                               (u8) i << shift_value, reg);
+                       ret = abx500_set_register_interruptible(di->dev,
+                               AB8500_CHARGER, reg, (u8)i << shift_value);
+                       if (ret) {
+                               dev_err(di->dev, "%s write failed\n", __func__);
+                               goto exit_set_current;
+                       }
+                       if (i != curr_index)
+                               usleep_range(step_udelay, step_udelay * 2);
+               }
+       } else {
+               for (i = prev_curr_index + 1; i <= curr_index; i++) {
+                       dev_dbg(di->dev, "curr change_2 to: %x for 0x%02x\n",
+                               (u8)i << shift_value, reg);
+                       ret = abx500_set_register_interruptible(di->dev,
+                               AB8500_CHARGER, reg, (u8)i << shift_value);
+                       if (ret) {
+                               dev_err(di->dev, "%s write failed\n", __func__);
+                               goto exit_set_current;
+                       }
+                       if (i != curr_index)
+                               usleep_range(step_udelay, step_udelay * 2);
+               }
+       }
+
+exit_set_current:
+       atomic_dec(&di->current_stepping_sessions);
+
+       return ret;
+}
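A minimal sketch, in plain user-space C, of the step-up/step-down idea described in the kerneldoc above: walk the current index one step at a time towards the target and sleep between steps so the charger current never jumps abruptly. write_reg() and the STEP_UDELAY value here are stand-ins, not the driver's register accessor or its actual delay.

/* Conceptual user-space sketch, not the driver code. */
#include <stdio.h>
#include <unistd.h>

#define STEP_UDELAY 1000 /* assumed step delay in microseconds */

static void write_reg(int index)
{
	/* stand-in for the register write done by the driver */
	printf("current index -> %d\n", index);
}

static void ramp_current(int prev_index, int target_index)
{
	int i;

	if (prev_index == target_index)
		return;
	if (prev_index > target_index) {
		/* step down one register index at a time */
		for (i = prev_index - 1; i >= target_index; i--) {
			write_reg(i);
			if (i != target_index)
				usleep(STEP_UDELAY);
		}
	} else {
		/* step up one register index at a time */
		for (i = prev_index + 1; i <= target_index; i++) {
			write_reg(i);
			if (i != target_index)
				usleep(STEP_UDELAY);
		}
	}
}

int main(void)
{
	ramp_current(0, 4); /* step up */
	ramp_current(4, 1); /* step down */
	return 0;
}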
+
 /**
  * ab8500_charger_set_vbus_in_curr() - set VBUS input current limit
  * @di:                pointer to the ab8500_charger structure
@@ -946,12 +1250,11 @@ static int ab8500_charger_get_usb_cur(struct ab8500_charger *di)
 static int ab8500_charger_set_vbus_in_curr(struct ab8500_charger *di,
                int ich_in)
 {
-       int ret;
-       int input_curr_index;
        int min_value;
+       int ret;
 
        /* We should always use to lowest current limit */
-       min_value = min(di->bat->chg_params->usb_curr_max, ich_in);
+       min_value = min(di->bm->chg_params->usb_curr_max, ich_in);
 
        switch (min_value) {
        case 100:
@@ -966,21 +1269,46 @@ static int ab8500_charger_set_vbus_in_curr(struct ab8500_charger *di,
                break;
        }
 
-       input_curr_index = ab8500_vbus_in_curr_to_regval(min_value);
-       if (input_curr_index < 0) {
-               dev_err(di->dev, "VBUS input current limit too high\n");
-               return -ENXIO;
-       }
+       dev_info(di->dev, "VBUS input current limit set to %d mA\n", min_value);
 
-       ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
-               AB8500_USBCH_IPT_CRNTLVL_REG,
-               input_curr_index << VBUS_IN_CURR_LIM_SHIFT);
-       if (ret)
-               dev_err(di->dev, "%s write failed\n", __func__);
+       mutex_lock(&di->usb_ipt_crnt_lock);
+       ret = ab8500_charger_set_current(di, min_value,
+               AB8500_USBCH_IPT_CRNTLVL_REG);
+       mutex_unlock(&di->usb_ipt_crnt_lock);
 
        return ret;
 }
 
+/**
+ * ab8500_charger_set_main_in_curr() - set main charger input current
+ * @di:                pointer to the ab8500_charger structure
+ * @ich_in:    input charger current, in mA
+ *
+ * Set main charger input current.
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_set_main_in_curr(struct ab8500_charger *di,
+       int ich_in)
+{
+       return ab8500_charger_set_current(di, ich_in,
+               AB8500_MCH_IPT_CURLVL_REG);
+}
+
+/**
+ * ab8500_charger_set_output_curr() - set charger output current
+ * @di:                pointer to the ab8500_charger structure
+ * @ich_out:   output charger current, in mA
+ *
+ * Set charger output current.
+ * Returns error code in case of failure else 0(on success)
+ */
+static int ab8500_charger_set_output_curr(struct ab8500_charger *di,
+       int ich_out)
+{
+       return ab8500_charger_set_current(di, ich_out,
+               AB8500_CH_OPT_CRNTLVL_REG);
+}
+
 /**
  * ab8500_charger_led_en() - turn on/off chargign led
  * @di:                pointer to the ab8500_charger structure
@@ -1074,7 +1402,7 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger,
                volt_index = ab8500_voltage_to_regval(vset);
                curr_index = ab8500_current_to_regval(iset);
                input_curr_index = ab8500_current_to_regval(
-                       di->bat->chg_params->ac_curr_max);
+                       di->bm->chg_params->ac_curr_max);
                if (volt_index < 0 || curr_index < 0 || input_curr_index < 0) {
                        dev_err(di->dev,
                                "Charger voltage or current too high, "
@@ -1090,23 +1418,24 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger,
                        return ret;
                }
                /* MainChInputCurr: current that can be drawn from the charger*/
-               ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
-                       AB8500_MCH_IPT_CURLVL_REG,
-                       input_curr_index << MAIN_CH_INPUT_CURR_SHIFT);
+               ret = ab8500_charger_set_main_in_curr(di,
+                       di->bm->chg_params->ac_curr_max);
                if (ret) {
-                       dev_err(di->dev, "%s write failed\n", __func__);
+                       dev_err(di->dev, "%s Failed to set MainChInputCurr\n",
+                               __func__);
                        return ret;
                }
                /* ChOutputCurentLevel: protected output current */
-               ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
-                       AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
+               ret = ab8500_charger_set_output_curr(di, iset);
                if (ret) {
-                       dev_err(di->dev, "%s write failed\n", __func__);
+                       dev_err(di->dev, "%s "
+                               "Failed to set ChOutputCurentLevel\n",
+                               __func__);
                        return ret;
                }
 
                /* Check if VBAT overshoot control should be enabled */
-               if (!di->bat->enable_overshoot)
+               if (!di->bm->enable_overshoot)
                        overshoot = MAIN_CH_NO_OVERSHOOT_ENA_N;
 
                /* Enable Main Charger */
@@ -1158,12 +1487,11 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger,
                                return ret;
                        }
 
-                       ret = abx500_set_register_interruptible(di->dev,
-                               AB8500_CHARGER,
-                               AB8500_CH_OPT_CRNTLVL_REG, CH_OP_CUR_LVL_0P1);
+                       ret = ab8500_charger_set_output_curr(di, 0);
                        if (ret) {
-                               dev_err(di->dev,
-                                       "%s write failed\n", __func__);
+                               dev_err(di->dev, "%s "
+                                       "Failed to set ChOutputCurentLevel\n",
+                                       __func__);
                                return ret;
                        }
                } else {
@@ -1259,24 +1587,13 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger,
                        dev_err(di->dev, "%s write failed\n", __func__);
                        return ret;
                }
-               /* USBChInputCurr: current that can be drawn from the usb */
-               ret = ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
-               if (ret) {
-                       dev_err(di->dev, "setting USBChInputCurr failed\n");
-                       return ret;
-               }
-               /* ChOutputCurentLevel: protected output current */
-               ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
-                       AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
-               if (ret) {
-                       dev_err(di->dev, "%s write failed\n", __func__);
-                       return ret;
-               }
                /* Check if VBAT overshoot control should be enabled */
-               if (!di->bat->enable_overshoot)
+               if (!di->bm->enable_overshoot)
                        overshoot = USB_CHG_NO_OVERSHOOT_ENA_N;
 
                /* Enable USB Charger */
+               dev_dbg(di->dev,
+                       "Enabling USB with write to AB8500_USBCH_CTRL1_REG\n");
                ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
                        AB8500_USBCH_CTRL1_REG, USB_CH_ENA | overshoot);
                if (ret) {
@@ -1289,11 +1606,29 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger,
                if (ret < 0)
                        dev_err(di->dev, "failed to enable LED\n");
 
+               di->usb.charger_online = 1;
+
+               /* USBChInputCurr: current that can be drawn from the usb */
+               ret = ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
+               if (ret) {
+                       dev_err(di->dev, "setting USBChInputCurr failed\n");
+                       return ret;
+               }
+
+               /* ChOutputCurentLevel: protected output current */
+               ret = ab8500_charger_set_output_curr(di, ich_out);
+               if (ret) {
+                       dev_err(di->dev, "%s "
+                               "Failed to set ChOutputCurentLevel\n",
+                               __func__);
+                       return ret;
+               }
+
                queue_delayed_work(di->charger_wq, &di->check_vbat_work, HZ);
 
-               di->usb.charger_online = 1;
        } else {
                /* Disable USB charging */
+               dev_dbg(di->dev, "%s Disabled USB charging\n", __func__);
                ret = abx500_set_register_interruptible(di->dev,
                        AB8500_CHARGER,
                        AB8500_USBCH_CTRL1_REG, 0);
@@ -1306,7 +1641,21 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger,
                ret = ab8500_charger_led_en(di, false);
                if (ret < 0)
                        dev_err(di->dev, "failed to disable LED\n");
+               /* USBChInputCurr: current that can be drawn from the usb */
+               ret = ab8500_charger_set_vbus_in_curr(di, 0);
+               if (ret) {
+                       dev_err(di->dev, "setting USBChInputCurr failed\n");
+                       return ret;
+               }
 
+               /* ChOutputCurentLevel: protected output current */
+               ret = ab8500_charger_set_output_curr(di, 0);
+               if (ret) {
+                       dev_err(di->dev, "%s "
+                               "Failed to reset ChOutputCurentLevel\n",
+                               __func__);
+                       return ret;
+               }
                di->usb.charger_online = 0;
                di->usb.wd_expired = false;
 
@@ -1366,7 +1715,6 @@ static int ab8500_charger_update_charger_current(struct ux500_charger *charger,
                int ich_out)
 {
        int ret;
-       int curr_index;
        struct ab8500_charger *di;
 
        if (charger->psy.type == POWER_SUPPLY_TYPE_MAINS)
@@ -1376,18 +1724,11 @@ static int ab8500_charger_update_charger_current(struct ux500_charger *charger,
        else
                return -ENXIO;
 
-       curr_index = ab8500_current_to_regval(ich_out);
-       if (curr_index < 0) {
-               dev_err(di->dev,
-                       "Charger current too high, "
-                       "charging not started\n");
-               return -ENXIO;
-       }
-
-       ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
-               AB8500_CH_OPT_CRNTLVL_REG, (u8) curr_index);
+       ret = ab8500_charger_set_output_curr(di, ich_out);
        if (ret) {
-               dev_err(di->dev, "%s write failed\n", __func__);
+               dev_err(di->dev, "%s "
+                       "Failed to set ChOutputCurentLevel\n",
+                       __func__);
                return ret;
        }
 
@@ -1597,7 +1938,7 @@ static void ab8500_charger_ac_work(struct work_struct *work)
         * synchronously, we have the check if the main charger is
         * connected by reading the status register
         */
-       ret = ab8500_charger_detect_chargers(di);
+       ret = ab8500_charger_detect_chargers(di, false);
        if (ret < 0)
                return;
 
@@ -1612,6 +1953,84 @@ static void ab8500_charger_ac_work(struct work_struct *work)
        sysfs_notify(&di->ac_chg.psy.dev->kobj, NULL, "present");
 }
 
+static void ab8500_charger_usb_attached_work(struct work_struct *work)
+{
+       struct ab8500_charger *di = container_of(work,
+                                                struct ab8500_charger,
+                                                usb_charger_attached_work.work);
+       int usbch = (USB_CH_VBUSDROP | USB_CH_VBUSDETDBNC);
+       int ret, i;
+       u8 statval;
+
+       for (i = 0; i < 10; i++) {
+               ret = abx500_get_register_interruptible(di->dev,
+                                                       AB8500_CHARGER,
+                                                       AB8500_CH_USBCH_STAT1_REG,
+                                                       &statval);
+               if (ret < 0) {
+                       dev_err(di->dev, "ab8500 read failed %d\n", __LINE__);
+                       goto reschedule;
+               }
+               if ((statval & usbch) != usbch)
+                       goto reschedule;
+
+               msleep(CHARGER_STATUS_POLL);
+       }
+
+       ab8500_charger_usb_en(&di->usb_chg, 0, 0, 0);
+
+       mutex_lock(&di->charger_attached_mutex);
+       mutex_unlock(&di->charger_attached_mutex);
+
+       return;
+
+reschedule:
+       queue_delayed_work(di->charger_wq,
+                          &di->usb_charger_attached_work,
+                          HZ);
+}
+
+static void ab8500_charger_ac_attached_work(struct work_struct *work)
+{
+
+       struct ab8500_charger *di = container_of(work,
+                                                struct ab8500_charger,
+                                                ac_charger_attached_work.work);
+       int mainch = (MAIN_CH_STATUS2_MAINCHGDROP |
+                     MAIN_CH_STATUS2_MAINCHARGERDETDBNC);
+       int ret, i;
+       u8 statval;
+
+       for (i = 0; i < 10; i++) {
+               ret = abx500_get_register_interruptible(di->dev,
+                                                       AB8500_CHARGER,
+                                                       AB8500_CH_STATUS2_REG,
+                                                       &statval);
+               if (ret < 0) {
+                       dev_err(di->dev, "ab8500 read failed %d\n", __LINE__);
+                       goto reschedule;
+               }
+
+               if ((statval & mainch) != mainch)
+                       goto reschedule;
+
+               msleep(CHARGER_STATUS_POLL);
+       }
+
+       ab8500_charger_ac_en(&di->ac_chg, 0, 0, 0);
+       queue_work(di->charger_wq, &di->ac_work);
+
+       mutex_lock(&di->charger_attached_mutex);
+       mutex_unlock(&di->charger_attached_mutex);
+
+       return;
+
+reschedule:
+       queue_delayed_work(di->charger_wq,
+                          &di->ac_charger_attached_work,
+                          HZ);
+}
+
 /**
  * ab8500_charger_detect_usb_type_work() - work to detect USB type
  * @work:      Pointer to the work_struct structure
@@ -1630,16 +2049,18 @@ static void ab8500_charger_detect_usb_type_work(struct work_struct *work)
         * synchronously, we have the check if is
         * connected by reading the status register
         */
-       ret = ab8500_charger_detect_chargers(di);
+       ret = ab8500_charger_detect_chargers(di, false);
        if (ret < 0)
                return;
 
        if (!(ret & USB_PW_CONN)) {
-               di->vbus_detected = 0;
+               dev_dbg(di->dev, "%s di->vbus_detected = false\n", __func__);
+               di->vbus_detected = false;
                ab8500_charger_set_usb_connected(di, false);
                ab8500_power_supply_changed(di, &di->usb_chg.psy);
        } else {
-               di->vbus_detected = 1;
+               dev_dbg(di->dev, "%s di->vbus_detected = true\n", __func__);
+               di->vbus_detected = true;
 
                if (is_ab8500_1p1_or_earlier(di->parent)) {
                        ret = ab8500_charger_detect_usb_type(di);
@@ -1649,7 +2070,8 @@ static void ab8500_charger_detect_usb_type_work(struct work_struct *work)
                                                            &di->usb_chg.psy);
                        }
                } else {
-                       /* For ABB cut2.0 and onwards we have an IRQ,
+                       /*
+                        * For ABB cut2.0 and onwards we have an IRQ,
                         * USB_LINK_STATUS that will be triggered when the USB
                         * link status changes. The exception is USB connected
                         * during startup. Then we don't get a
@@ -1669,6 +2091,29 @@ static void ab8500_charger_detect_usb_type_work(struct work_struct *work)
        }
 }
 
+/**
+ * ab8500_charger_usb_link_attach_work() - work to detect USB type
+ * @work:      pointer to the work_struct structure
+ *
+ * Detect the type of USB plugged
+ */
+static void ab8500_charger_usb_link_attach_work(struct work_struct *work)
+{
+       struct ab8500_charger *di =
+               container_of(work, struct ab8500_charger, attach_work.work);
+       int ret;
+
+       /* Update maximum input current if USB enumeration is not detected */
+       if (!di->usb.charger_online) {
+               ret = ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
+               if (ret)
+                       return;
+       }
+
+       ab8500_charger_set_usb_connected(di, true);
+       ab8500_power_supply_changed(di, &di->usb_chg.psy);
+}
+
 /**
  * ab8500_charger_usb_link_status_work() - work to detect USB type
  * @work:      pointer to the work_struct structure
@@ -1677,7 +2122,9 @@ static void ab8500_charger_detect_usb_type_work(struct work_struct *work)
  */
 static void ab8500_charger_usb_link_status_work(struct work_struct *work)
 {
+       int detected_chargers;
        int ret;
+       u8 val;
 
        struct ab8500_charger *di = container_of(work,
                struct ab8500_charger, usb_link_status_work);
@@ -1687,31 +2134,95 @@ static void ab8500_charger_usb_link_status_work(struct work_struct *work)
         * synchronously, we have the check if  is
         * connected by reading the status register
         */
-       ret = ab8500_charger_detect_chargers(di);
-       if (ret < 0)
+       detected_chargers = ab8500_charger_detect_chargers(di, false);
+       if (detected_chargers < 0)
                return;
 
-       if (!(ret & USB_PW_CONN)) {
-               di->vbus_detected = 0;
+       /*
+        * Some chargers that break the USB spec are
+        * identified as invalid by the AB8500 and it refuses
+        * to start the charging process. But by jumping
+        * through a few hoops it can be forced to start.
+        */
+       ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
+                       AB8500_USB_LINE_STAT_REG, &val);
+       if (ret >= 0)
+               dev_dbg(di->dev, "UsbLineStatus register = 0x%02x\n", val);
+       else
+               dev_dbg(di->dev, "Error reading USB link status\n");
+
+       if (detected_chargers & USB_PW_CONN) {
+               if (((val & AB8500_USB_LINK_STATUS) >> 3) == USB_STAT_NOT_VALID_LINK &&
+                               di->invalid_charger_detect_state == 0) {
+                       dev_dbg(di->dev, "Invalid charger detected, state= 0\n");
+                       /* Enable charger */
+                       abx500_mask_and_set_register_interruptible(di->dev,
+                                       AB8500_CHARGER, AB8500_USBCH_CTRL1_REG, 0x01, 0x01);
+                       /* Enable charger detection */
+                       abx500_mask_and_set_register_interruptible(di->dev, AB8500_USB,
+                                       AB8500_MCH_IPT_CURLVL_REG, 0x01, 0x01);
+                       di->invalid_charger_detect_state = 1;
+                       /* Exit and wait for new link status interrupt */
+                       return;
+
+               }
+               if (di->invalid_charger_detect_state == 1) {
+                       dev_dbg(di->dev, "Invalid charger detected, state= 1\n");
+                       /* Stop charger detection */
+                       abx500_mask_and_set_register_interruptible(di->dev, AB8500_USB,
+                                       AB8500_MCH_IPT_CURLVL_REG, 0x01, 0x00);
+                       /* Check link status */
+                       ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
+                                       AB8500_USB_LINE_STAT_REG, &val);
+                       dev_dbg(di->dev, "USB link status= 0x%02x\n",
+                                       (val & AB8500_USB_LINK_STATUS) >> 3);
+                       di->invalid_charger_detect_state = 2;
+               }
+       } else {
+               di->invalid_charger_detect_state = 0;
+       }
+
+       if (!(detected_chargers & USB_PW_CONN)) {
+               di->vbus_detected = false;
                ab8500_charger_set_usb_connected(di, false);
                ab8500_power_supply_changed(di, &di->usb_chg.psy);
-       } else {
-               di->vbus_detected = 1;
-               ret = ab8500_charger_read_usb_type(di);
-               if (!ret) {
-                       /* Update maximum input current */
-                       ret = ab8500_charger_set_vbus_in_curr(di,
-                                       di->max_usb_in_curr);
-                       if (ret)
-                               return;
+               return;
+       }
 
-                       ab8500_charger_set_usb_connected(di, true);
-                       ab8500_power_supply_changed(di, &di->usb_chg.psy);
-               } else if (ret == -ENXIO) {
+       dev_dbg(di->dev, "%s di->vbus_detected = true\n", __func__);
+       di->vbus_detected = true;
+       ret = ab8500_charger_read_usb_type(di);
+       if (ret) {
+               if (ret == -ENXIO) {
                        /* No valid charger type detected */
                        ab8500_charger_set_usb_connected(di, false);
                        ab8500_power_supply_changed(di, &di->usb_chg.psy);
                }
+               return;
+       }
+
+       if (di->usb_device_is_unrecognised) {
+               dev_dbg(di->dev,
+                       "Potential Legacy Charger device. "
+                       "Delay work for %d msec for USB enum "
+                       "to finish",
+                       WAIT_ACA_RID_ENUMERATION);
+               queue_delayed_work(di->charger_wq,
+                                  &di->attach_work,
+                                  msecs_to_jiffies(WAIT_ACA_RID_ENUMERATION));
+       } else if (di->is_aca_rid == 1) {
+               /* Only wait once */
+               di->is_aca_rid++;
+               dev_dbg(di->dev,
+                       "%s Wait %d msec for USB enum to finish",
+                       __func__, WAIT_ACA_RID_ENUMERATION);
+               queue_delayed_work(di->charger_wq,
+                                  &di->attach_work,
+                                  msecs_to_jiffies(WAIT_ACA_RID_ENUMERATION));
+       } else {
+               queue_delayed_work(di->charger_wq,
+                                  &di->attach_work,
+                                  0);
        }
 }
 
@@ -1721,24 +2232,20 @@ static void ab8500_charger_usb_state_changed_work(struct work_struct *work)
        unsigned long flags;
 
        struct ab8500_charger *di = container_of(work,
-               struct ab8500_charger, usb_state_changed_work);
+               struct ab8500_charger, usb_state_changed_work.work);
 
-       if (!di->vbus_detected)
+       if (!di->vbus_detected) {
+               dev_dbg(di->dev,
+                       "%s !di->vbus_detected\n",
+                       __func__);
                return;
+       }
 
        spin_lock_irqsave(&di->usb_state.usb_lock, flags);
-       di->usb_state.usb_changed = false;
+       di->usb_state.state = di->usb_state.state_tmp;
+       di->usb_state.usb_current = di->usb_state.usb_current_tmp;
        spin_unlock_irqrestore(&di->usb_state.usb_lock, flags);
 
-       /*
-        * wait for some time until you get updates from the usb stack
-        * and negotiations are completed
-        */
-       msleep(250);
-
-       if (di->usb_state.usb_changed)
-               return;
-
        dev_dbg(di->dev, "%s USB state: 0x%02x mA: %d\n",
                __func__, di->usb_state.state, di->usb_state.usb_current);
 
@@ -1892,6 +2399,10 @@ static irqreturn_t ab8500_charger_mainchunplugdet_handler(int irq, void *_di)
        dev_dbg(di->dev, "Main charger unplugged\n");
        queue_work(di->charger_wq, &di->ac_work);
 
+       cancel_delayed_work_sync(&di->ac_charger_attached_work);
+       mutex_lock(&di->charger_attached_mutex);
+       mutex_unlock(&di->charger_attached_mutex);
+
        return IRQ_HANDLED;
 }
 
@@ -1909,6 +2420,11 @@ static irqreturn_t ab8500_charger_mainchplugdet_handler(int irq, void *_di)
        dev_dbg(di->dev, "Main charger plugged\n");
        queue_work(di->charger_wq, &di->ac_work);
 
+       mutex_lock(&di->charger_attached_mutex);
+       mutex_unlock(&di->charger_attached_mutex);
+       queue_delayed_work(di->charger_wq,
+                          &di->ac_charger_attached_work,
+                          HZ);
        return IRQ_HANDLED;
 }
 
@@ -1971,6 +2487,21 @@ static irqreturn_t ab8500_charger_mainchthprotf_handler(int irq, void *_di)
        return IRQ_HANDLED;
 }
 
+static void ab8500_charger_vbus_drop_end_work(struct work_struct *work)
+{
+       struct ab8500_charger *di = container_of(work,
+               struct ab8500_charger, vbus_drop_end_work.work);
+
+       di->flags.vbus_drop_end = false;
+
+       /* Reset the drop counter */
+       abx500_set_register_interruptible(di->dev,
+                                 AB8500_CHARGER, AB8500_CHARGER_CTRL, 0x01);
+
+       if (di->usb.charger_connected)
+               ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
+}
+
 /**
  * ab8500_charger_vbusdetf_handler() - VBUS falling detected
  * @irq:       interrupt number
@@ -1982,6 +2513,7 @@ static irqreturn_t ab8500_charger_vbusdetf_handler(int irq, void *_di)
 {
        struct ab8500_charger *di = _di;
 
+       di->vbus_detected = false;
        dev_dbg(di->dev, "VBUS falling detected\n");
        queue_work(di->charger_wq, &di->detect_usb_type_work);
 
@@ -2001,6 +2533,7 @@ static irqreturn_t ab8500_charger_vbusdetr_handler(int irq, void *_di)
 
        di->vbus_detected = true;
        dev_dbg(di->dev, "VBUS rising detected\n");
+
        queue_work(di->charger_wq, &di->detect_usb_type_work);
 
        return IRQ_HANDLED;
@@ -2108,6 +2641,25 @@ static irqreturn_t ab8500_charger_chwdexp_handler(int irq, void *_di)
        return IRQ_HANDLED;
 }
 
+/**
+ * ab8500_charger_vbuschdropend_handler() - VBUS drop removed
+ * @irq:       interrupt number
+ * @_di:       pointer to the ab8500_charger structure
+ *
+ * Returns IRQ status(IRQ_HANDLED)
+ */
+static irqreturn_t ab8500_charger_vbuschdropend_handler(int irq, void *_di)
+{
+       struct ab8500_charger *di = _di;
+
+       dev_dbg(di->dev, "VBUS charger drop ended\n");
+       di->flags.vbus_drop_end = true;
+       queue_delayed_work(di->charger_wq, &di->vbus_drop_end_work,
+                          round_jiffies(30 * HZ));
+
+       return IRQ_HANDLED;
+}
+
 /**
  * ab8500_charger_vbusovv_handler() - VBUS overvoltage detected
  * @irq:       interrupt number
@@ -2148,6 +2700,7 @@ static int ab8500_charger_ac_get_property(struct power_supply *psy,
        union power_supply_propval *val)
 {
        struct ab8500_charger *di;
+       int ret;
 
        di = to_ab8500_charger_ac_device_info(psy_to_ux500_charger(psy));
 
@@ -2169,7 +2722,10 @@ static int ab8500_charger_ac_get_property(struct power_supply *psy,
                val->intval = di->ac.charger_connected;
                break;
        case POWER_SUPPLY_PROP_VOLTAGE_NOW:
-               di->ac.charger_voltage = ab8500_charger_get_ac_voltage(di);
+               ret = ab8500_charger_get_ac_voltage(di);
+               if (ret >= 0)
+                       di->ac.charger_voltage = ret;
+               /* On error, use previous value */
                val->intval = di->ac.charger_voltage * 1000;
                break;
        case POWER_SUPPLY_PROP_VOLTAGE_AVG:
@@ -2181,7 +2737,10 @@ static int ab8500_charger_ac_get_property(struct power_supply *psy,
                val->intval = di->ac.cv_active;
                break;
        case POWER_SUPPLY_PROP_CURRENT_NOW:
-               val->intval = ab8500_charger_get_ac_current(di) * 1000;
+               ret = ab8500_charger_get_ac_current(di);
+               if (ret >= 0)
+                       di->ac.charger_current = ret;
+               val->intval = di->ac.charger_current * 1000;
                break;
        default:
                return -EINVAL;
@@ -2208,6 +2767,7 @@ static int ab8500_charger_usb_get_property(struct power_supply *psy,
        union power_supply_propval *val)
 {
        struct ab8500_charger *di;
+       int ret;
 
        di = to_ab8500_charger_usb_device_info(psy_to_ux500_charger(psy));
 
@@ -2231,7 +2791,9 @@ static int ab8500_charger_usb_get_property(struct power_supply *psy,
                val->intval = di->usb.charger_connected;
                break;
        case POWER_SUPPLY_PROP_VOLTAGE_NOW:
-               di->usb.charger_voltage = ab8500_charger_get_vbus_voltage(di);
+               ret = ab8500_charger_get_vbus_voltage(di);
+               if (ret >= 0)
+                       di->usb.charger_voltage = ret;
                val->intval = di->usb.charger_voltage * 1000;
                break;
        case POWER_SUPPLY_PROP_VOLTAGE_AVG:
@@ -2243,7 +2805,10 @@ static int ab8500_charger_usb_get_property(struct power_supply *psy,
                val->intval = di->usb.cv_active;
                break;
        case POWER_SUPPLY_PROP_CURRENT_NOW:
-               val->intval = ab8500_charger_get_usb_current(di) * 1000;
+               ret = ab8500_charger_get_usb_current(di);
+               if (ret >= 0)
+                       di->usb.charger_current = ret;
+               val->intval = di->usb.charger_current * 1000;
                break;
        case POWER_SUPPLY_PROP_CURRENT_AVG:
                /*
@@ -2293,13 +2858,23 @@ static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
                }
        }
 
-       /* VBUS OVV set to 6.3V and enable automatic current limitiation */
-       ret = abx500_set_register_interruptible(di->dev,
-               AB8500_CHARGER,
-               AB8500_USBCH_CTRL2_REG,
-               VBUS_OVV_SELECT_6P3V | VBUS_AUTO_IN_CURR_LIM_ENA);
+       if (is_ab9540_2p0(di->parent) || is_ab8505_2p0(di->parent))
+               ret = abx500_mask_and_set_register_interruptible(di->dev,
+                       AB8500_CHARGER,
+                       AB8500_USBCH_CTRL2_REG,
+                       VBUS_AUTO_IN_CURR_LIM_ENA,
+                       VBUS_AUTO_IN_CURR_LIM_ENA);
+       else
+               /*
+                * VBUS OVV set to 6.3V and enable automatic current limitation
+                */
+               ret = abx500_set_register_interruptible(di->dev,
+                       AB8500_CHARGER,
+                       AB8500_USBCH_CTRL2_REG,
+                       VBUS_OVV_SELECT_6P3V | VBUS_AUTO_IN_CURR_LIM_ENA);
        if (ret) {
-               dev_err(di->dev, "failed to set VBUS OVV\n");
+               dev_err(di->dev,
+                       "failed to set automatic current limitation\n");
                goto out;
        }
 
@@ -2355,12 +2930,26 @@ static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
                goto out;
        }
 
+       /* Set charger watchdog timeout */
+       ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
+               AB8500_CH_WD_TIMER_REG, WD_TIMER);
+       if (ret) {
+               dev_err(di->dev, "failed to set charger watchdog timeout\n");
+               goto out;
+       }
+
+       ret = ab8500_charger_led_en(di, false);
+       if (ret < 0) {
+               dev_err(di->dev, "failed to disable LED\n");
+               goto out;
+       }
+
        /* Backup battery voltage and current */
        ret = abx500_set_register_interruptible(di->dev,
                AB8500_RTC,
                AB8500_RTC_BACKUP_CHG_REG,
-               di->bat->bkup_bat_v |
-               di->bat->bkup_bat_i);
+               di->bm->bkup_bat_v |
+               di->bm->bkup_bat_i);
        if (ret) {
                dev_err(di->dev, "failed to setup backup battery charging\n");
                goto out;
@@ -2394,6 +2983,7 @@ static struct ab8500_charger_interrupts ab8500_charger_irq[] = {
        {"USB_CHARGER_NOT_OKR", ab8500_charger_usbchargernotokr_handler},
        {"VBUS_OVV", ab8500_charger_vbusovv_handler},
        {"CH_WD_EXP", ab8500_charger_chwdexp_handler},
+       {"VBUS_CH_DROP_END", ab8500_charger_vbuschdropend_handler},
 };
 
 static int ab8500_charger_usb_notifier_call(struct notifier_block *nb,
@@ -2404,6 +2994,9 @@ static int ab8500_charger_usb_notifier_call(struct notifier_block *nb,
        enum ab8500_usb_state bm_usb_state;
        unsigned mA = *((unsigned *)power);
 
+       if (!di)
+               return NOTIFY_DONE;
+
        if (event != USB_EVENT_VBUS) {
                dev_dbg(di->dev, "not a standard host, returning\n");
                return NOTIFY_DONE;
@@ -2427,13 +3020,15 @@ static int ab8500_charger_usb_notifier_call(struct notifier_block *nb,
                __func__, bm_usb_state, mA);
 
        spin_lock(&di->usb_state.usb_lock);
-       di->usb_state.usb_changed = true;
+       di->usb_state.state_tmp = bm_usb_state;
+       di->usb_state.usb_current_tmp = mA;
        spin_unlock(&di->usb_state.usb_lock);
 
-       di->usb_state.state = bm_usb_state;
-       di->usb_state.usb_current = mA;
-
-       queue_work(di->charger_wq, &di->usb_state_changed_work);
+       /*
+        * wait for some time until you get updates from the usb stack
+        * and negotiations are completed
+        */
+       queue_delayed_work(di->charger_wq, &di->usb_state_changed_work, HZ/2);
 
        return NOTIFY_OK;
 }
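A minimal sketch of the latch-and-defer pattern the notifier now follows, assuming user-space pthreads and a plain sleep in place of the kernel workqueue and queue_delayed_work(): the notifier only records the reported state under a lock, and the deferred worker copies and acts on it once the USB stack has had time to finish negotiation.

/* User-space sketch under the assumptions above, not the driver code. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int state_tmp, current_tmp;   /* latched by the notifier */
static int state, current_ma;        /* consumed by the deferred worker */

static void notifier(int new_state, int ma)
{
	/* only latch the values; processing is deferred */
	pthread_mutex_lock(&lock);
	state_tmp = new_state;
	current_tmp = ma;
	pthread_mutex_unlock(&lock);
	/* real driver: queue_delayed_work(..., HZ / 2); */
}

static void *state_changed_work(void *unused)
{
	(void)unused;
	sleep(1); /* stand-in for the HZ/2 delay */
	pthread_mutex_lock(&lock);
	state = state_tmp;
	current_ma = current_tmp;
	pthread_mutex_unlock(&lock);
	printf("USB state: %d, %d mA\n", state, current_ma);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	notifier(1, 500);
	pthread_create(&worker, NULL, state_changed_work, NULL);
	pthread_join(worker, NULL);
	return 0;
}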
@@ -2473,6 +3068,9 @@ static int ab8500_charger_resume(struct platform_device *pdev)
                        &di->check_hw_failure_work, 0);
        }
 
+       if (di->flags.vbus_drop_end)
+               queue_delayed_work(di->charger_wq, &di->vbus_drop_end_work, 0);
+
        return 0;
 }
 
@@ -2485,6 +3083,23 @@ static int ab8500_charger_suspend(struct platform_device *pdev,
        if (delayed_work_pending(&di->check_hw_failure_work))
                cancel_delayed_work(&di->check_hw_failure_work);
 
+       if (delayed_work_pending(&di->vbus_drop_end_work))
+               cancel_delayed_work(&di->vbus_drop_end_work);
+
+       flush_delayed_work(&di->attach_work);
+       flush_delayed_work(&di->usb_charger_attached_work);
+       flush_delayed_work(&di->ac_charger_attached_work);
+       flush_delayed_work(&di->check_usbchgnotok_work);
+       flush_delayed_work(&di->check_vbat_work);
+       flush_delayed_work(&di->kick_wd_work);
+
+       flush_work(&di->usb_link_status_work);
+       flush_work(&di->ac_work);
+       flush_work(&di->detect_usb_type_work);
+
+       if (atomic_read(&di->current_stepping_sessions))
+               return -EAGAIN;
+
        return 0;
 }
 #else
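
The reworked suspend path first drops work that is safe to cancel outright (the VBUS-drop-end check), then flushes every remaining charger work item so nothing races the freezer, and finally refuses to suspend with -EAGAIN while a current-stepping session is still in flight. Stripped down to its shape, with hypothetical names, the pattern looks roughly like this:

/* Illustrative only: refuse to suspend while deferred work is still busy. */
struct example_charger {
	struct delayed_work housekeeping_work;
	struct work_struct detect_work;
	atomic_t busy_sessions;
};

static int example_charger_suspend(struct platform_device *pdev,
				   pm_message_t state)
{
	struct example_charger *di = platform_get_drvdata(pdev);

	cancel_delayed_work(&di->housekeeping_work);	/* safe to drop */
	flush_work(&di->detect_work);			/* must finish first */

	if (atomic_read(&di->busy_sessions))
		return -EAGAIN;				/* not safe to sleep yet */

	return 0;
}
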
@@ -2509,9 +3124,6 @@ static int ab8500_charger_remove(struct platform_device *pdev)
                free_irq(irq, di);
        }
 
-       /* disable the regulator */
-       regulator_put(di->regu);
-
        /* Backup battery voltage and current disable */
        ret = abx500_mask_and_set_register_interruptible(di->dev,
                AB8500_RTC, AB8500_RTC_CTRL_REG, RTC_BUP_CH_ENA, 0);
@@ -2525,8 +3137,12 @@ static int ab8500_charger_remove(struct platform_device *pdev)
        destroy_workqueue(di->charger_wq);
 
        flush_scheduled_work();
-       power_supply_unregister(&di->usb_chg.psy);
-       power_supply_unregister(&di->ac_chg.psy);
+       if (di->usb_chg.enabled)
+               power_supply_unregister(&di->usb_chg.psy);
+#if !defined(CONFIG_CHARGER_PM2301)
+       if (di->ac_chg.enabled)
+               power_supply_unregister(&di->ac_chg.psy);
+#endif
        platform_set_drvdata(pdev, NULL);
 
        return 0;
@@ -2541,32 +3157,31 @@ static char *supply_interface[] = {
 static int ab8500_charger_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
+       struct abx500_bm_data *plat = pdev->dev.platform_data;
        struct ab8500_charger *di;
-       int irq, i, charger_status, ret = 0;
+       int irq, i, charger_status, ret = 0, ch_stat;
 
        di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
        if (!di) {
                dev_err(&pdev->dev, "%s no mem for ab8500_charger\n", __func__);
                return -ENOMEM;
        }
-       di->bat = pdev->mfd_cell->platform_data;
-       if (!di->bat) {
-               if (np) {
-                       ret = bmdevs_of_probe(&pdev->dev, np, &di->bat);
-                       if (ret) {
-                               dev_err(&pdev->dev,
-                                       "failed to get battery information\n");
-                               return ret;
-                       }
-                       di->autopower_cfg = of_property_read_bool(np, "autopower_cfg");
-               } else {
-                       dev_err(&pdev->dev, "missing dt node for ab8500_charger\n");
-                       return -EINVAL;
+
+       if (!plat) {
+               dev_err(&pdev->dev, "no battery management data supplied\n");
+               return -EINVAL;
+       }
+       di->bm = plat;
+
+       if (np) {
+               ret = ab8500_bm_of_probe(&pdev->dev, np, di->bm);
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to get battery information\n");
+                       return ret;
                }
-       } else {
-               dev_info(&pdev->dev, "falling back to legacy platform data\n");
+               di->autopower_cfg = of_property_read_bool(np, "autopower_cfg");
+       } else
                di->autopower_cfg = false;
-       }
 
        /* get parent data */
        di->dev = &pdev->dev;
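
Probe now requires the battery-management data to arrive through dev.platform_data and only uses the device tree, via ab8500_bm_of_probe(), to overlay board-specific properties on top of it; the old per-driver bmdevs_of_probe() fallback and the "legacy platform data" path are gone. A skeleton of that pattern (error messages and surrounding code omitted, names otherwise as in the hunk above):

/* Sketch: mandatory platform data, optionally augmented from the DT node. */
static int example_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct abx500_bm_data *plat = pdev->dev.platform_data;
	int ret;

	if (!plat)
		return -EINVAL;			/* platform data is required */

	if (np) {
		ret = ab8500_bm_of_probe(&pdev->dev, np, plat);
		if (ret)
			return ret;		/* DT present but unusable */
	}

	/* ... continue with plat, now augmented from DT where available ... */
	return 0;
}
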
@@ -2575,8 +3190,10 @@ static int ab8500_charger_probe(struct platform_device *pdev)
 
        /* initialize lock */
        spin_lock_init(&di->usb_state.usb_lock);
+       mutex_init(&di->usb_ipt_crnt_lock);
 
        di->autopower = false;
+       di->invalid_charger_detect_state = 0;
 
        /* AC supply */
        /* power_supply base class */
@@ -2595,6 +3212,9 @@ static int ab8500_charger_probe(struct platform_device *pdev)
                ARRAY_SIZE(ab8500_charger_voltage_map) - 1];
        di->ac_chg.max_out_curr = ab8500_charger_current_map[
                ARRAY_SIZE(ab8500_charger_current_map) - 1];
+       di->ac_chg.wdt_refresh = CHG_WD_INTERVAL;
+       di->ac_chg.enabled = di->bm->ac_enabled;
+       di->ac_chg.external = false;
 
        /* USB supply */
        /* power_supply base class */
@@ -2613,7 +3233,9 @@ static int ab8500_charger_probe(struct platform_device *pdev)
                ARRAY_SIZE(ab8500_charger_voltage_map) - 1];
        di->usb_chg.max_out_curr = ab8500_charger_current_map[
                ARRAY_SIZE(ab8500_charger_current_map) - 1];
-
+       di->usb_chg.wdt_refresh = CHG_WD_INTERVAL;
+       di->usb_chg.enabled = di->bm->usb_enabled;
+       di->usb_chg.external = false;
 
        /* Create a work queue for the charger */
        di->charger_wq =
@@ -2623,12 +3245,19 @@ static int ab8500_charger_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
+       mutex_init(&di->charger_attached_mutex);
+
        /* Init work for HW failure check */
        INIT_DEFERRABLE_WORK(&di->check_hw_failure_work,
                ab8500_charger_check_hw_failure_work);
        INIT_DEFERRABLE_WORK(&di->check_usbchgnotok_work,
                ab8500_charger_check_usbchargernotok_work);
 
+       INIT_DELAYED_WORK(&di->ac_charger_attached_work,
+                         ab8500_charger_ac_attached_work);
+       INIT_DELAYED_WORK(&di->usb_charger_attached_work,
+                         ab8500_charger_usb_attached_work);
+
        /*
         * For ABB revision 1.0 and 1.1 there is a bug in the watchdog
         * logic. That means we have to continously kick the charger
@@ -2644,6 +3273,15 @@ static int ab8500_charger_probe(struct platform_device *pdev)
        INIT_DEFERRABLE_WORK(&di->check_vbat_work,
                ab8500_charger_check_vbat_work);
 
+       INIT_DELAYED_WORK(&di->attach_work,
+               ab8500_charger_usb_link_attach_work);
+
+       INIT_DELAYED_WORK(&di->usb_state_changed_work,
+               ab8500_charger_usb_state_changed_work);
+
+       INIT_DELAYED_WORK(&di->vbus_drop_end_work,
+               ab8500_charger_vbus_drop_end_work);
+
        /* Init work for charger detection */
        INIT_WORK(&di->usb_link_status_work,
                ab8500_charger_usb_link_status_work);
@@ -2651,9 +3289,6 @@ static int ab8500_charger_probe(struct platform_device *pdev)
        INIT_WORK(&di->detect_usb_type_work,
                ab8500_charger_detect_usb_type_work);
 
-       INIT_WORK(&di->usb_state_changed_work,
-               ab8500_charger_usb_state_changed_work);
-
        /* Init work for checking HW status */
        INIT_WORK(&di->check_main_thermal_prot_work,
                ab8500_charger_check_main_thermal_prot_work);
@@ -2665,7 +3300,7 @@ static int ab8500_charger_probe(struct platform_device *pdev)
         * is a charger connected to avoid erroneous BTEMP_HIGH/LOW
         * interrupts during charging
         */
-       di->regu = regulator_get(di->dev, "vddadc");
+       di->regu = devm_regulator_get(di->dev, "vddadc");
        if (IS_ERR(di->regu)) {
                ret = PTR_ERR(di->regu);
                dev_err(di->dev, "failed to get vddadc regulator\n");
@@ -2677,21 +3312,25 @@ static int ab8500_charger_probe(struct platform_device *pdev)
        ret = ab8500_charger_init_hw_registers(di);
        if (ret) {
                dev_err(di->dev, "failed to initialize ABB registers\n");
-               goto free_regulator;
+               goto free_charger_wq;
        }
 
        /* Register AC charger class */
-       ret = power_supply_register(di->dev, &di->ac_chg.psy);
-       if (ret) {
-               dev_err(di->dev, "failed to register AC charger\n");
-               goto free_regulator;
+       if (di->ac_chg.enabled) {
+               ret = power_supply_register(di->dev, &di->ac_chg.psy);
+               if (ret) {
+                       dev_err(di->dev, "failed to register AC charger\n");
+                       goto free_charger_wq;
+               }
        }
 
        /* Register USB charger class */
-       ret = power_supply_register(di->dev, &di->usb_chg.psy);
-       if (ret) {
-               dev_err(di->dev, "failed to register USB charger\n");
-               goto free_ac;
+       if (di->usb_chg.enabled) {
+               ret = power_supply_register(di->dev, &di->usb_chg.psy);
+               if (ret) {
+                       dev_err(di->dev, "failed to register USB charger\n");
+                       goto free_ac;
+               }
        }
 
        di->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
@@ -2708,7 +3347,7 @@ static int ab8500_charger_probe(struct platform_device *pdev)
        }
 
        /* Identify the connected charger types during startup */
-       charger_status = ab8500_charger_detect_chargers(di);
+       charger_status = ab8500_charger_detect_chargers(di, true);
        if (charger_status & AC_PW_CONN) {
                di->ac.charger_connected = 1;
                di->ac_conn = true;
@@ -2717,7 +3356,6 @@ static int ab8500_charger_probe(struct platform_device *pdev)
        }
 
        if (charger_status & USB_PW_CONN) {
-               dev_dbg(di->dev, "VBUS Detect during startup\n");
                di->vbus_detected = true;
                di->vbus_detected_start = true;
                queue_work(di->charger_wq,
@@ -2742,6 +3380,23 @@ static int ab8500_charger_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, di);
 
+       mutex_lock(&di->charger_attached_mutex);
+
+       ch_stat = ab8500_charger_detect_chargers(di, false);
+
+       if ((ch_stat & AC_PW_CONN) == AC_PW_CONN) {
+               queue_delayed_work(di->charger_wq,
+                                  &di->ac_charger_attached_work,
+                                  HZ);
+       }
+       if ((ch_stat & USB_PW_CONN) == USB_PW_CONN) {
+               queue_delayed_work(di->charger_wq,
+                                  &di->usb_charger_attached_work,
+                                  HZ);
+       }
+
+       mutex_unlock(&di->charger_attached_mutex);
+
        return ret;
 
 free_irq:
@@ -2755,11 +3410,11 @@ free_irq:
 put_usb_phy:
        usb_put_phy(di->usb_phy);
 free_usb:
-       power_supply_unregister(&di->usb_chg.psy);
+       if (di->usb_chg.enabled)
+               power_supply_unregister(&di->usb_chg.psy);
 free_ac:
-       power_supply_unregister(&di->ac_chg.psy);
-free_regulator:
-       regulator_put(di->regu);
+       if (di->ac_chg.enabled)
+               power_supply_unregister(&di->ac_chg.psy);
 free_charger_wq:
        destroy_workqueue(di->charger_wq);
        return ret;
index b3bf178c346270fdb22800c53ed21b574a25b956..25dae4c4b0ef328501792ce48acd0c5fde474b4d 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/mfd/abx500/ab8500.h>
 #include <linux/mfd/abx500/ab8500-bm.h>
 #include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/kernel.h>
 
 #define MILLI_TO_MICRO                 1000
 #define FG_LSB_IN_MA                   1627
@@ -42,7 +43,7 @@
 
 #define NBR_AVG_SAMPLES                        20
 
-#define LOW_BAT_CHECK_INTERVAL         (2 * HZ)
+#define LOW_BAT_CHECK_INTERVAL         (HZ / 16) /* 62.5 ms */
 
 #define VALID_CAPACITY_SEC             (45 * 60) /* 45 minutes */
 #define BATT_OK_MIN                    2360 /* mV */
@@ -113,6 +114,13 @@ struct ab8500_fg_avg_cap {
        int sum;
 };
 
+struct ab8500_fg_cap_scaling {
+       bool enable;
+       int cap_to_scale[2];
+       int disable_cap_level;
+       int scaled_cap;
+};
+
 struct ab8500_fg_battery_capacity {
        int max_mah_design;
        int max_mah;
@@ -123,6 +131,7 @@ struct ab8500_fg_battery_capacity {
        int prev_percent;
        int prev_level;
        int user_mah;
+       struct ab8500_fg_cap_scaling cap_scale;
 };
 
 struct ab8500_fg_flags {
@@ -160,6 +169,8 @@ struct inst_curr_result_list {
  * @recovery_cnt:      Counter for recovery mode
  * @high_curr_cnt:     Counter for high current mode
  * @init_cnt:          Counter for init mode
+ * @low_bat_cnt:       Counter for number of consecutive low battery measures
+ * @nbr_cceoc_irq_cnt: Counter for number of CCEOC irqs received since enabled
  * @recovery_needed:   Indicate if recovery is needed
  * @high_curr_mode:    Indicate if we're in high current mode
  * @init_capacity:     Indicate if initial capacity measuring should be done
@@ -167,13 +178,14 @@ struct inst_curr_result_list {
  * @calib_state                State during offset calibration
  * @discharge_state:   Current discharge state
  * @charge_state:      Current charge state
+ * @ab8500_fg_started: Completion struct used for the instant current start
  * @ab8500_fg_complete Completion struct used for the instant current reading
  * @flags:             Structure for information about events triggered
  * @bat_cap:           Structure for battery capacity specific parameters
  * @avg_cap:           Average capacity filter
  * @parent:            Pointer to the struct ab8500
  * @gpadc:             Pointer to the struct gpadc
- * @bat:               Pointer to the abx500_bm platform data
+ * @bm:                Platform specific battery management information
  * @fg_psy:            Structure that holds the FG specific battery properties
  * @fg_wq:             Work queue for running the FG algorithm
  * @fg_periodic_work:  Work to run the FG algorithm periodically
@@ -199,6 +211,8 @@ struct ab8500_fg {
        int recovery_cnt;
        int high_curr_cnt;
        int init_cnt;
+       int low_bat_cnt;
+       int nbr_cceoc_irq_cnt;
        bool recovery_needed;
        bool high_curr_mode;
        bool init_capacity;
@@ -206,13 +220,14 @@ struct ab8500_fg {
        enum ab8500_fg_calibration_state calib_state;
        enum ab8500_fg_discharge_state discharge_state;
        enum ab8500_fg_charge_state charge_state;
+       struct completion ab8500_fg_started;
        struct completion ab8500_fg_complete;
        struct ab8500_fg_flags flags;
        struct ab8500_fg_battery_capacity bat_cap;
        struct ab8500_fg_avg_cap avg_cap;
        struct ab8500 *parent;
        struct ab8500_gpadc *gpadc;
-       struct abx500_bm_data *bat;
+       struct abx500_bm_data *bm;
        struct power_supply fg_psy;
        struct workqueue_struct *fg_wq;
        struct delayed_work fg_periodic_work;
@@ -355,7 +370,7 @@ static int ab8500_fg_is_low_curr(struct ab8500_fg *di, int curr)
        /*
         * We want to know if we're in low current mode
         */
-       if (curr > -di->bat->fg_params->high_curr_threshold)
+       if (curr > -di->bm->fg_params->high_curr_threshold)
                return true;
        else
                return false;
@@ -484,8 +499,9 @@ static int ab8500_fg_coulomb_counter(struct ab8500_fg *di, bool enable)
                di->flags.fg_enabled = true;
        } else {
                /* Clear any pending read requests */
-               ret = abx500_set_register_interruptible(di->dev,
-                       AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG, 0);
+               ret = abx500_mask_and_set_register_interruptible(di->dev,
+                       AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
+                       (RESET_ACCU | READ_REQ), 0);
                if (ret)
                        goto cc_err;
 
@@ -523,13 +539,14 @@ cc_err:
  * Note: This is part "one" and has to be called before
  * ab8500_fg_inst_curr_finalize()
  */
- int ab8500_fg_inst_curr_start(struct ab8500_fg *di)
+int ab8500_fg_inst_curr_start(struct ab8500_fg *di)
 {
        u8 reg_val;
        int ret;
 
        mutex_lock(&di->cc_lock);
 
+       di->nbr_cceoc_irq_cnt = 0;
        ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
                AB8500_RTC_CC_CONF_REG, &reg_val);
        if (ret < 0)
@@ -557,6 +574,7 @@ cc_err:
        }
 
        /* Return and WFI */
+       INIT_COMPLETION(di->ab8500_fg_started);
        INIT_COMPLETION(di->ab8500_fg_complete);
        enable_irq(di->irq);
 
@@ -567,6 +585,17 @@ fail:
        return ret;
 }
 
+/**
+ * ab8500_fg_inst_curr_started() - check if fg conversion has started
+ * @di:         pointer to the ab8500_fg structure
+ *
+ * Returns 1 if conversion started, 0 if still waiting
+ */
+int ab8500_fg_inst_curr_started(struct ab8500_fg *di)
+{
+       return completion_done(&di->ab8500_fg_started);
+}
+
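
ab8500_fg_inst_curr_started() pairs with the existing ab8500_fg_inst_curr_done(): a caller can now confirm that the coulomb-counter conversion has actually begun before waiting for it to finish. The driver's own blocking helper (further down in this patch) waits on the completions with a timeout; the polling loop below is only an illustration of the intended call order, with an arbitrary sleep interval:

/* Illustration of the start -> started -> done -> finalize call order. */
static int example_read_inst_curr(struct ab8500_fg *di, int *res)
{
	int ret = ab8500_fg_inst_curr_start(di);

	if (ret)
		return ret;

	while (!ab8500_fg_inst_curr_started(di))
		usleep_range(1000, 2000);	/* wait for the conversion to begin */

	while (!ab8500_fg_inst_curr_done(di))
		usleep_range(1000, 2000);	/* then wait for it to finish */

	return ab8500_fg_inst_curr_finalize(di, res);
}
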
 /**
  * ab8500_fg_inst_curr_done() - check if fg conversion is done
  * @di:         pointer to the ab8500_fg structure
@@ -595,13 +624,15 @@ int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
        int timeout;
 
        if (!completion_done(&di->ab8500_fg_complete)) {
-               timeout = wait_for_completion_timeout(&di->ab8500_fg_complete,
+               timeout = wait_for_completion_timeout(
+                       &di->ab8500_fg_complete,
                        INS_CURR_TIMEOUT);
                dev_dbg(di->dev, "Finalize time: %d ms\n",
                        ((INS_CURR_TIMEOUT - timeout) * 1000) / HZ);
                if (!timeout) {
                        ret = -ETIME;
                        disable_irq(di->irq);
+                       di->nbr_cceoc_irq_cnt = 0;
                        dev_err(di->dev, "completion timed out [%d]\n",
                                __LINE__);
                        goto fail;
@@ -609,6 +640,7 @@ int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
        }
 
        disable_irq(di->irq);
+       di->nbr_cceoc_irq_cnt = 0;
 
        ret = abx500_mask_and_set_register_interruptible(di->dev,
                        AB8500_GAS_GAUGE, AB8500_GASG_CC_CTRL_REG,
@@ -647,7 +679,7 @@ int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
         * 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm
         */
        val = (val * QLSB_NANO_AMP_HOURS_X10 * 36 * 4) /
-               (1000 * di->bat->fg_res);
+               (1000 * di->bm->fg_res);
 
        if (di->turn_off_fg) {
                dev_dbg(di->dev, "%s Disable FG\n", __func__);
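
For reference, the conversion a few lines above turns raw coulomb-counter counts into mA: one count is worth 112.9 nAh over the 250 ms conversion window with a 10 mOhm sense resistor, and fg_res is given in 0.1 mOhm. Assuming QLSB_NANO_AMP_HOURS_X10 is 1129 (112.9 nAh scaled by ten), a worked example with made-up input values:

/* Worked example, assuming QLSB_NANO_AMP_HOURS_X10 == 1129 (112.9 nAh x 10). */
static int example_counts_to_ma(int val, int fg_res)
{
	/*
	 * For val = 100 counts and fg_res = 100 (a 10 mOhm sense resistor
	 * expressed in 0.1 mOhm units):
	 * (100 * 1129 * 36 * 4) / (1000 * 100) = 162  ->  about 162 mA
	 */
	return (val * 1129 * 36 * 4) / (1000 * fg_res);
}
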
@@ -683,6 +715,7 @@ fail:
 int ab8500_fg_inst_curr_blocking(struct ab8500_fg *di)
 {
        int ret;
+       int timeout;
        int res = 0;
 
        ret = ab8500_fg_inst_curr_start(di);
@@ -691,13 +724,33 @@ int ab8500_fg_inst_curr_blocking(struct ab8500_fg *di)
                return 0;
        }
 
+       /* Wait for CC to actually start */
+       if (!completion_done(&di->ab8500_fg_started)) {
+               timeout = wait_for_completion_timeout(
+                       &di->ab8500_fg_started,
+                       INS_CURR_TIMEOUT);
+               dev_dbg(di->dev, "Start time: %d ms\n",
+                       ((INS_CURR_TIMEOUT - timeout) * 1000) / HZ);
+               if (!timeout) {
+                       ret = -ETIME;
+                       dev_err(di->dev, "completion timed out [%d]\n",
+                               __LINE__);
+                       goto fail;
+               }
+       }
+
        ret = ab8500_fg_inst_curr_finalize(di, &res);
        if (ret) {
                dev_err(di->dev, "Failed to finalize fg_inst\n");
                return 0;
        }
 
+       dev_dbg(di->dev, "%s instant current: %d\n", __func__, res);
        return res;
+fail:
+       disable_irq(di->irq);
+       mutex_unlock(&di->cc_lock);
+       return ret;
 }
 
 /**
@@ -750,19 +803,16 @@ static void ab8500_fg_acc_cur_work(struct work_struct *work)
         * 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm
         */
        di->accu_charge = (val * QLSB_NANO_AMP_HOURS_X10) /
-               (100 * di->bat->fg_res);
+               (100 * di->bm->fg_res);
 
        /*
         * Convert to unit value in mA
-        * Full scale input voltage is
-        * 66.660mV => LSB = 66.660mV/(4096*res) = 1.627mA
-        * Given a 250ms conversion cycle time the LSB corresponds
-        * to 112.9 nAh. Convert to current by dividing by the conversion
+        * by dividing by the conversion
         * time in hours (= samples / (3600 * 4)h)
-        * 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm
+        * and multiplying by 1000
         */
        di->avg_curr = (val * QLSB_NANO_AMP_HOURS_X10 * 36) /
-               (1000 * di->bat->fg_res * (di->fg_samples / 4));
+               (1000 * di->bm->fg_res * (di->fg_samples / 4));
 
        di->flags.conv_done = true;
 
@@ -770,6 +820,8 @@ static void ab8500_fg_acc_cur_work(struct work_struct *work)
 
        queue_work(di->fg_wq, &di->fg_work);
 
+       dev_dbg(di->dev, "fg_res: %d, fg_samples: %d, gasg: %d, accu_charge: %d\n",
+                               di->bm->fg_res, di->fg_samples, val, di->accu_charge);
        return;
 exit:
        dev_err(di->dev,
@@ -814,8 +866,8 @@ static int ab8500_fg_volt_to_capacity(struct ab8500_fg *di, int voltage)
        struct abx500_v_to_cap *tbl;
        int cap = 0;
 
-       tbl = di->bat->bat_type[di->bat->batt_id].v_to_cap_tbl,
-       tbl_size = di->bat->bat_type[di->bat->batt_id].n_v_cap_tbl_elements;
+       tbl = di->bm->bat_type[di->bm->batt_id].v_to_cap_tbl,
+       tbl_size = di->bm->bat_type[di->bm->batt_id].n_v_cap_tbl_elements;
 
        for (i = 0; i < tbl_size; ++i) {
                if (voltage > tbl[i].voltage)
@@ -866,8 +918,8 @@ static int ab8500_fg_battery_resistance(struct ab8500_fg *di)
        struct batres_vs_temp *tbl;
        int resist = 0;
 
-       tbl = di->bat->bat_type[di->bat->batt_id].batres_tbl;
-       tbl_size = di->bat->bat_type[di->bat->batt_id].n_batres_tbl_elements;
+       tbl = di->bm->bat_type[di->bm->batt_id].batres_tbl;
+       tbl_size = di->bm->bat_type[di->bm->batt_id].n_batres_tbl_elements;
 
        for (i = 0; i < tbl_size; ++i) {
                if (di->bat_temp / 10 > tbl[i].temp)
@@ -888,11 +940,11 @@ static int ab8500_fg_battery_resistance(struct ab8500_fg *di)
 
        dev_dbg(di->dev, "%s Temp: %d battery internal resistance: %d"
            " fg resistance %d, total: %d (mOhm)\n",
-               __func__, di->bat_temp, resist, di->bat->fg_res / 10,
-               (di->bat->fg_res / 10) + resist);
+               __func__, di->bat_temp, resist, di->bm->fg_res / 10,
+               (di->bm->fg_res / 10) + resist);
 
        /* fg_res variable is in 0.1mOhm */
-       resist += di->bat->fg_res / 10;
+       resist += di->bm->fg_res / 10;
 
        return resist;
 }
@@ -915,7 +967,7 @@ static int ab8500_fg_load_comp_volt_to_capacity(struct ab8500_fg *di)
        do {
                vbat += ab8500_fg_bat_voltage(di);
                i++;
-               msleep(5);
+               usleep_range(5000, 6000);
        } while (!ab8500_fg_inst_curr_done(di));
 
        ab8500_fg_inst_curr_finalize(di, &di->inst_curr);
@@ -1108,16 +1160,16 @@ static int ab8500_fg_capacity_level(struct ab8500_fg *di)
 {
        int ret, percent;
 
-       percent = di->bat_cap.permille / 10;
+       percent = DIV_ROUND_CLOSEST(di->bat_cap.permille, 10);
 
-       if (percent <= di->bat->cap_levels->critical ||
+       if (percent <= di->bm->cap_levels->critical ||
                di->flags.low_bat)
                ret = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
-       else if (percent <= di->bat->cap_levels->low)
+       else if (percent <= di->bm->cap_levels->low)
                ret = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
-       else if (percent <= di->bat->cap_levels->normal)
+       else if (percent <= di->bm->cap_levels->normal)
                ret = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
-       else if (percent <= di->bat->cap_levels->high)
+       else if (percent <= di->bm->cap_levels->high)
                ret = POWER_SUPPLY_CAPACITY_LEVEL_HIGH;
        else
                ret = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
@@ -1125,6 +1177,99 @@ static int ab8500_fg_capacity_level(struct ab8500_fg *di)
        return ret;
 }
 
+/**
+ * ab8500_fg_calculate_scaled_capacity() - Capacity scaling
+ * @di:                pointer to the ab8500_fg structure
+ *
+ * Calculates the capacity to be shown to upper layers. Scales the capacity
+ * to have 100% as a reference from the actual capacity upon removal of charger
+ * when charging is in maintenance mode.
+ */
+static int ab8500_fg_calculate_scaled_capacity(struct ab8500_fg *di)
+{
+       struct ab8500_fg_cap_scaling *cs = &di->bat_cap.cap_scale;
+       int capacity = di->bat_cap.prev_percent;
+
+       if (!cs->enable)
+               return capacity;
+
+       /*
+        * As long as we are in fully charge mode scale the capacity
+        * to show 100%.
+        */
+       if (di->flags.fully_charged) {
+               cs->cap_to_scale[0] = 100;
+               cs->cap_to_scale[1] =
+                       max(capacity, di->bm->fg_params->maint_thres);
+               dev_dbg(di->dev, "Scale cap with %d/%d\n",
+                        cs->cap_to_scale[0], cs->cap_to_scale[1]);
+       }
+
+       /* Calculates the scaled capacity. */
+       if ((cs->cap_to_scale[0] != cs->cap_to_scale[1])
+                                       && (cs->cap_to_scale[1] > 0))
+               capacity = min(100,
+                                DIV_ROUND_CLOSEST(di->bat_cap.prev_percent *
+                                                cs->cap_to_scale[0],
+                                                cs->cap_to_scale[1]));
+
+       if (di->flags.charging) {
+               if (capacity < cs->disable_cap_level) {
+                       cs->disable_cap_level = capacity;
+                       dev_dbg(di->dev, "Cap to stop scale lowered %d%%\n",
+                               cs->disable_cap_level);
+               } else if (!di->flags.fully_charged) {
+                       if (di->bat_cap.prev_percent >=
+                           cs->disable_cap_level) {
+                               dev_dbg(di->dev, "Disabling scaled capacity\n");
+                               cs->enable = false;
+                               capacity = di->bat_cap.prev_percent;
+                       } else {
+                               dev_dbg(di->dev,
+                                       "Waiting in cap to level %d%%\n",
+                                       cs->disable_cap_level);
+                               capacity = cs->disable_cap_level;
+                       }
+               }
+       }
+
+       return capacity;
+}
+
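
A quick worked example of the ratio used above: if the charger was removed during maintenance charging when the true capacity was 94%, cap_to_scale becomes {100, 94}; when the true capacity later drops to 85%, the reported value is DIV_ROUND_CLOSEST(85 * 100, 94) = 90, so the displayed figure still starts from 100% and falls proportionally. The numbers below are made up for illustration:

/* Worked example of the capacity-scaling ratio; the figures are invented. */
static int example_scaled_capacity(void)
{
	int prev_percent = 85;	/* true capacity right now */
	int at_removal = 94;	/* true capacity when the charger was removed */

	/* cap_to_scale = {100, 94}: 85 * 100 / 94 rounds to 90 */
	return min(100, DIV_ROUND_CLOSEST(prev_percent * 100, at_removal));
}
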
+/**
+ * ab8500_fg_update_cap_scalers() - Capacity scaling
+ * @di:                pointer to the ab8500_fg structure
+ *
+ * To be called when state change from charge<->discharge to update
+ * the capacity scalers.
+ */
+static void ab8500_fg_update_cap_scalers(struct ab8500_fg *di)
+{
+       struct ab8500_fg_cap_scaling *cs = &di->bat_cap.cap_scale;
+
+       if (!cs->enable)
+               return;
+       if (di->flags.charging) {
+               di->bat_cap.cap_scale.disable_cap_level =
+                       di->bat_cap.cap_scale.scaled_cap;
+               dev_dbg(di->dev, "Cap to stop scale at charge %d%%\n",
+                               di->bat_cap.cap_scale.disable_cap_level);
+       } else {
+               if (cs->scaled_cap != 100) {
+                       cs->cap_to_scale[0] = cs->scaled_cap;
+                       cs->cap_to_scale[1] = di->bat_cap.prev_percent;
+               } else {
+                       cs->cap_to_scale[0] = 100;
+                       cs->cap_to_scale[1] =
+                               max(di->bat_cap.prev_percent,
+                                   di->bm->fg_params->maint_thres);
+               }
+
+               dev_dbg(di->dev, "Cap to scale at discharge %d/%d\n",
+                               cs->cap_to_scale[0], cs->cap_to_scale[1]);
+       }
+}
+
 /**
  * ab8500_fg_check_capacity_limits() - Check if capacity has changed
  * @di:                pointer to the ab8500_fg structure
@@ -1136,6 +1281,7 @@ static int ab8500_fg_capacity_level(struct ab8500_fg *di)
 static void ab8500_fg_check_capacity_limits(struct ab8500_fg *di, bool init)
 {
        bool changed = false;
+       int percent = DIV_ROUND_CLOSEST(di->bat_cap.permille, 10);
 
        di->bat_cap.level = ab8500_fg_capacity_level(di);
 
@@ -1167,33 +1313,41 @@ static void ab8500_fg_check_capacity_limits(struct ab8500_fg *di, bool init)
                dev_dbg(di->dev, "Battery low, set capacity to 0\n");
                di->bat_cap.prev_percent = 0;
                di->bat_cap.permille = 0;
+               percent = 0;
                di->bat_cap.prev_mah = 0;
                di->bat_cap.mah = 0;
                changed = true;
        } else if (di->flags.fully_charged) {
                /*
                 * We report 100% if algorithm reported fully charged
-                * unless capacity drops too much
+                * and show 100% during maintenance charging (scaling).
                 */
                if (di->flags.force_full) {
-                       di->bat_cap.prev_percent = di->bat_cap.permille / 10;
+                       di->bat_cap.prev_percent = percent;
                        di->bat_cap.prev_mah = di->bat_cap.mah;
-               } else if (!di->flags.force_full &&
-                       di->bat_cap.prev_percent !=
-                       (di->bat_cap.permille) / 10 &&
-                       (di->bat_cap.permille / 10) <
-                       di->bat->fg_params->maint_thres) {
+
+                       changed = true;
+
+                       if (!di->bat_cap.cap_scale.enable &&
+                                               di->bm->capacity_scaling) {
+                               di->bat_cap.cap_scale.enable = true;
+                               di->bat_cap.cap_scale.cap_to_scale[0] = 100;
+                               di->bat_cap.cap_scale.cap_to_scale[1] =
+                                               di->bat_cap.prev_percent;
+                               di->bat_cap.cap_scale.disable_cap_level = 100;
+                       }
+               } else if (di->bat_cap.prev_percent != percent) {
                        dev_dbg(di->dev,
                                "battery reported full "
                                "but capacity dropping: %d\n",
-                               di->bat_cap.permille / 10);
-                       di->bat_cap.prev_percent = di->bat_cap.permille / 10;
+                               percent);
+                       di->bat_cap.prev_percent = percent;
                        di->bat_cap.prev_mah = di->bat_cap.mah;
 
                        changed = true;
                }
-       } else if (di->bat_cap.prev_percent != di->bat_cap.permille / 10) {
-               if (di->bat_cap.permille / 10 == 0) {
+       } else if (di->bat_cap.prev_percent != percent) {
+               if (percent == 0) {
                        /*
                         * We will not report 0% unless we've got
                         * the LOW_BAT IRQ, no matter what the FG
@@ -1203,11 +1357,11 @@ static void ab8500_fg_check_capacity_limits(struct ab8500_fg *di, bool init)
                        di->bat_cap.permille = 1;
                        di->bat_cap.prev_mah = 1;
                        di->bat_cap.mah = 1;
+                       percent = 1;
 
                        changed = true;
                } else if (!(!di->flags.charging &&
-                       (di->bat_cap.permille / 10) >
-                       di->bat_cap.prev_percent) || init) {
+                       percent > di->bat_cap.prev_percent) || init) {
                        /*
                         * We do not allow reported capacity to go up
                         * unless we're charging or if we're in init
@@ -1215,9 +1369,9 @@ static void ab8500_fg_check_capacity_limits(struct ab8500_fg *di, bool init)
                        dev_dbg(di->dev,
                                "capacity changed from %d to %d (%d)\n",
                                di->bat_cap.prev_percent,
-                               di->bat_cap.permille / 10,
+                               percent,
                                di->bat_cap.permille);
-                       di->bat_cap.prev_percent = di->bat_cap.permille / 10;
+                       di->bat_cap.prev_percent = percent;
                        di->bat_cap.prev_mah = di->bat_cap.mah;
 
                        changed = true;
@@ -1225,12 +1379,20 @@ static void ab8500_fg_check_capacity_limits(struct ab8500_fg *di, bool init)
                        dev_dbg(di->dev, "capacity not allowed to go up since "
                                "no charger is connected: %d to %d (%d)\n",
                                di->bat_cap.prev_percent,
-                               di->bat_cap.permille / 10,
+                               percent,
                                di->bat_cap.permille);
                }
        }
 
        if (changed) {
+               if (di->bm->capacity_scaling) {
+                       di->bat_cap.cap_scale.scaled_cap =
+                               ab8500_fg_calculate_scaled_capacity(di);
+
+                       dev_info(di->dev, "capacity=%d (%d)\n",
+                               di->bat_cap.prev_percent,
+                               di->bat_cap.cap_scale.scaled_cap);
+               }
                power_supply_changed(&di->fg_psy);
                if (di->flags.fully_charged && di->flags.force_full) {
                        dev_dbg(di->dev, "Battery full, notifying.\n");
@@ -1284,7 +1446,7 @@ static void ab8500_fg_algorithm_charging(struct ab8500_fg *di)
        switch (di->charge_state) {
        case AB8500_FG_CHARGE_INIT:
                di->fg_samples = SEC_TO_SAMPLE(
-                       di->bat->fg_params->accu_charging);
+                       di->bm->fg_params->accu_charging);
 
                ab8500_fg_coulomb_counter(di, true);
                ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_READOUT);
@@ -1296,7 +1458,7 @@ static void ab8500_fg_algorithm_charging(struct ab8500_fg *di)
                 * Read the FG and calculate the new capacity
                 */
                mutex_lock(&di->cc_lock);
-               if (!di->flags.conv_done) {
+               if (!di->flags.conv_done && !di->flags.force_full) {
                        /* Wasn't the CC IRQ that got us here */
                        mutex_unlock(&di->cc_lock);
                        dev_dbg(di->dev, "%s CC conv not done\n",
@@ -1346,8 +1508,8 @@ static bool check_sysfs_capacity(struct ab8500_fg *di)
        cap_permille = ab8500_fg_convert_mah_to_permille(di,
                di->bat_cap.user_mah);
 
-       lower = di->bat_cap.permille - di->bat->fg_params->user_cap_limit * 10;
-       upper = di->bat_cap.permille + di->bat->fg_params->user_cap_limit * 10;
+       lower = di->bat_cap.permille - di->bm->fg_params->user_cap_limit * 10;
+       upper = di->bat_cap.permille + di->bm->fg_params->user_cap_limit * 10;
 
        if (lower < 0)
                lower = 0;
@@ -1387,7 +1549,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
        case AB8500_FG_DISCHARGE_INIT:
                /* We use the FG IRQ to work on */
                di->init_cnt = 0;
-               di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer);
+               di->fg_samples = SEC_TO_SAMPLE(di->bm->fg_params->init_timer);
                ab8500_fg_coulomb_counter(di, true);
                ab8500_fg_discharge_state_to(di,
                        AB8500_FG_DISCHARGE_INITMEASURING);
@@ -1400,18 +1562,17 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
                 * samples to get an initial capacity.
                 * Then go to READOUT
                 */
-               sleep_time = di->bat->fg_params->init_timer;
+               sleep_time = di->bm->fg_params->init_timer;
 
                /* Discard the first [x] seconds */
-               if (di->init_cnt >
-                       di->bat->fg_params->init_discard_time) {
+               if (di->init_cnt > di->bm->fg_params->init_discard_time) {
                        ab8500_fg_calc_cap_discharge_voltage(di, true);
 
                        ab8500_fg_check_capacity_limits(di, true);
                }
 
                di->init_cnt += sleep_time;
-               if (di->init_cnt > di->bat->fg_params->init_total_time)
+               if (di->init_cnt > di->bm->fg_params->init_total_time)
                        ab8500_fg_discharge_state_to(di,
                                AB8500_FG_DISCHARGE_READOUT_INIT);
 
@@ -1426,7 +1587,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
                /* Intentional fallthrough */
 
        case AB8500_FG_DISCHARGE_RECOVERY:
-               sleep_time = di->bat->fg_params->recovery_sleep_timer;
+               sleep_time = di->bm->fg_params->recovery_sleep_timer;
 
                /*
                 * We should check the power consumption
@@ -1438,9 +1599,9 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
 
                if (ab8500_fg_is_low_curr(di, di->inst_curr)) {
                        if (di->recovery_cnt >
-                               di->bat->fg_params->recovery_total_time) {
+                               di->bm->fg_params->recovery_total_time) {
                                di->fg_samples = SEC_TO_SAMPLE(
-                                       di->bat->fg_params->accu_high_curr);
+                                       di->bm->fg_params->accu_high_curr);
                                ab8500_fg_coulomb_counter(di, true);
                                ab8500_fg_discharge_state_to(di,
                                        AB8500_FG_DISCHARGE_READOUT);
@@ -1453,7 +1614,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
                        di->recovery_cnt += sleep_time;
                } else {
                        di->fg_samples = SEC_TO_SAMPLE(
-                               di->bat->fg_params->accu_high_curr);
+                               di->bm->fg_params->accu_high_curr);
                        ab8500_fg_coulomb_counter(di, true);
                        ab8500_fg_discharge_state_to(di,
                                AB8500_FG_DISCHARGE_READOUT);
@@ -1462,7 +1623,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
 
        case AB8500_FG_DISCHARGE_READOUT_INIT:
                di->fg_samples = SEC_TO_SAMPLE(
-                       di->bat->fg_params->accu_high_curr);
+                       di->bm->fg_params->accu_high_curr);
                ab8500_fg_coulomb_counter(di, true);
                ab8500_fg_discharge_state_to(di,
                                AB8500_FG_DISCHARGE_READOUT);
@@ -1480,7 +1641,7 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
 
                        if (di->recovery_needed) {
                                ab8500_fg_discharge_state_to(di,
-                                       AB8500_FG_DISCHARGE_RECOVERY);
+                                       AB8500_FG_DISCHARGE_INIT_RECOVERY);
 
                                queue_delayed_work(di->fg_wq,
                                        &di->fg_periodic_work, 0);
@@ -1509,9 +1670,9 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
                        }
 
                        di->high_curr_cnt +=
-                               di->bat->fg_params->accu_high_curr;
+                               di->bm->fg_params->accu_high_curr;
                        if (di->high_curr_cnt >
-                               di->bat->fg_params->high_curr_time)
+                               di->bm->fg_params->high_curr_time)
                                di->recovery_needed = true;
 
                        ab8500_fg_calc_cap_discharge_fg(di);
@@ -1523,12 +1684,10 @@ static void ab8500_fg_algorithm_discharging(struct ab8500_fg *di)
 
        case AB8500_FG_DISCHARGE_WAKEUP:
                ab8500_fg_coulomb_counter(di, true);
-               di->inst_curr = ab8500_fg_inst_curr_blocking(di);
-
                ab8500_fg_calc_cap_discharge_voltage(di, true);
 
                di->fg_samples = SEC_TO_SAMPLE(
-                       di->bat->fg_params->accu_high_curr);
+                       di->bm->fg_params->accu_high_curr);
                ab8500_fg_coulomb_counter(di, true);
                ab8500_fg_discharge_state_to(di,
                                AB8500_FG_DISCHARGE_READOUT);
@@ -1641,8 +1800,6 @@ static void ab8500_fg_periodic_work(struct work_struct *work)
                fg_periodic_work.work);
 
        if (di->init_capacity) {
-               /* A dummy read that will return 0 */
-               di->inst_curr = ab8500_fg_inst_curr_blocking(di);
                /* Get an initial capacity calculation */
                ab8500_fg_calc_cap_discharge_voltage(di, true);
                ab8500_fg_check_capacity_limits(di, true);
@@ -1684,24 +1841,26 @@ static void ab8500_fg_check_hw_failure_work(struct work_struct *work)
         * If we have had a battery over-voltage situation,
         * check ovv-bit to see if it should be reset.
         */
-       if (di->flags.bat_ovv) {
-               ret = abx500_get_register_interruptible(di->dev,
-                       AB8500_CHARGER, AB8500_CH_STAT_REG,
-                       &reg_value);
-               if (ret < 0) {
-                       dev_err(di->dev, "%s ab8500 read failed\n", __func__);
-                       return;
-               }
-               if ((reg_value & BATT_OVV) != BATT_OVV) {
-                       dev_dbg(di->dev, "Battery recovered from OVV\n");
-                       di->flags.bat_ovv = false;
+       ret = abx500_get_register_interruptible(di->dev,
+               AB8500_CHARGER, AB8500_CH_STAT_REG,
+               &reg_value);
+       if (ret < 0) {
+               dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+               return;
+       }
+       if ((reg_value & BATT_OVV) == BATT_OVV) {
+               if (!di->flags.bat_ovv) {
+                       dev_dbg(di->dev, "Battery OVV\n");
+                       di->flags.bat_ovv = true;
                        power_supply_changed(&di->fg_psy);
-                       return;
                }
-
                /* Not yet recovered from ovv, reschedule this test */
                queue_delayed_work(di->fg_wq, &di->fg_check_hw_failure_work,
-                                  round_jiffies(HZ));
+                                  HZ);
+       } else {
+               dev_dbg(di->dev, "Battery recovered from OVV\n");
+               di->flags.bat_ovv = false;
+               power_supply_changed(&di->fg_psy);
        }
 }
 
@@ -1721,26 +1880,30 @@ static void ab8500_fg_low_bat_work(struct work_struct *work)
        vbat = ab8500_fg_bat_voltage(di);
 
        /* Check if LOW_BAT still fulfilled */
-       if (vbat < di->bat->fg_params->lowbat_threshold) {
-               di->flags.low_bat = true;
-               dev_warn(di->dev, "Battery voltage still LOW\n");
-
-               /*
-                * We need to re-schedule this check to be able to detect
-                * if the voltage increases again during charging
-                */
-               queue_delayed_work(di->fg_wq, &di->fg_low_bat_work,
-                       round_jiffies(LOW_BAT_CHECK_INTERVAL));
+       if (vbat < di->bm->fg_params->lowbat_threshold) {
+               /* Is it time to shut down? */
+               if (di->low_bat_cnt < 1) {
+                       di->flags.low_bat = true;
+                       dev_warn(di->dev, "Shut down pending...\n");
+               } else {
+                       /*
+                        * Otherwise we need to re-schedule this check to be
+                        * able to detect if the voltage increases again
+                        * during charging or due to a decreasing load.
+                        */
+                       di->low_bat_cnt--;
+                       dev_warn(di->dev, "Battery voltage still LOW\n");
+                       queue_delayed_work(di->fg_wq, &di->fg_low_bat_work,
+                               round_jiffies(LOW_BAT_CHECK_INTERVAL));
+               }
        } else {
-               di->flags.low_bat = false;
+               di->flags.low_bat_delay = false;
+               di->low_bat_cnt = 10;
                dev_warn(di->dev, "Battery voltage OK again\n");
        }
 
        /* This is needed to dispatch LOW_BAT */
        ab8500_fg_check_capacity_limits(di, false);
-
-       /* Set this flag to check if LOW_BAT IRQ still occurs */
-       di->flags.low_bat_delay = false;
 }
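
With LOW_BAT_CHECK_INTERVAL tightened from 2 s to HZ/16 and low_bat_cnt starting at 10, the voltage has to stay below the threshold for ten consecutive re-checks, roughly 600-625 ms depending on HZ and integer division, before flags.low_bat is set and shutdown becomes pending. As a quick sanity check on that budget:

/* Sanity check on the low-battery debounce window: ten re-checks, HZ/16 apart. */
static unsigned int example_low_bat_debounce_ms(void)
{
	return jiffies_to_msecs(10 * (HZ / 16));	/* ~600-625 ms */
}
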
 
 /**
@@ -1779,8 +1942,8 @@ static int ab8500_fg_battok_init_hw_register(struct ab8500_fg *di)
        int ret;
        int new_val;
 
-       sel0 = di->bat->fg_params->battok_falling_th_sel0;
-       sel1 = di->bat->fg_params->battok_raising_th_sel1;
+       sel0 = di->bm->fg_params->battok_falling_th_sel0;
+       sel1 = di->bm->fg_params->battok_raising_th_sel1;
 
        cbp_sel0 = ab8500_fg_battok_calc(di, sel0);
        cbp_sel1 = ab8500_fg_battok_calc(di, sel1);
@@ -1828,7 +1991,13 @@ static void ab8500_fg_instant_work(struct work_struct *work)
 static irqreturn_t ab8500_fg_cc_data_end_handler(int irq, void *_di)
 {
        struct ab8500_fg *di = _di;
-       complete(&di->ab8500_fg_complete);
+       if (!di->nbr_cceoc_irq_cnt) {
+               di->nbr_cceoc_irq_cnt++;
+               complete(&di->ab8500_fg_started);
+       } else {
+               di->nbr_cceoc_irq_cnt = 0;
+               complete(&di->ab8500_fg_complete);
+       }
        return IRQ_HANDLED;
 }
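
Because CCEOC now fires once when the conversion starts and once when the result is ready, the handler uses nbr_cceoc_irq_cnt to route the first interrupt to the ab8500_fg_started completion and the second to ab8500_fg_complete. The same two-stage handshake, extracted into a generic, driver-independent sketch (all names hypothetical):

/* Illustrative two-stage completion: first IRQ = started, second = finished. */
struct two_stage {
	struct completion started;
	struct completion done;
	int irq_cnt;
};

static irqreturn_t two_stage_irq(int irq, void *data)
{
	struct two_stage *ts = data;

	if (!ts->irq_cnt) {
		ts->irq_cnt++;
		complete(&ts->started);		/* conversion has begun */
	} else {
		ts->irq_cnt = 0;
		complete(&ts->done);		/* result is ready to read */
	}
	return IRQ_HANDLED;
}
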
 
@@ -1875,8 +2044,6 @@ static irqreturn_t ab8500_fg_batt_ovv_handler(int irq, void *_di)
        struct ab8500_fg *di = _di;
 
        dev_dbg(di->dev, "Battery OVV\n");
-       di->flags.bat_ovv = true;
-       power_supply_changed(&di->fg_psy);
 
        /* Schedule a new HW failure check */
        queue_delayed_work(di->fg_wq, &di->fg_check_hw_failure_work, 0);
@@ -1895,6 +2062,7 @@ static irqreturn_t ab8500_fg_lowbatf_handler(int irq, void *_di)
 {
        struct ab8500_fg *di = _di;
 
+       /* Initiate handling in ab8500_fg_low_bat_work() if not already initiated. */
        if (!di->flags.low_bat_delay) {
                dev_warn(di->dev, "Battery voltage is below LOW threshold\n");
                di->flags.low_bat_delay = true;
@@ -1963,7 +2131,7 @@ static int ab8500_fg_get_property(struct power_supply *psy,
                                di->bat_cap.max_mah);
                break;
        case POWER_SUPPLY_PROP_ENERGY_NOW:
-               if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
+               if (di->flags.batt_unknown && !di->bm->chg_unknown_bat &&
                                di->flags.batt_id_received)
                        val->intval = ab8500_fg_convert_mah_to_uwh(di,
                                        di->bat_cap.max_mah);
@@ -1978,21 +2146,23 @@ static int ab8500_fg_get_property(struct power_supply *psy,
                val->intval = di->bat_cap.max_mah;
                break;
        case POWER_SUPPLY_PROP_CHARGE_NOW:
-               if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
+               if (di->flags.batt_unknown && !di->bm->chg_unknown_bat &&
                                di->flags.batt_id_received)
                        val->intval = di->bat_cap.max_mah;
                else
                        val->intval = di->bat_cap.prev_mah;
                break;
        case POWER_SUPPLY_PROP_CAPACITY:
-               if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
+               if (di->bm->capacity_scaling)
+                       val->intval = di->bat_cap.cap_scale.scaled_cap;
+               else if (di->flags.batt_unknown && !di->bm->chg_unknown_bat &&
                                di->flags.batt_id_received)
                        val->intval = 100;
                else
                        val->intval = di->bat_cap.prev_percent;
                break;
        case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
-               if (di->flags.batt_unknown && !di->bat->chg_unknown_bat &&
+               if (di->flags.batt_unknown && !di->bm->chg_unknown_bat &&
                                di->flags.batt_id_received)
                        val->intval = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
                else
@@ -2049,6 +2219,8 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
                                                break;
                                        di->flags.charging = false;
                                        di->flags.fully_charged = false;
+                                       if (di->bm->capacity_scaling)
+                                               ab8500_fg_update_cap_scalers(di);
                                        queue_work(di->fg_wq, &di->fg_work);
                                        break;
                                case POWER_SUPPLY_STATUS_FULL:
@@ -2061,10 +2233,13 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
                                        queue_work(di->fg_wq, &di->fg_work);
                                        break;
                                case POWER_SUPPLY_STATUS_CHARGING:
-                                       if (di->flags.charging)
+                                       if (di->flags.charging &&
+                                               !di->flags.fully_charged)
                                                break;
                                        di->flags.charging = true;
                                        di->flags.fully_charged = false;
+                                       if (di->bm->capacity_scaling)
+                                               ab8500_fg_update_cap_scalers(di);
                                        queue_work(di->fg_wq, &di->fg_work);
                                        break;
                                };
@@ -2075,10 +2250,11 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
                case POWER_SUPPLY_PROP_TECHNOLOGY:
                        switch (ext->type) {
                        case POWER_SUPPLY_TYPE_BATTERY:
-                               if (!di->flags.batt_id_received) {
+                               if (!di->flags.batt_id_received &&
+                                   di->bm->batt_id != BATTERY_UNKNOWN) {
                                        const struct abx500_battery_type *b;
 
-                                       b = &(di->bat->bat_type[di->bat->batt_id]);
+                                       b = &(di->bm->bat_type[di->bm->batt_id]);
 
                                        di->flags.batt_id_received = true;
 
@@ -2104,8 +2280,8 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data)
                case POWER_SUPPLY_PROP_TEMP:
                        switch (ext->type) {
                        case POWER_SUPPLY_TYPE_BATTERY:
-                           if (di->flags.batt_id_received)
-                               di->bat_temp = ret.intval;
+                               if (di->flags.batt_id_received)
+                                       di->bat_temp = ret.intval;
                                break;
                        default:
                                break;
@@ -2155,7 +2331,7 @@ static int ab8500_fg_init_hw_registers(struct ab8500_fg *di)
                AB8500_SYS_CTRL2_BLOCK,
                AB8500_LOW_BAT_REG,
                ab8500_volt_to_regval(
-                       di->bat->fg_params->lowbat_threshold) << 1 |
+                       di->bm->fg_params->lowbat_threshold) << 1 |
                LOW_BAT_ENABLE);
        if (ret) {
                dev_err(di->dev, "%s write failed\n", __func__);
@@ -2395,6 +2571,11 @@ static int ab8500_fg_suspend(struct platform_device *pdev,
        struct ab8500_fg *di = platform_get_drvdata(pdev);
 
        flush_delayed_work(&di->fg_periodic_work);
+       flush_work(&di->fg_work);
+       flush_work(&di->fg_acc_cur_work);
+       flush_delayed_work(&di->fg_reinit_work);
+       flush_delayed_work(&di->fg_low_bat_work);
+       flush_delayed_work(&di->fg_check_hw_failure_work);
 
        /*
         * If the FG is enabled we will disable it before going to suspend
@@ -2448,6 +2629,7 @@ static char *supply_interface[] = {
 static int ab8500_fg_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
+       struct abx500_bm_data *plat = pdev->dev.platform_data;
        struct ab8500_fg *di;
        int i, irq;
        int ret = 0;
@@ -2457,21 +2639,19 @@ static int ab8500_fg_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "%s no mem for ab8500_fg\n", __func__);
                return -ENOMEM;
        }
-       di->bat = pdev->mfd_cell->platform_data;
-       if (!di->bat) {
-               if (np) {
-                       ret = bmdevs_of_probe(&pdev->dev, np, &di->bat);
-                       if (ret) {
-                               dev_err(&pdev->dev,
-                                       "failed to get battery information\n");
-                               return ret;
-                       }
-               } else {
-                       dev_err(&pdev->dev, "missing dt node for ab8500_fg\n");
-                       return -EINVAL;
+
+       if (!plat) {
+               dev_err(&pdev->dev, "no battery management data supplied\n");
+               return -EINVAL;
+       }
+       di->bm = plat;
+
+       if (np) {
+               ret = ab8500_bm_of_probe(&pdev->dev, np, di->bm);
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to get battery information\n");
+                       return ret;
                }
-       } else {
-               dev_info(&pdev->dev, "falling back to legacy platform data\n");
        }
 
        mutex_init(&di->cc_lock);
@@ -2491,11 +2671,11 @@ static int ab8500_fg_probe(struct platform_device *pdev)
        di->fg_psy.external_power_changed = ab8500_fg_external_power_changed;
 
        di->bat_cap.max_mah_design = MILLI_TO_MICRO *
-               di->bat->bat_type[di->bat->batt_id].charge_full_design;
+               di->bm->bat_type[di->bm->batt_id].charge_full_design;
 
        di->bat_cap.max_mah = di->bat_cap.max_mah_design;
 
-       di->vbat_nom = di->bat->bat_type[di->bat->batt_id].nominal_voltage;
+       di->vbat_nom = di->bm->bat_type[di->bm->batt_id].nominal_voltage;
 
        di->init_capacity = true;
 
@@ -2531,6 +2711,12 @@ static int ab8500_fg_probe(struct platform_device *pdev)
        INIT_DEFERRABLE_WORK(&di->fg_check_hw_failure_work,
                ab8500_fg_check_hw_failure_work);
 
+       /* Reset battery low voltage flag */
+       di->flags.low_bat = false;
+
+       /* Initialize low battery counter */
+       di->low_bat_cnt = 10;
+
        /* Initialize OVV, and other registers */
        ret = ab8500_fg_init_hw_registers(di);
        if (ret) {
@@ -2549,10 +2735,14 @@ static int ab8500_fg_probe(struct platform_device *pdev)
                goto free_inst_curr_wq;
        }
 
-       di->fg_samples = SEC_TO_SAMPLE(di->bat->fg_params->init_timer);
+       di->fg_samples = SEC_TO_SAMPLE(di->bm->fg_params->init_timer);
        ab8500_fg_coulomb_counter(di, true);
 
-       /* Initialize completion used to notify completion of inst current */
+       /*
+        * Initialize completion used to notify completion and start
+        * of inst current
+        */
+       init_completion(&di->ab8500_fg_started);
        init_completion(&di->ab8500_fg_complete);
 
        /* Register interrupts */
@@ -2572,6 +2762,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
        }
        di->irq = platform_get_irq_byname(pdev, "CCEOC");
        disable_irq(di->irq);
+       di->nbr_cceoc_irq_cnt = 0;
 
        platform_set_drvdata(pdev, di);
 
index 2970891460641177f53449dc1ff32b33ed26a375..f043c0851a7600d1417711b7f1ec4364e9331985 100644 (file)
@@ -33,9 +33,6 @@
 /* End-of-charge criteria counter */
 #define EOC_COND_CNT                   10
 
-/* Recharge criteria counter */
-#define RCH_COND_CNT                   3
-
 #define to_abx500_chargalg_device_info(x) container_of((x), \
        struct abx500_chargalg, chargalg_psy);
 
@@ -196,7 +193,6 @@ enum maxim_ret {
  * @dev:               pointer to the structure device
  * @charge_status:     battery operating status
  * @eoc_cnt:           counter used to determine end-of_charge
- * @rch_cnt:           counter used to determine start of recharge
  * @maintenance_chg:   indicate if maintenance charge is active
  * @t_hyst_norm                temperature hysteresis when the temperature has been
  *                     over or under normal limits
@@ -207,7 +203,7 @@ enum maxim_ret {
  * @chg_info:          information about connected charger types
  * @batt_data:         data of the battery
  * @susp_status:       current charger suspension status
- * @bat:               pointer to the abx500_bm platform data
+ * @bm:                Platform specific battery management information
  * @chargalg_psy:      structure that holds the battery properties exposed by
  *                     the charging algorithm
  * @events:            structure for information about events triggered
@@ -223,7 +219,6 @@ struct abx500_chargalg {
        struct device *dev;
        int charge_status;
        int eoc_cnt;
-       int rch_cnt;
        bool maintenance_chg;
        int t_hyst_norm;
        int t_hyst_lowhigh;
@@ -232,7 +227,7 @@ struct abx500_chargalg {
        struct abx500_chargalg_charger_info chg_info;
        struct abx500_chargalg_battery_data batt_data;
        struct abx500_chargalg_suspension_status susp_status;
-       struct abx500_bm_data *bat;
+       struct abx500_bm_data *bm;
        struct power_supply chargalg_psy;
        struct ux500_charger *ac_chg;
        struct ux500_charger *usb_chg;
@@ -367,13 +362,13 @@ static void abx500_chargalg_start_safety_timer(struct abx500_chargalg *di)
        case AC_CHG:
                timer_expiration =
                round_jiffies(jiffies +
-                       (di->bat->main_safety_tmr_h * 3600 * HZ));
+                       (di->bm->main_safety_tmr_h * 3600 * HZ));
                break;
 
        case USB_CHG:
                timer_expiration =
                round_jiffies(jiffies +
-                       (di->bat->usb_safety_tmr_h * 3600 * HZ));
+                       (di->bm->usb_safety_tmr_h * 3600 * HZ));
                break;
 
        default:
@@ -450,8 +445,18 @@ static int abx500_chargalg_kick_watchdog(struct abx500_chargalg *di)
 {
        /* Check if charger exists and kick watchdog if charging */
        if (di->ac_chg && di->ac_chg->ops.kick_wd &&
-                       di->chg_info.online_chg & AC_CHG)
+           di->chg_info.online_chg & AC_CHG) {
+               /*
+                * If AB charger watchdog expired, pm2xxx charging
+                * gets disabled. To be safe, kick both AB charger watchdog
+                * and pm2xxx watchdog.
+                */
+               if (di->ac_chg->external &&
+                   di->usb_chg && di->usb_chg->ops.kick_wd)
+                       di->usb_chg->ops.kick_wd(di->usb_chg);
+
                return di->ac_chg->ops.kick_wd(di->ac_chg);
+       }
        else if (di->usb_chg && di->usb_chg->ops.kick_wd &&
                        di->chg_info.online_chg & USB_CHG)
                return di->usb_chg->ops.kick_wd(di->usb_chg);
@@ -608,6 +613,8 @@ static void abx500_chargalg_hold_charging(struct abx500_chargalg *di)
 static void abx500_chargalg_start_charging(struct abx500_chargalg *di,
        int vset, int iset)
 {
+       bool start_chargalg_wd = true;
+
        switch (di->chg_info.charger_type) {
        case AC_CHG:
                dev_dbg(di->dev,
@@ -625,8 +632,12 @@ static void abx500_chargalg_start_charging(struct abx500_chargalg *di,
 
        default:
                dev_err(di->dev, "Unknown charger to charge from\n");
+               start_chargalg_wd = false;
                break;
        }
+
+       if (start_chargalg_wd && !delayed_work_pending(&di->chargalg_wd_work))
+               queue_delayed_work(di->chargalg_wq, &di->chargalg_wd_work, 0);
 }
 
 /**
@@ -638,32 +649,32 @@ static void abx500_chargalg_start_charging(struct abx500_chargalg *di,
  */
 static void abx500_chargalg_check_temp(struct abx500_chargalg *di)
 {
-       if (di->batt_data.temp > (di->bat->temp_low + di->t_hyst_norm) &&
-               di->batt_data.temp < (di->bat->temp_high - di->t_hyst_norm)) {
+       if (di->batt_data.temp > (di->bm->temp_low + di->t_hyst_norm) &&
+               di->batt_data.temp < (di->bm->temp_high - di->t_hyst_norm)) {
                /* Temp OK! */
                di->events.btemp_underover = false;
                di->events.btemp_lowhigh = false;
                di->t_hyst_norm = 0;
                di->t_hyst_lowhigh = 0;
        } else {
-               if (((di->batt_data.temp >= di->bat->temp_high) &&
+               if (((di->batt_data.temp >= di->bm->temp_high) &&
                        (di->batt_data.temp <
-                               (di->bat->temp_over - di->t_hyst_lowhigh))) ||
+                               (di->bm->temp_over - di->t_hyst_lowhigh))) ||
                        ((di->batt_data.temp >
-                               (di->bat->temp_under + di->t_hyst_lowhigh)) &&
-                       (di->batt_data.temp <= di->bat->temp_low))) {
+                               (di->bm->temp_under + di->t_hyst_lowhigh)) &&
+                       (di->batt_data.temp <= di->bm->temp_low))) {
                        /* TEMP minor!!!!! */
                        di->events.btemp_underover = false;
                        di->events.btemp_lowhigh = true;
-                       di->t_hyst_norm = di->bat->temp_hysteresis;
+                       di->t_hyst_norm = di->bm->temp_hysteresis;
                        di->t_hyst_lowhigh = 0;
-               } else if (di->batt_data.temp <= di->bat->temp_under ||
-                       di->batt_data.temp >= di->bat->temp_over) {
+               } else if (di->batt_data.temp <= di->bm->temp_under ||
+                       di->batt_data.temp >= di->bm->temp_over) {
                        /* TEMP major!!!!! */
                        di->events.btemp_underover = true;
                        di->events.btemp_lowhigh = false;
                        di->t_hyst_norm = 0;
-                       di->t_hyst_lowhigh = di->bat->temp_hysteresis;
+                       di->t_hyst_lowhigh = di->bm->temp_hysteresis;
                } else {
                /* Within hysteresis */
                dev_dbg(di->dev, "Within hysteresis limit temp: %d "
@@ -682,12 +693,12 @@ static void abx500_chargalg_check_temp(struct abx500_chargalg *di)
  */
 static void abx500_chargalg_check_charger_voltage(struct abx500_chargalg *di)
 {
-       if (di->chg_info.usb_volt > di->bat->chg_params->usb_volt_max)
+       if (di->chg_info.usb_volt > di->bm->chg_params->usb_volt_max)
                di->chg_info.usb_chg_ok = false;
        else
                di->chg_info.usb_chg_ok = true;
 
-       if (di->chg_info.ac_volt > di->bat->chg_params->ac_volt_max)
+       if (di->chg_info.ac_volt > di->bm->chg_params->ac_volt_max)
                di->chg_info.ac_chg_ok = false;
        else
                di->chg_info.ac_chg_ok = true;
@@ -707,10 +718,10 @@ static void abx500_chargalg_end_of_charge(struct abx500_chargalg *di)
        if (di->charge_status == POWER_SUPPLY_STATUS_CHARGING &&
                di->charge_state == STATE_NORMAL &&
                !di->maintenance_chg && (di->batt_data.volt >=
-               di->bat->bat_type[di->bat->batt_id].termination_vol ||
+               di->bm->bat_type[di->bm->batt_id].termination_vol ||
                di->events.usb_cv_active || di->events.ac_cv_active) &&
                di->batt_data.avg_curr <
-               di->bat->bat_type[di->bat->batt_id].termination_curr &&
+               di->bm->bat_type[di->bm->batt_id].termination_curr &&
                di->batt_data.avg_curr > 0) {
                if (++di->eoc_cnt >= EOC_COND_CNT) {
                        di->eoc_cnt = 0;
@@ -733,12 +744,12 @@ static void abx500_chargalg_end_of_charge(struct abx500_chargalg *di)
 static void init_maxim_chg_curr(struct abx500_chargalg *di)
 {
        di->ccm.original_iset =
-               di->bat->bat_type[di->bat->batt_id].normal_cur_lvl;
+               di->bm->bat_type[di->bm->batt_id].normal_cur_lvl;
        di->ccm.current_iset =
-               di->bat->bat_type[di->bat->batt_id].normal_cur_lvl;
-       di->ccm.test_delta_i = di->bat->maxi->charger_curr_step;
-       di->ccm.max_current = di->bat->maxi->chg_curr;
-       di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+               di->bm->bat_type[di->bm->batt_id].normal_cur_lvl;
+       di->ccm.test_delta_i = di->bm->maxi->charger_curr_step;
+       di->ccm.max_current = di->bm->maxi->chg_curr;
+       di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
        di->ccm.level = 0;
 }
 
@@ -755,7 +766,7 @@ static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
 {
        int delta_i;
 
-       if (!di->bat->maxi->ena_maxi)
+       if (!di->bm->maxi->ena_maxi)
                return MAXIM_RET_NOACTION;
 
        delta_i = di->ccm.original_iset - di->batt_data.inst_curr;
@@ -766,7 +777,7 @@ static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
                if (di->ccm.wait_cnt == 0) {
                        dev_dbg(di->dev, "lowering current\n");
                        di->ccm.wait_cnt++;
-                       di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+                       di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
                        di->ccm.max_current =
                                di->ccm.current_iset - di->ccm.test_delta_i;
                        di->ccm.current_iset = di->ccm.max_current;
@@ -791,7 +802,7 @@ static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
                if (di->ccm.current_iset == di->ccm.original_iset)
                        return MAXIM_RET_NOACTION;
 
-               di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+               di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
                di->ccm.current_iset = di->ccm.original_iset;
                di->ccm.level = 0;
 
@@ -803,7 +814,7 @@ static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
                di->ccm.max_current) {
                if (di->ccm.condition_cnt-- == 0) {
                        /* Increse the iset with cco.test_delta_i */
-                       di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+                       di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
                        di->ccm.current_iset += di->ccm.test_delta_i;
                        di->ccm.level++;
                        dev_dbg(di->dev, " Maximization needed, increase"
@@ -818,7 +829,7 @@ static enum maxim_ret abx500_chargalg_chg_curr_maxim(struct abx500_chargalg *di)
                        return MAXIM_RET_NOACTION;
                }
        }  else {
-               di->ccm.condition_cnt = di->bat->maxi->wait_cycles;
+               di->ccm.condition_cnt = di->bm->maxi->wait_cycles;
                return MAXIM_RET_NOACTION;
        }
 }
@@ -838,7 +849,7 @@ static void handle_maxim_chg_curr(struct abx500_chargalg *di)
                break;
        case MAXIM_RET_IBAT_TOO_HIGH:
                result = abx500_chargalg_update_chg_curr(di,
-                       di->bat->bat_type[di->bat->batt_id].normal_cur_lvl);
+                       di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
                if (result)
                        dev_err(di->dev, "failed to set chg curr\n");
                break;
@@ -858,6 +869,7 @@ static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
        union power_supply_propval ret;
        int i, j;
        bool psy_found = false;
+       bool capacity_updated = false;
 
        psy = (struct power_supply *)data;
        ext = dev_get_drvdata(dev);
@@ -870,6 +882,16 @@ static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
        if (!psy_found)
                return 0;
 
+       /*
+        * If the external supply handles the 'POWER_SUPPLY_PROP_CAPACITY'
+        * sysfs entry on its own and therefore does not report it through
+        * its property list, this is the place to get the battery capacity.
+        */
+       if (!ext->get_property(ext, POWER_SUPPLY_PROP_CAPACITY, &ret)) {
+               di->batt_data.percent = ret.intval;
+               capacity_updated = true;
+       }
+
        /* Go through all properties for the psy */
        for (j = 0; j < ext->num_properties; j++) {
                enum power_supply_property prop;
@@ -1154,7 +1176,8 @@ static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data)
                        }
                        break;
                case POWER_SUPPLY_PROP_CAPACITY:
-                       di->batt_data.percent = ret.intval;
+                       if (!capacity_updated)
+                               di->batt_data.percent = ret.intval;
                        break;
                default:
                        break;
@@ -1210,7 +1233,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
         * this way
         */
        if (!charger_status ||
-               (di->events.batt_unknown && !di->bat->chg_unknown_bat)) {
+               (di->events.batt_unknown && !di->bm->chg_unknown_bat)) {
                if (di->charge_state != STATE_HANDHELD) {
                        di->events.safety_timer_expired = false;
                        abx500_chargalg_state_to(di, STATE_HANDHELD_INIT);
@@ -1394,8 +1417,8 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
 
        case STATE_NORMAL_INIT:
                abx500_chargalg_start_charging(di,
-                       di->bat->bat_type[di->bat->batt_id].normal_vol_lvl,
-                       di->bat->bat_type[di->bat->batt_id].normal_cur_lvl);
+                       di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
+                       di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
                abx500_chargalg_state_to(di, STATE_NORMAL);
                abx500_chargalg_start_safety_timer(di);
                abx500_chargalg_stop_maintenance_timer(di);
@@ -1411,7 +1434,7 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
                handle_maxim_chg_curr(di);
                if (di->charge_status == POWER_SUPPLY_STATUS_FULL &&
                        di->maintenance_chg) {
-                       if (di->bat->no_maintenance)
+                       if (di->bm->no_maintenance)
                                abx500_chargalg_state_to(di,
                                        STATE_WAIT_FOR_RECHARGE_INIT);
                        else
@@ -1424,28 +1447,25 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
        case STATE_WAIT_FOR_RECHARGE_INIT:
                abx500_chargalg_hold_charging(di);
                abx500_chargalg_state_to(di, STATE_WAIT_FOR_RECHARGE);
-               di->rch_cnt = RCH_COND_CNT;
                /* Intentional fallthrough */
 
        case STATE_WAIT_FOR_RECHARGE:
-               if (di->batt_data.volt <=
-                       di->bat->bat_type[di->bat->batt_id].recharge_vol) {
-                       if (di->rch_cnt-- == 0)
-                               abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
-               } else
-                       di->rch_cnt = RCH_COND_CNT;
+               if (di->batt_data.percent <=
+                   di->bm->bat_type[di->bm->batt_id].
+                   recharge_cap)
+                       abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
                break;
 
        case STATE_MAINTENANCE_A_INIT:
                abx500_chargalg_stop_safety_timer(di);
                abx500_chargalg_start_maintenance_timer(di,
-                       di->bat->bat_type[
-                               di->bat->batt_id].maint_a_chg_timer_h);
+                       di->bm->bat_type[
+                               di->bm->batt_id].maint_a_chg_timer_h);
                abx500_chargalg_start_charging(di,
-                       di->bat->bat_type[
-                               di->bat->batt_id].maint_a_vol_lvl,
-                       di->bat->bat_type[
-                               di->bat->batt_id].maint_a_cur_lvl);
+                       di->bm->bat_type[
+                               di->bm->batt_id].maint_a_vol_lvl,
+                       di->bm->bat_type[
+                               di->bm->batt_id].maint_a_cur_lvl);
                abx500_chargalg_state_to(di, STATE_MAINTENANCE_A);
                power_supply_changed(&di->chargalg_psy);
                /* Intentional fallthrough*/
@@ -1459,13 +1479,13 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
 
        case STATE_MAINTENANCE_B_INIT:
                abx500_chargalg_start_maintenance_timer(di,
-                       di->bat->bat_type[
-                               di->bat->batt_id].maint_b_chg_timer_h);
+                       di->bm->bat_type[
+                               di->bm->batt_id].maint_b_chg_timer_h);
                abx500_chargalg_start_charging(di,
-                       di->bat->bat_type[
-                               di->bat->batt_id].maint_b_vol_lvl,
-                       di->bat->bat_type[
-                               di->bat->batt_id].maint_b_cur_lvl);
+                       di->bm->bat_type[
+                               di->bm->batt_id].maint_b_vol_lvl,
+                       di->bm->bat_type[
+                               di->bm->batt_id].maint_b_cur_lvl);
                abx500_chargalg_state_to(di, STATE_MAINTENANCE_B);
                power_supply_changed(&di->chargalg_psy);
                /* Intentional fallthrough*/
@@ -1479,10 +1499,10 @@ static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
 
        case STATE_TEMP_LOWHIGH_INIT:
                abx500_chargalg_start_charging(di,
-                       di->bat->bat_type[
-                               di->bat->batt_id].low_high_vol_lvl,
-                       di->bat->bat_type[
-                               di->bat->batt_id].low_high_cur_lvl);
+                       di->bm->bat_type[
+                               di->bm->batt_id].low_high_vol_lvl,
+                       di->bm->bat_type[
+                               di->bm->batt_id].low_high_cur_lvl);
                abx500_chargalg_stop_maintenance_timer(di);
                di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
                abx500_chargalg_state_to(di, STATE_TEMP_LOWHIGH);
@@ -1543,11 +1563,11 @@ static void abx500_chargalg_periodic_work(struct work_struct *work)
        if (di->chg_info.conn_chg)
                queue_delayed_work(di->chargalg_wq,
                        &di->chargalg_periodic_work,
-                       di->bat->interval_charging * HZ);
+                       di->bm->interval_charging * HZ);
        else
                queue_delayed_work(di->chargalg_wq,
                        &di->chargalg_periodic_work,
-                       di->bat->interval_not_charging * HZ);
+                       di->bm->interval_not_charging * HZ);
 }
 
 /**
@@ -1614,10 +1634,13 @@ static int abx500_chargalg_get_property(struct power_supply *psy,
                if (di->events.batt_ovv) {
                        val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
                } else if (di->events.btemp_underover) {
-                       if (di->batt_data.temp <= di->bat->temp_under)
+                       if (di->batt_data.temp <= di->bm->temp_under)
                                val->intval = POWER_SUPPLY_HEALTH_COLD;
                        else
                                val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+               } else if (di->charge_state == STATE_SAFETY_TIMER_EXPIRED ||
+                          di->charge_state == STATE_SAFETY_TIMER_EXPIRED_INIT) {
+                       val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
                } else {
                        val->intval = POWER_SUPPLY_HEALTH_GOOD;
                }
@@ -1630,6 +1653,25 @@ static int abx500_chargalg_get_property(struct power_supply *psy,
 
 /* Exposure to the sysfs interface */
 
+/**
+ * abx500_chargalg_sysfs_show() - sysfs show operations
+ * @kobj:      pointer to the struct kobject
+ * @attr:      pointer to the struct attribute
+ * @buf:       buffer to which the value shown in user space is written
+ *
+ * Returns the number of characters written to @buf
+ */
+static ssize_t abx500_chargalg_sysfs_show(struct kobject *kobj,
+                                         struct attribute *attr, char *buf)
+{
+       struct abx500_chargalg *di = container_of(kobj,
+               struct abx500_chargalg, chargalg_kobject);
+
+       return sprintf(buf, "%d\n",
+                      di->susp_status.ac_suspended &&
+                      di->susp_status.usb_suspended);
+}
+
 /**
  * abx500_chargalg_sysfs_charger() - sysfs store operations
  * @kobj:      pointer to the struct kobject
@@ -1698,7 +1740,7 @@ static ssize_t abx500_chargalg_sysfs_charger(struct kobject *kobj,
 static struct attribute abx500_chargalg_en_charger = \
 {
        .name = "chargalg",
-       .mode = S_IWUGO,
+       .mode = S_IRUGO | S_IWUSR,
 };
 
 static struct attribute *abx500_chargalg_chg[] = {
@@ -1707,6 +1749,7 @@ static struct attribute *abx500_chargalg_chg[] = {
 };
 
 static const struct sysfs_ops abx500_chargalg_sysfs_ops = {
+       .show = abx500_chargalg_sysfs_show,
        .store = abx500_chargalg_sysfs_charger,
 };
 
@@ -1806,6 +1849,7 @@ static char *supply_interface[] = {
 static int abx500_chargalg_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
+       struct abx500_bm_data *plat = pdev->dev.platform_data;
        struct abx500_chargalg *di;
        int ret = 0;
 
@@ -1814,21 +1858,19 @@ static int abx500_chargalg_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "%s no mem for ab8500_chargalg\n", __func__);
                return -ENOMEM;
        }
-       di->bat = pdev->mfd_cell->platform_data;
-       if (!di->bat) {
-               if (np) {
-                       ret = bmdevs_of_probe(&pdev->dev, np, &di->bat);
-                       if (ret) {
-                               dev_err(&pdev->dev,
-                                       "failed to get battery information\n");
-                               return ret;
-                       }
-               } else {
-                       dev_err(&pdev->dev, "missing dt node for ab8500_chargalg\n");
-                       return -EINVAL;
+
+       if (!plat) {
+               dev_err(&pdev->dev, "no battery management data supplied\n");
+               return -EINVAL;
+       }
+       di->bm = plat;
+
+       if (np) {
+               ret = ab8500_bm_of_probe(&pdev->dev, np, di->bm);
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to get battery information\n");
+                       return ret;
                }
-       } else {
-               dev_info(&pdev->dev, "falling back to legacy platform data\n");
        }
 
        /* get device struct */
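
A detail that is easy to miss in the abx500_chargalg hunks above: the recharge
decision in STATE_WAIT_FOR_RECHARGE is now based on the reported capacity
rather than on a debounced voltage comparison. A minimal sketch of the new
check, reusing the structures shown above (the sketch_ helper name is made up
for illustration and is not part of the patch):

	/*
	 * Sketch only: restart charging as soon as the reported capacity
	 * drops to the battery's recharge_cap threshold.
	 */
	static bool sketch_should_recharge(const struct abx500_chargalg *di)
	{
		int limit = di->bm->bat_type[di->bm->batt_id].recharge_cap;

		return di->batt_data.percent <= limit;
	}
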
index ee842b37f462c8a1f0a89eb828173ccb874cc25a..ca91396fc48eef616f812bf97e12696a038c61a2 100644 (file)
@@ -28,7 +28,6 @@
  * http://www.ti.com/product/bq24155
  */
 
-#include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/param.h>
@@ -734,12 +733,10 @@ static int bq2415x_set_mode(struct bq2415x_device *bq, enum bq2415x_mode mode)
        int charger = 0;
        int boost = 0;
 
-       if (mode == BQ2415X_MODE_HOST_CHARGER ||
-               mode == BQ2415X_MODE_DEDICATED_CHARGER)
-                       charger = 1;
-
        if (mode == BQ2415X_MODE_BOOST)
                boost = 1;
+       else if (mode != BQ2415X_MODE_OFF)
+               charger = 1;
 
        if (!charger)
                ret = bq2415x_exec_command(bq, BQ2415X_CHARGER_DISABLE);
@@ -751,6 +748,10 @@ static int bq2415x_set_mode(struct bq2415x_device *bq, enum bq2415x_mode mode)
                return ret;
 
        switch (mode) {
+       case BQ2415X_MODE_OFF:
+               dev_dbg(bq->dev, "changing mode to: Offline\n");
+               ret = bq2415x_set_current_limit(bq, 100);
+               break;
        case BQ2415X_MODE_NONE:
                dev_dbg(bq->dev, "changing mode to: N/A\n");
                ret = bq2415x_set_current_limit(bq, 100);
@@ -843,7 +844,7 @@ static void bq2415x_timer_error(struct bq2415x_device *bq, const char *msg)
        dev_err(bq->dev, "%s\n", msg);
        if (bq->automode > 0)
                bq->automode = 0;
-       bq2415x_set_mode(bq, BQ2415X_MODE_NONE);
+       bq2415x_set_mode(bq, BQ2415X_MODE_OFF);
        bq2415x_set_autotimer(bq, 0);
 }
 
@@ -1136,6 +1137,10 @@ static ssize_t bq2415x_sysfs_set_mode(struct device *dev,
                        return -ENOSYS;
                bq->automode = 1;
                mode = bq->reported_mode;
+       } else if (strncmp(buf, "off", 3) == 0) {
+               if (bq->automode > 0)
+                       bq->automode = 0;
+               mode = BQ2415X_MODE_OFF;
        } else if (strncmp(buf, "none", 4) == 0) {
                if (bq->automode > 0)
                        bq->automode = 0;
@@ -1183,6 +1188,9 @@ static ssize_t bq2415x_sysfs_show_mode(struct device *dev,
                ret += sprintf(buf+ret, "auto (");
 
        switch (bq->mode) {
+       case BQ2415X_MODE_OFF:
+               ret += sprintf(buf+ret, "off");
+               break;
        case BQ2415X_MODE_NONE:
                ret += sprintf(buf+ret, "none");
                break;
@@ -1217,6 +1225,8 @@ static ssize_t bq2415x_sysfs_show_reported_mode(struct device *dev,
                return -EINVAL;
 
        switch (bq->reported_mode) {
+       case BQ2415X_MODE_OFF:
+               return sprintf(buf, "off\n");
        case BQ2415X_MODE_NONE:
                return sprintf(buf, "none\n");
        case BQ2415X_MODE_HOST_CHARGER:
@@ -1523,7 +1533,7 @@ static int bq2415x_probe(struct i2c_client *client,
                goto error_1;
        }
 
-       bq = kzalloc(sizeof(*bq), GFP_KERNEL);
+       bq = devm_kzalloc(&client->dev, sizeof(*bq), GFP_KERNEL);
        if (!bq) {
                dev_err(&client->dev, "failed to allocate device data\n");
                ret = -ENOMEM;
@@ -1536,8 +1546,8 @@ static int bq2415x_probe(struct i2c_client *client,
        bq->dev = &client->dev;
        bq->chip = id->driver_data;
        bq->name = name;
-       bq->mode = BQ2415X_MODE_NONE;
-       bq->reported_mode = BQ2415X_MODE_NONE;
+       bq->mode = BQ2415X_MODE_OFF;
+       bq->reported_mode = BQ2415X_MODE_OFF;
        bq->autotimer = 0;
        bq->automode = 0;
 
@@ -1549,19 +1559,19 @@ static int bq2415x_probe(struct i2c_client *client,
        ret = bq2415x_power_supply_init(bq);
        if (ret) {
                dev_err(bq->dev, "failed to register power supply: %d\n", ret);
-               goto error_3;
+               goto error_2;
        }
 
        ret = bq2415x_sysfs_init(bq);
        if (ret) {
                dev_err(bq->dev, "failed to create sysfs entries: %d\n", ret);
-               goto error_4;
+               goto error_3;
        }
 
        ret = bq2415x_set_defaults(bq);
        if (ret) {
                dev_err(bq->dev, "failed to set default values: %d\n", ret);
-               goto error_5;
+               goto error_4;
        }
 
        if (bq->init_data.set_mode_hook) {
@@ -1585,12 +1595,10 @@ static int bq2415x_probe(struct i2c_client *client,
        dev_info(bq->dev, "driver registered\n");
        return 0;
 
-error_5:
-       bq2415x_sysfs_exit(bq);
 error_4:
-       bq2415x_power_supply_exit(bq);
+       bq2415x_sysfs_exit(bq);
 error_3:
-       kfree(bq);
+       bq2415x_power_supply_exit(bq);
 error_2:
        kfree(name);
 error_1:
@@ -1622,7 +1630,6 @@ static int bq2415x_remove(struct i2c_client *client)
        dev_info(bq->dev, "driver unregistered\n");
 
        kfree(bq->name);
-       kfree(bq);
 
        return 0;
 }
@@ -1652,18 +1659,7 @@ static struct i2c_driver bq2415x_driver = {
        .remove = bq2415x_remove,
        .id_table = bq2415x_i2c_id_table,
 };
-
-static int __init bq2415x_init(void)
-{
-       return i2c_add_driver(&bq2415x_driver);
-}
-module_init(bq2415x_init);
-
-static void __exit bq2415x_exit(void)
-{
-       i2c_del_driver(&bq2415x_driver);
-}
-module_exit(bq2415x_exit);
+module_i2c_driver(bq2415x_driver);
 
 MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
 MODULE_DESCRIPTION("bq2415x charger driver");
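
The error-path renumbering above follows from the switch to devm_kzalloc():
the driver data is now freed automatically by the device core on probe
failure and on removal, so the explicit kfree(bq) calls disappear and each
label moves up one step. The module_i2c_driver() helper at the end replaces
the hand-written init/exit pair; it expands to roughly the boilerplate that
was deleted, shown here only to make the removal easier to follow:

	/* Roughly what module_i2c_driver(bq2415x_driver) generates. */
	static int __init bq2415x_driver_init(void)
	{
		return i2c_add_driver(&bq2415x_driver);
	}
	module_init(bq2415x_driver_init);

	static void __exit bq2415x_driver_exit(void)
	{
		i2c_del_driver(&bq2415x_driver);
	}
	module_exit(bq2415x_driver_exit);
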
index 36b34efdafc9fda1d8cc96773031f5b106f005f9..8ccf5d7d0add29c12c7830036b1881977ec3f188 100644 (file)
@@ -299,7 +299,7 @@ static int bq27x00_battery_read_energy(struct bq27x00_device_info *di)
 }
 
 /*
- * Return the battery temperature in tenths of degree Celsius
+ * Return the battery temperature in tenths of degree Kelvin
  * Or < 0 if something fails.
  */
 static int bq27x00_battery_read_temperature(struct bq27x00_device_info *di)
@@ -312,10 +312,8 @@ static int bq27x00_battery_read_temperature(struct bq27x00_device_info *di)
                return temp;
        }
 
-       if (bq27xxx_is_chip_version_higher(di))
-               temp -= 2731;
-       else
-               temp = ((temp * 5) - 5463) / 2;
+       if (!bq27xxx_is_chip_version_higher(di))
+               temp = 5 * temp / 2;
 
        return temp;
 }
@@ -448,7 +446,6 @@ static void bq27x00_update(struct bq27x00_device_info *di)
                cache.temperature = bq27x00_battery_read_temperature(di);
                if (!is_bq27425)
                        cache.cycle_count = bq27x00_battery_read_cyct(di);
-               cache.cycle_count = bq27x00_battery_read_cyct(di);
                cache.power_avg =
                        bq27x00_battery_read_pwr_avg(di, BQ27x00_POWER_AVG);
 
@@ -642,6 +639,8 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
                break;
        case POWER_SUPPLY_PROP_TEMP:
                ret = bq27x00_simple_value(di->cache.temperature, val);
+               if (ret == 0)
+                       val->intval -= 2731;
                break;
        case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
                ret = bq27x00_simple_value(di->cache.time_to_empty, val);
@@ -696,7 +695,6 @@ static int bq27x00_powersupply_init(struct bq27x00_device_info *di)
        int ret;
 
        di->bat.type = POWER_SUPPLY_TYPE_BATTERY;
-       di->chip = BQ27425;
        if (di->chip == BQ27425) {
                di->bat.properties = bq27425_battery_props;
                di->bat.num_properties = ARRAY_SIZE(bq27425_battery_props);
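
With the change above the cached temperature is kept in tenths of a degree
Kelvin for both chip generations, and the 2731 offset is subtracted only once,
when POWER_SUPPLY_PROP_TEMP is read. A minimal worked example of that
conversion (sketch only, helper name made up, not part of the patch):

	/*
	 * Sketch only: map a cached reading in tenths of a Kelvin to the
	 * tenths-of-a-degree-Celsius unit reported to user space.
	 */
	static inline int sketch_dk_to_dc(int tenths_kelvin)
	{
		return tenths_kelvin - 2731;	/* 2981 -> 250, i.e. 25.0 C */
	}
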
index 6ba047f5ac2c35d413773368cd39f3d6bb14ae1f..8acc3f8d303c70876e9e53e01a3dffa985cddc44 100644 (file)
@@ -669,15 +669,21 @@ static void _setup_polling(struct work_struct *work)
        WARN(cm_wq == NULL, "charger-manager: workqueue not initialized"
                            ". try it later. %s\n", __func__);
 
+       /*
+        * Use mod_delayed_work() iff the next polling interval should
+        * occur before the currently scheduled one.  If @cm_monitor_work
+        * isn't active, the end result is the same, so no need to worry
+        * about stale @next_polling.
+        */
        _next_polling = jiffies + polling_jiffy;
 
-       if (!delayed_work_pending(&cm_monitor_work) ||
-           (delayed_work_pending(&cm_monitor_work) &&
-            time_after(next_polling, _next_polling))) {
-               next_polling = jiffies + polling_jiffy;
+       if (time_before(_next_polling, next_polling)) {
                mod_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
+               next_polling = _next_polling;
+       } else {
+               if (queue_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy))
+                       next_polling = _next_polling;
        }
-
 out:
        mutex_unlock(&cm_list_mtx);
 }
@@ -751,8 +757,7 @@ static void misc_event_handler(struct charger_manager *cm,
        if (cm_suspended)
                device_set_wakeup_capable(cm->dev, true);
 
-       if (!delayed_work_pending(&cm_monitor_work) &&
-           is_polling_required(cm) && cm->desc->polling_interval_ms)
+       if (is_polling_required(cm) && cm->desc->polling_interval_ms)
                schedule_work(&setup_polling);
        uevent_notify(cm, default_event_names[type]);
 }
@@ -1170,8 +1175,7 @@ static int charger_extcon_notifier(struct notifier_block *self,
         * when charger cable is attached.
         */
        if (cable->attached && is_polling_required(cable->cm)) {
-               if (work_pending(&setup_polling))
-                       cancel_work_sync(&setup_polling);
+               cancel_work_sync(&setup_polling);
                schedule_work(&setup_polling);
        }
 
@@ -1215,6 +1219,55 @@ static int charger_extcon_init(struct charger_manager *cm,
        return ret;
 }
 
+/**
+ * charger_manager_register_extcon - Register extcon device to receive state
+ *                                  of charger cable.
+ * @cm: the Charger Manager representing the battery.
+ *
+ * This function uses the EXTCON (External Connector) subsystem to detect the
+ * state of charger cables, to enable or disable the charger (regulator), and
+ * to select the charger cable for charging among a number of external cables
+ * according to the policy of the H/W board.
+ */
+static int charger_manager_register_extcon(struct charger_manager *cm)
+{
+       struct charger_desc *desc = cm->desc;
+       struct charger_regulator *charger;
+       int ret = 0;
+       int i;
+       int j;
+
+       for (i = 0; i < desc->num_charger_regulators; i++) {
+               charger = &desc->charger_regulators[i];
+
+               charger->consumer = regulator_get(cm->dev,
+                                       charger->regulator_name);
+               if (charger->consumer == NULL) {
+                       dev_err(cm->dev, "Cannot find charger(%s)\n",
+                                       charger->regulator_name);
+                       ret = -EINVAL;
+                       goto err;
+               }
+               charger->cm = cm;
+
+               for (j = 0; j < charger->num_cables; j++) {
+                       struct charger_cable *cable = &charger->cables[j];
+
+                       ret = charger_extcon_init(cm, cable);
+                       if (ret < 0) {
+                               dev_err(cm->dev, "Cannot initialize charger(%s)\n",
+                                               charger->regulator_name);
+                               goto err;
+                       }
+                       cable->charger = charger;
+                       cable->cm = cm;
+               }
+       }
+
+err:
+       return ret;
+}
+
 /* help function of sysfs node to control charger(regulator) */
 static ssize_t charger_name_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
@@ -1274,7 +1327,7 @@ static ssize_t charger_externally_control_store(struct device *dev,
 
        for (i = 0; i < desc->num_charger_regulators; i++) {
                if (&desc->charger_regulators[i] != charger &&
-                             !desc->charger_regulators[i].externally_control) {
+                       !desc->charger_regulators[i].externally_control) {
                        /*
                         * At least, one charger is controlled by
                         * charger-manager
@@ -1303,13 +1356,107 @@ static ssize_t charger_externally_control_store(struct device *dev,
        return count;
 }
 
+/**
+ * charger_manager_register_sysfs - Register sysfs entry for each charger
+ * @cm: the Charger Manager representing the battery.
+ *
+ * This function adds a sysfs entry for each charger (regulator) so that the
+ * charger can be controlled from user-space. If a board provides more than
+ * one charger but, depending on the user scenario or hardware restrictions,
+ * only one of them should be used, the user writes 1 or 0 (zero) to
+ * '/sys/class/power_supply/battery/charger.[index]/externally_control'.
+ * For example, if the user writes 1 to that entry, the charger is no longer
+ * controlled by charger-manager and its regulator always stays off.
+ */
+static int charger_manager_register_sysfs(struct charger_manager *cm)
+{
+       struct charger_desc *desc = cm->desc;
+       struct charger_regulator *charger;
+       int chargers_externally_control = 1;
+       char buf[11];
+       char *str;
+       int ret = 0;
+       int i;
+
+       /* Create sysfs entry to control charger(regulator) */
+       for (i = 0; i < desc->num_charger_regulators; i++) {
+               charger = &desc->charger_regulators[i];
+
+               snprintf(buf, 10, "charger.%d", i);
+               str = kzalloc(sizeof(char) * (strlen(buf) + 1), GFP_KERNEL);
+               if (!str) {
+                       dev_err(cm->dev, "Cannot allocate memory: %s\n",
+                                       charger->regulator_name);
+                       ret = -ENOMEM;
+                       goto err;
+               }
+               strcpy(str, buf);
+
+               charger->attrs[0] = &charger->attr_name.attr;
+               charger->attrs[1] = &charger->attr_state.attr;
+               charger->attrs[2] = &charger->attr_externally_control.attr;
+               charger->attrs[3] = NULL;
+               charger->attr_g.name = str;
+               charger->attr_g.attrs = charger->attrs;
+
+               sysfs_attr_init(&charger->attr_name.attr);
+               charger->attr_name.attr.name = "name";
+               charger->attr_name.attr.mode = 0444;
+               charger->attr_name.show = charger_name_show;
+
+               sysfs_attr_init(&charger->attr_state.attr);
+               charger->attr_state.attr.name = "state";
+               charger->attr_state.attr.mode = 0444;
+               charger->attr_state.show = charger_state_show;
+
+               sysfs_attr_init(&charger->attr_externally_control.attr);
+               charger->attr_externally_control.attr.name
+                               = "externally_control";
+               charger->attr_externally_control.attr.mode = 0644;
+               charger->attr_externally_control.show
+                               = charger_externally_control_show;
+               charger->attr_externally_control.store
+                               = charger_externally_control_store;
+
+               if (!desc->charger_regulators[i].externally_control ||
+                               !chargers_externally_control)
+                       chargers_externally_control = 0;
+
+               dev_info(cm->dev, "'%s' regulator's externally_control "
+                               "is %d\n", charger->regulator_name,
+                               charger->externally_control);
+
+               ret = sysfs_create_group(&cm->charger_psy.dev->kobj,
+                                       &charger->attr_g);
+               if (ret < 0) {
+                       dev_err(cm->dev, "Cannot create sysfs entry "
+                                       "of %s regulator\n",
+                                       charger->regulator_name);
+                       ret = -EINVAL;
+                       goto err;
+               }
+       }
+
+       if (chargers_externally_control) {
+               dev_err(cm->dev, "Cannot register regulator because "
+                               "charger-manager needs at least "
+                               "one charger for charging the battery\n");
+
+               ret = -EINVAL;
+               goto err;
+       }
+
+err:
+       return ret;
+}
+
 static int charger_manager_probe(struct platform_device *pdev)
 {
        struct charger_desc *desc = dev_get_platdata(&pdev->dev);
        struct charger_manager *cm;
        int ret = 0, i = 0;
        int j = 0;
-       int chargers_externally_control = 1;
        union power_supply_propval val;
 
        if (g_desc && !rtc_dev && g_desc->rtc_name) {
@@ -1440,11 +1587,10 @@ static int charger_manager_probe(struct platform_device *pdev)
 
        memcpy(&cm->charger_psy, &psy_default, sizeof(psy_default));
 
-       if (!desc->psy_name) {
+       if (!desc->psy_name)
                strncpy(cm->psy_name_buf, psy_default.name, PSY_NAME_MAX);
-       } else {
+       else
                strncpy(cm->psy_name_buf, desc->psy_name, PSY_NAME_MAX);
-       }
        cm->charger_psy.name = cm->psy_name_buf;
 
        /* Allocate for psy properties because they may vary */
@@ -1496,105 +1642,19 @@ static int charger_manager_probe(struct platform_device *pdev)
                goto err_register;
        }
 
-       for (i = 0 ; i < desc->num_charger_regulators ; i++) {
-               struct charger_regulator *charger
-                                       = &desc->charger_regulators[i];
-               char buf[11];
-               char *str;
-
-               charger->consumer = regulator_get(&pdev->dev,
-                                       charger->regulator_name);
-               if (charger->consumer == NULL) {
-                       dev_err(&pdev->dev, "Cannot find charger(%s)n",
-                                               charger->regulator_name);
-                       ret = -EINVAL;
-                       goto err_chg_get;
-               }
-               charger->cm = cm;
-
-               for (j = 0 ; j < charger->num_cables ; j++) {
-                       struct charger_cable *cable = &charger->cables[j];
-
-                       ret = charger_extcon_init(cm, cable);
-                       if (ret < 0) {
-                               dev_err(&pdev->dev, "Cannot find charger(%s)n",
-                                               charger->regulator_name);
-                               goto err_extcon;
-                       }
-                       cable->charger = charger;
-                       cable->cm = cm;
-               }
-
-               /* Create sysfs entry to control charger(regulator) */
-               snprintf(buf, 10, "charger.%d", i);
-               str = kzalloc(sizeof(char) * (strlen(buf) + 1), GFP_KERNEL);
-               if (!str) {
-                       for (i--; i >= 0; i--) {
-                               charger = &desc->charger_regulators[i];
-                               kfree(charger->attr_g.name);
-                       }
-                       ret = -ENOMEM;
-
-                       goto err_extcon;
-               }
-               strcpy(str, buf);
-
-               charger->attrs[0] = &charger->attr_name.attr;
-               charger->attrs[1] = &charger->attr_state.attr;
-               charger->attrs[2] = &charger->attr_externally_control.attr;
-               charger->attrs[3] = NULL;
-               charger->attr_g.name = str;
-               charger->attr_g.attrs = charger->attrs;
-
-               sysfs_attr_init(&charger->attr_name.attr);
-               charger->attr_name.attr.name = "name";
-               charger->attr_name.attr.mode = 0444;
-               charger->attr_name.show = charger_name_show;
-
-               sysfs_attr_init(&charger->attr_state.attr);
-               charger->attr_state.attr.name = "state";
-               charger->attr_state.attr.mode = 0444;
-               charger->attr_state.show = charger_state_show;
-
-               sysfs_attr_init(&charger->attr_externally_control.attr);
-               charger->attr_externally_control.attr.name
-                               = "externally_control";
-               charger->attr_externally_control.attr.mode = 0644;
-               charger->attr_externally_control.show
-                               = charger_externally_control_show;
-               charger->attr_externally_control.store
-                               = charger_externally_control_store;
-
-               if (!desc->charger_regulators[i].externally_control ||
-                               !chargers_externally_control) {
-                       chargers_externally_control = 0;
-               }
-               dev_info(&pdev->dev, "'%s' regulator's externally_control"
-                               "is %d\n", charger->regulator_name,
-                               charger->externally_control);
-
-               ret = sysfs_create_group(&cm->charger_psy.dev->kobj,
-                               &charger->attr_g);
-               if (ret < 0) {
-                       dev_info(&pdev->dev, "Cannot create sysfs entry"
-                                       "of %s regulator\n",
-                                       charger->regulator_name);
-               }
-       }
-
-       if (chargers_externally_control) {
-               dev_err(&pdev->dev, "Cannot register regulator because "
-                               "charger-manager must need at least "
-                               "one charger for charging battery\n");
-
-               ret = -EINVAL;
-               goto err_chg_enable;
+       /* Register extcon device for charger cable */
+       ret = charger_manager_register_extcon(cm);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Cannot initialize extcon device\n");
+               goto err_reg_extcon;
        }
 
-       ret = try_charger_enable(cm, true);
-       if (ret) {
-               dev_err(&pdev->dev, "Cannot enable charger regulators\n");
-               goto err_chg_enable;
+       /* Register sysfs entry for charger(regulator) */
+       ret = charger_manager_register_sysfs(cm);
+       if (ret < 0) {
+               dev_err(&pdev->dev,
+                       "Cannot initialize sysfs entry of regulator\n");
+               goto err_reg_sysfs;
        }
 
        /* Add to the list */
@@ -1613,27 +1673,28 @@ static int charger_manager_probe(struct platform_device *pdev)
 
        return 0;
 
-err_chg_enable:
+err_reg_sysfs:
        for (i = 0; i < desc->num_charger_regulators; i++) {
                struct charger_regulator *charger;
 
                charger = &desc->charger_regulators[i];
                sysfs_remove_group(&cm->charger_psy.dev->kobj,
                                &charger->attr_g);
+
                kfree(charger->attr_g.name);
        }
-err_extcon:
-       for (i = 0 ; i < desc->num_charger_regulators ; i++) {
-               struct charger_regulator *charger
-                               = &desc->charger_regulators[i];
-               for (j = 0 ; j < charger->num_cables ; j++) {
+err_reg_extcon:
+       for (i = 0; i < desc->num_charger_regulators; i++) {
+               struct charger_regulator *charger;
+
+               charger = &desc->charger_regulators[i];
+               for (j = 0; j < charger->num_cables; j++) {
                        struct charger_cable *cable = &charger->cables[j];
                        extcon_unregister_interest(&cable->extcon_dev);
                }
-       }
-err_chg_get:
-       for (i = 0 ; i < desc->num_charger_regulators ; i++)
+
                regulator_put(desc->charger_regulators[i].consumer);
+       }
 
        power_supply_unregister(&cm->charger_psy);
 err_register:
@@ -1661,10 +1722,8 @@ static int charger_manager_remove(struct platform_device *pdev)
        list_del(&cm->entry);
        mutex_unlock(&cm_list_mtx);
 
-       if (work_pending(&setup_polling))
-               cancel_work_sync(&setup_polling);
-       if (delayed_work_pending(&cm_monitor_work))
-               cancel_delayed_work_sync(&cm_monitor_work);
+       cancel_work_sync(&setup_polling);
+       cancel_delayed_work_sync(&cm_monitor_work);
 
        for (i = 0 ; i < desc->num_charger_regulators ; i++) {
                struct charger_regulator *charger
@@ -1733,8 +1792,7 @@ static int cm_suspend_prepare(struct device *dev)
                cm_suspended = true;
        }
 
-       if (delayed_work_pending(&cm->fullbatt_vchk_work))
-               cancel_delayed_work(&cm->fullbatt_vchk_work);
+       cancel_delayed_work(&cm->fullbatt_vchk_work);
        cm->status_save_ext_pwr_inserted = is_ext_pwr_online(cm);
        cm->status_save_batt = is_batt_present(cm);
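
The comment block added in _setup_polling() above is the key to the
rescheduling fix: mod_delayed_work() is only used to pull an already-queued
poll forward, while queue_delayed_work() is relied on to be a no-op when work
is still pending. A compressed sketch of that decision, reusing the
identifiers from the hunk (illustration only, not part of the patch):

	/*
	 * Sketch only: the scheduling decision made in _setup_polling().
	 * time_before() is used rather than a plain "<" so that jiffies
	 * wraparound is handled correctly.
	 */
	unsigned long candidate = jiffies + polling_jiffy;

	if (time_before(candidate, next_polling)) {
		/* New deadline is earlier: move the queued work up. */
		mod_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
		next_polling = candidate;
	} else if (queue_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy)) {
		/* Nothing was pending: this submission sets the deadline. */
		next_polling = candidate;
	}
	/* Otherwise an earlier poll is already queued; leave it alone. */
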
 
index 94762e67e22b69d50c28a587586a83e9266323f9..e8c5a391a4987b650f4699b516d6371456b06bda 100644 (file)
@@ -22,6 +22,7 @@
 
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <linux/notifier.h>
 
 #define DA9030_FAULT_LOG               0x0a
 #define DA9030_FAULT_LOG_OVER_TEMP     (1 << 7)
index 3c5c2e459d73c73b5f41493da8d383c74d606b3a..08193feb3b088bc7ae7d43134e6a12e7e2f75da9 100644 (file)
@@ -337,7 +337,7 @@ static unsigned char da9052_determine_vc_tbl_index(unsigned char adc_temp)
        if (adc_temp > vc_tbl_ref[DA9052_VC_TBL_REF_SZ - 1])
                return DA9052_VC_TBL_REF_SZ - 1;
 
-       for (i = 0; i < DA9052_VC_TBL_REF_SZ; i++) {
+       for (i = 0; i < DA9052_VC_TBL_REF_SZ - 1; i++) {
                if ((adc_temp > vc_tbl_ref[i]) &&
                    (adc_temp <= DA9052_MEAN(vc_tbl_ref[i], vc_tbl_ref[i + 1])))
                                return i;
index 2fa9b6bf1f3f9e08d9d5512e875ec377902898f9..e7301b3ed6237566de0325b53645dd15c4b6a769 100644 (file)
@@ -7,6 +7,8 @@
  *
  * DS2786 added by Yulia Vilensky <vilensky@compulab.co.il>
  *
+ * UEvent sending added by Evgeny Romanov <romanov@neurosoft.ru>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -19,6 +21,7 @@
 #include <linux/errno.h>
 #include <linux/swab.h>
 #include <linux/i2c.h>
+#include <linux/delay.h>
 #include <linux/idr.h>
 #include <linux/power_supply.h>
 #include <linux/slab.h>
@@ -40,6 +43,8 @@
 
 #define DS2786_CURRENT_UNITS   25
 
+#define DS278x_DELAY           1000
+
 struct ds278x_info;
 
 struct ds278x_battery_ops {
@@ -54,8 +59,11 @@ struct ds278x_info {
        struct i2c_client       *client;
        struct power_supply     battery;
        struct ds278x_battery_ops  *ops;
+       struct delayed_work     bat_work;
        int                     id;
        int                     rsns;
+       int                     capacity;
+       int                     status;         /* State Of Charge */
 };
 
 static DEFINE_IDR(battery_id);
@@ -220,6 +228,8 @@ static int ds278x_get_status(struct ds278x_info *info, int *status)
        if (err)
                return err;
 
+       info->capacity = capacity;
+
        if (capacity == 100)
                *status = POWER_SUPPLY_STATUS_FULL;
        else if (current_uA == 0)
@@ -267,6 +277,27 @@ static int ds278x_battery_get_property(struct power_supply *psy,
        return ret;
 }
 
+static void ds278x_bat_update(struct ds278x_info *info)
+{
+       int old_status = info->status;
+       int old_capacity = info->capacity;
+
+       ds278x_get_status(info, &info->status);
+
+       if ((old_status != info->status) || (old_capacity != info->capacity))
+               power_supply_changed(&info->battery);
+}
+
+static void ds278x_bat_work(struct work_struct *work)
+{
+       struct ds278x_info *info;
+
+       info = container_of(work, struct ds278x_info, bat_work.work);
+       ds278x_bat_update(info);
+
+       schedule_delayed_work(&info->bat_work, DS278x_DELAY);
+}
+
 static enum power_supply_property ds278x_battery_props[] = {
        POWER_SUPPLY_PROP_STATUS,
        POWER_SUPPLY_PROP_CAPACITY,
@@ -295,10 +326,39 @@ static int ds278x_battery_remove(struct i2c_client *client)
        idr_remove(&battery_id, info->id);
        mutex_unlock(&battery_lock);
 
+       cancel_delayed_work(&info->bat_work);
+
        kfree(info);
        return 0;
 }
 
+#ifdef CONFIG_PM
+
+static int ds278x_suspend(struct i2c_client *client,
+               pm_message_t state)
+{
+       struct ds278x_info *info = i2c_get_clientdata(client);
+
+       cancel_delayed_work(&info->bat_work);
+       return 0;
+}
+
+static int ds278x_resume(struct i2c_client *client)
+{
+       struct ds278x_info *info = i2c_get_clientdata(client);
+
+       schedule_delayed_work(&info->bat_work, DS278x_DELAY);
+       return 0;
+}
+
+#else
+
+#define ds278x_suspend NULL
+#define ds278x_resume NULL
+
+#endif /* CONFIG_PM */
+
+
 enum ds278x_num_id {
        DS2782 = 0,
        DS2786,
@@ -368,10 +428,17 @@ static int ds278x_battery_probe(struct i2c_client *client,
        info->ops  = &ds278x_ops[id->driver_data];
        ds278x_power_supply_init(&info->battery);
 
+       info->capacity = 100;
+       info->status = POWER_SUPPLY_STATUS_FULL;
+
+       INIT_DELAYED_WORK(&info->bat_work, ds278x_bat_work);
+
        ret = power_supply_register(&client->dev, &info->battery);
        if (ret) {
                dev_err(&client->dev, "failed to register battery\n");
                goto fail_register;
+       } else {
+               schedule_delayed_work(&info->bat_work, DS278x_DELAY);
        }
 
        return 0;
@@ -401,6 +468,8 @@ static struct i2c_driver ds278x_battery_driver = {
        },
        .probe          = ds278x_battery_probe,
        .remove         = ds278x_battery_remove,
+       .suspend        = ds278x_suspend,
+       .resume         = ds278x_resume,
        .id_table       = ds278x_id,
 };
 module_i2c_driver(ds278x_battery_driver);
index 32ce17e235c08ef3644c81573a22ad37417e50b2..836816b82cbc46d336c3dd79adc90393b6ca7e39 100644 (file)
@@ -263,9 +263,6 @@ static int gab_probe(struct platform_device *pdev)
        psy->external_power_changed = gab_ext_power_changed;
        adc_bat->pdata = pdata;
 
-       /* calculate the total number of channels */
-       chan = ARRAY_SIZE(gab_chan_name);
-
        /*
         * copying the static properties and allocating extra memory for holding
         * the extra configurable properties received from platform data.
@@ -291,6 +288,7 @@ static int gab_probe(struct platform_device *pdev)
                                                gab_chan_name[chan]);
                if (IS_ERR(adc_bat->channel[chan])) {
                        ret = PTR_ERR(adc_bat->channel[chan]);
+                       adc_bat->channel[chan] = NULL;
                } else {
                        /* copying properties for supported channels only */
                        memcpy(properties + sizeof(*(psy->properties)) * index,
@@ -344,8 +342,10 @@ err_gpio:
 gpio_req_fail:
        power_supply_unregister(psy);
 err_reg_fail:
-       for (chan = 0; ARRAY_SIZE(gab_chan_name); chan++)
-               iio_channel_release(adc_bat->channel[chan]);
+       for (chan = 0; chan < ARRAY_SIZE(gab_chan_name); chan++) {
+               if (adc_bat->channel[chan])
+                       iio_channel_release(adc_bat->channel[chan]);
+       }
 second_mem_fail:
        kfree(psy->properties);
 first_mem_fail:
@@ -365,8 +365,10 @@ static int gab_remove(struct platform_device *pdev)
                gpio_free(pdata->gpio_charge_finished);
        }
 
-       for (chan = 0; ARRAY_SIZE(gab_chan_name); chan++)
-               iio_channel_release(adc_bat->channel[chan]);
+       for (chan = 0; chan < ARRAY_SIZE(gab_chan_name); chan++) {
+               if (adc_bat->channel[chan])
+                       iio_channel_release(adc_bat->channel[chan]);
+       }
 
        kfree(adc_bat->psy.properties);
        cancel_delayed_work(&adc_bat->bat_work);
diff --git a/drivers/power/goldfish_battery.c b/drivers/power/goldfish_battery.c
new file mode 100644 (file)
index 0000000..c10f460
--- /dev/null
@@ -0,0 +1,236 @@
+/*
+ * Power supply driver for the goldfish emulator
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2012 Intel, Inc.
+ * Copyright (C) 2013 Intel, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+struct goldfish_battery_data {
+       void __iomem *reg_base;
+       int irq;
+       spinlock_t lock;
+
+       struct power_supply battery;
+       struct power_supply ac;
+};
+
+#define GOLDFISH_BATTERY_READ(data, addr) \
+       (readl(data->reg_base + addr))
+#define GOLDFISH_BATTERY_WRITE(data, addr, x) \
+       (writel(x, data->reg_base + addr))
+
+/*
+ * Temporary variable used between goldfish_battery_probe() and
+ * goldfish_battery_open().
+ */
+static struct goldfish_battery_data *battery_data;
+
+enum {
+       /* status register */
+       BATTERY_INT_STATUS          = 0x00,
+       /* set this to enable IRQ */
+       BATTERY_INT_ENABLE          = 0x04,
+
+       BATTERY_AC_ONLINE       = 0x08,
+       BATTERY_STATUS          = 0x0C,
+       BATTERY_HEALTH          = 0x10,
+       BATTERY_PRESENT         = 0x14,
+       BATTERY_CAPACITY        = 0x18,
+
+       BATTERY_STATUS_CHANGED  = 1U << 0,
+       AC_STATUS_CHANGED       = 1U << 1,
+       BATTERY_INT_MASK        = BATTERY_STATUS_CHANGED | AC_STATUS_CHANGED,
+};
+
+
+static int goldfish_ac_get_property(struct power_supply *psy,
+                       enum power_supply_property psp,
+                       union power_supply_propval *val)
+{
+       struct goldfish_battery_data *data = container_of(psy,
+               struct goldfish_battery_data, ac);
+       int ret = 0;
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_ONLINE:
+               val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_AC_ONLINE);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+       return ret;
+}
+
+static int goldfish_battery_get_property(struct power_supply *psy,
+                                enum power_supply_property psp,
+                                union power_supply_propval *val)
+{
+       struct goldfish_battery_data *data = container_of(psy,
+               struct goldfish_battery_data, battery);
+       int ret = 0;
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_STATUS:
+               val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_STATUS);
+               break;
+       case POWER_SUPPLY_PROP_HEALTH:
+               val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_HEALTH);
+               break;
+       case POWER_SUPPLY_PROP_PRESENT:
+               val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_PRESENT);
+               break;
+       case POWER_SUPPLY_PROP_TECHNOLOGY:
+               val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+               break;
+       case POWER_SUPPLY_PROP_CAPACITY:
+               val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_CAPACITY);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+static enum power_supply_property goldfish_battery_props[] = {
+       POWER_SUPPLY_PROP_STATUS,
+       POWER_SUPPLY_PROP_HEALTH,
+       POWER_SUPPLY_PROP_PRESENT,
+       POWER_SUPPLY_PROP_TECHNOLOGY,
+       POWER_SUPPLY_PROP_CAPACITY,
+};
+
+static enum power_supply_property goldfish_ac_props[] = {
+       POWER_SUPPLY_PROP_ONLINE,
+};
+
+static irqreturn_t goldfish_battery_interrupt(int irq, void *dev_id)
+{
+       unsigned long irq_flags;
+       struct goldfish_battery_data *data = dev_id;
+       uint32_t status;
+
+       spin_lock_irqsave(&data->lock, irq_flags);
+
+       /* read status flags, which will clear the interrupt */
+       status = GOLDFISH_BATTERY_READ(data, BATTERY_INT_STATUS);
+       status &= BATTERY_INT_MASK;
+
+       if (status & BATTERY_STATUS_CHANGED)
+               power_supply_changed(&data->battery);
+       if (status & AC_STATUS_CHANGED)
+               power_supply_changed(&data->ac);
+
+       spin_unlock_irqrestore(&data->lock, irq_flags);
+       return status ? IRQ_HANDLED : IRQ_NONE;
+}
+
+
+static int goldfish_battery_probe(struct platform_device *pdev)
+{
+       int ret;
+       struct resource *r;
+       struct goldfish_battery_data *data;
+
+       data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+       if (data == NULL)
+               return -ENOMEM;
+
+       spin_lock_init(&data->lock);
+
+       data->battery.properties = goldfish_battery_props;
+       data->battery.num_properties = ARRAY_SIZE(goldfish_battery_props);
+       data->battery.get_property = goldfish_battery_get_property;
+       data->battery.name = "battery";
+       data->battery.type = POWER_SUPPLY_TYPE_BATTERY;
+
+       data->ac.properties = goldfish_ac_props;
+       data->ac.num_properties = ARRAY_SIZE(goldfish_ac_props);
+       data->ac.get_property = goldfish_ac_get_property;
+       data->ac.name = "ac";
+       data->ac.type = POWER_SUPPLY_TYPE_MAINS;
+
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (r == NULL) {
+               dev_err(&pdev->dev, "platform_get_resource failed\n");
+               return -ENODEV;
+       }
+
+       data->reg_base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+       if (data->reg_base == NULL) {
+               dev_err(&pdev->dev, "unable to remap MMIO\n");
+               return -ENOMEM;
+       }
+
+       data->irq = platform_get_irq(pdev, 0);
+       if (data->irq < 0) {
+               dev_err(&pdev->dev, "platform_get_irq failed\n");
+               return -ENODEV;
+       }
+
+       ret = devm_request_irq(&pdev->dev, data->irq, goldfish_battery_interrupt,
+                                               IRQF_SHARED, pdev->name, data);
+       if (ret)
+               return ret;
+
+       ret = power_supply_register(&pdev->dev, &data->ac);
+       if (ret)
+               return ret;
+
+       ret = power_supply_register(&pdev->dev, &data->battery);
+       if (ret) {
+               power_supply_unregister(&data->ac);
+               return ret;
+       }
+
+       platform_set_drvdata(pdev, data);
+       battery_data = data;
+
+       GOLDFISH_BATTERY_WRITE(data, BATTERY_INT_ENABLE, BATTERY_INT_MASK);
+       return 0;
+}
+
+static int goldfish_battery_remove(struct platform_device *pdev)
+{
+       struct goldfish_battery_data *data = platform_get_drvdata(pdev);
+
+       power_supply_unregister(&data->battery);
+       power_supply_unregister(&data->ac);
+       battery_data = NULL;
+       return 0;
+}
+
+static struct platform_driver goldfish_battery_device = {
+       .probe          = goldfish_battery_probe,
+       .remove         = goldfish_battery_remove,
+       .driver = {
+               .name = "goldfish-battery"
+       }
+};
+module_platform_driver(goldfish_battery_device);
+
+MODULE_AUTHOR("Mike Lockwood lockwood@android.com");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Battery driver for the Goldfish emulator");
index 4ee71a90e2483f90a32cc2857a0b95833d7cffb8..5ef41b819172844a9b2ef594888c0dfada7b9e94 100644 (file)
@@ -367,28 +367,28 @@ static int lp8727_battery_get_property(struct power_supply *psy,
                        return -EINVAL;
 
                if (pdata->get_batt_present)
-                       val->intval = pchg->pdata->get_batt_present();
+                       val->intval = pdata->get_batt_present();
                break;
        case POWER_SUPPLY_PROP_VOLTAGE_NOW:
                if (!pdata)
                        return -EINVAL;
 
                if (pdata->get_batt_level)
-                       val->intval = pchg->pdata->get_batt_level();
+                       val->intval = pdata->get_batt_level();
                break;
        case POWER_SUPPLY_PROP_CAPACITY:
                if (!pdata)
                        return -EINVAL;
 
                if (pdata->get_batt_capacity)
-                       val->intval = pchg->pdata->get_batt_capacity();
+                       val->intval = pdata->get_batt_capacity();
                break;
        case POWER_SUPPLY_PROP_TEMP:
                if (!pdata)
                        return -EINVAL;
 
                if (pdata->get_batt_temp)
-                       val->intval = pchg->pdata->get_batt_temp();
+                       val->intval = pdata->get_batt_temp();
                break;
        default:
                break;
index 22b6407c9ca9949d65b0c719e895549e8bb3e902..e33d6b2a7a56eb0a5dd968b1b28506aca55868f8 100644 (file)
@@ -367,7 +367,8 @@ static inline bool lp8788_is_valid_charger_register(u8 addr)
        return addr >= LP8788_CHG_START && addr <= LP8788_CHG_END;
 }
 
-static int lp8788_update_charger_params(struct lp8788_charger *pchg)
+static int lp8788_update_charger_params(struct platform_device *pdev,
+                                       struct lp8788_charger *pchg)
 {
        struct lp8788 *lp = pchg->lp;
        struct lp8788_charger_platform_data *pdata = pchg->pdata;
@@ -376,7 +377,7 @@ static int lp8788_update_charger_params(struct lp8788_charger *pchg)
        int ret;
 
        if (!pdata || !pdata->chg_params) {
-               dev_info(lp->dev, "skip updating charger parameters\n");
+               dev_info(&pdev->dev, "skip updating charger parameters\n");
                return 0;
        }
 
@@ -537,7 +538,6 @@ err_free_irq:
 static int lp8788_irq_register(struct platform_device *pdev,
                                struct lp8788_charger *pchg)
 {
-       struct lp8788 *lp = pchg->lp;
        const char *name[] = {
                LP8788_CHG_IRQ, LP8788_PRSW_IRQ, LP8788_BATT_IRQ
        };
@@ -550,13 +550,13 @@ static int lp8788_irq_register(struct platform_device *pdev,
        for (i = 0; i < ARRAY_SIZE(name); i++) {
                ret = lp8788_set_irqs(pdev, pchg, name[i]);
                if (ret) {
-                       dev_warn(lp->dev, "irq setup failed: %s\n", name[i]);
+                       dev_warn(&pdev->dev, "irq setup failed: %s\n", name[i]);
                        return ret;
                }
        }
 
        if (pchg->num_irqs > LP8788_MAX_CHG_IRQS) {
-               dev_err(lp->dev, "invalid total number of irqs: %d\n",
+               dev_err(&pdev->dev, "invalid total number of irqs: %d\n",
                        pchg->num_irqs);
                return -EINVAL;
        }
@@ -690,9 +690,10 @@ static int lp8788_charger_probe(struct platform_device *pdev)
 {
        struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
        struct lp8788_charger *pchg;
+       struct device *dev = &pdev->dev;
        int ret;
 
-       pchg = devm_kzalloc(lp->dev, sizeof(struct lp8788_charger), GFP_KERNEL);
+       pchg = devm_kzalloc(dev, sizeof(struct lp8788_charger), GFP_KERNEL);
        if (!pchg)
                return -ENOMEM;
 
@@ -700,7 +701,7 @@ static int lp8788_charger_probe(struct platform_device *pdev)
        pchg->pdata = lp->pdata ? lp->pdata->chg_pdata : NULL;
        platform_set_drvdata(pdev, pchg);
 
-       ret = lp8788_update_charger_params(pchg);
+       ret = lp8788_update_charger_params(pdev, pchg);
        if (ret)
                return ret;
 
@@ -718,7 +719,7 @@ static int lp8788_charger_probe(struct platform_device *pdev)
 
        ret = lp8788_irq_register(pdev, pchg);
        if (ret)
-               dev_warn(lp->dev, "failed to register charger irq: %d\n", ret);
+               dev_warn(dev, "failed to register charger irq: %d\n", ret);
 
        return 0;
 }
index 22cfe9cc4727c161b2306edc5aa91688ee481ede..74a0bd9bc1621ef8a139ebace08e6d8f12c2f775 100644 (file)
@@ -207,7 +207,7 @@ static int max17040_probe(struct i2c_client *client,
        if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
                return -EIO;
 
-       chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+       chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
        if (!chip)
                return -ENOMEM;
 
@@ -225,7 +225,6 @@ static int max17040_probe(struct i2c_client *client,
        ret = power_supply_register(&client->dev, &chip->battery);
        if (ret) {
                dev_err(&client->dev, "failed: power supply register\n");
-               kfree(chip);
                return ret;
        }
 
@@ -244,7 +243,6 @@ static int max17040_remove(struct i2c_client *client)
 
        power_supply_unregister(&chip->battery);
        cancel_delayed_work(&chip->work);
-       kfree(chip);
        return 0;
 }
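
The max17040 hunks above switch the chip data to a device-managed allocation, which is what lets the kfree() calls drop out of both the probe error path and remove(): memory obtained with devm_kzalloc() is released automatically when probing fails or the device is unbound. A minimal sketch of the pattern; struct example_chip and example_probe() are illustrative names, not the driver's own:

#include <linux/i2c.h>
#include <linux/slab.h>

struct example_chip {
	struct i2c_client *client;
};

static int example_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct example_chip *chip;

	/* Freed automatically on probe failure or device unbind. */
	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->client = client;
	i2c_set_clientdata(client, chip);

	return 0;	/* later error paths need no kfree() either */
}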
 
diff --git a/drivers/power/pm2301_charger.c b/drivers/power/pm2301_charger.c
new file mode 100644 (file)
index 0000000..ed48d75
--- /dev/null
@@ -0,0 +1,1088 @@
+/*
+ * Copyright 2012 ST Ericsson.
+ *
+ * Power supply driver for ST Ericsson pm2xxx_charger charger
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/completion.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/workqueue.h>
+#include <linux/kobject.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500/ab8500-bm.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/mfd/abx500/ux500_chargalg.h>
+#include <linux/pm2301_charger.h>
+#include <linux/gpio.h>
+
+#include "pm2301_charger.h"
+
+#define to_pm2xxx_charger_ac_device_info(x) container_of((x), \
+               struct pm2xxx_charger, ac_chg)
+
+static int pm2xxx_interrupt_registers[] = {
+       PM2XXX_REG_INT1,
+       PM2XXX_REG_INT2,
+       PM2XXX_REG_INT3,
+       PM2XXX_REG_INT4,
+       PM2XXX_REG_INT5,
+       PM2XXX_REG_INT6,
+};
+
+static enum power_supply_property pm2xxx_charger_ac_props[] = {
+       POWER_SUPPLY_PROP_HEALTH,
+       POWER_SUPPLY_PROP_PRESENT,
+       POWER_SUPPLY_PROP_ONLINE,
+       POWER_SUPPLY_PROP_VOLTAGE_AVG,
+};
+
+static int pm2xxx_charger_voltage_map[] = {
+       3500,
+       3525,
+       3550,
+       3575,
+       3600,
+       3625,
+       3650,
+       3675,
+       3700,
+       3725,
+       3750,
+       3775,
+       3800,
+       3825,
+       3850,
+       3875,
+       3900,
+       3925,
+       3950,
+       3975,
+       4000,
+       4025,
+       4050,
+       4075,
+       4100,
+       4125,
+       4150,
+       4175,
+       4200,
+       4225,
+       4250,
+       4275,
+       4300,
+};
+
+static int pm2xxx_charger_current_map[] = {
+       200,
+       200,
+       400,
+       600,
+       800,
+       1000,
+       1200,
+       1400,
+       1600,
+       1800,
+       2000,
+       2200,
+       2400,
+       2600,
+       2800,
+       3000,
+};
+
+static const struct i2c_device_id pm2xxx_ident[] = {
+       { "pm2301", 0 },
+       { }
+};
+
+static void set_lpn_pin(struct pm2xxx_charger *pm2)
+{
+       if (pm2->ac.charger_connected)
+               return;
+       gpio_set_value(pm2->lpn_pin, 1);
+
+       return;
+}
+
+static void clear_lpn_pin(struct pm2xxx_charger *pm2)
+{
+       if (pm2->ac.charger_connected)
+               return;
+       gpio_set_value(pm2->lpn_pin, 0);
+
+       return;
+}
+
+static int pm2xxx_reg_read(struct pm2xxx_charger *pm2, int reg, u8 *val)
+{
+       int ret;
+       /*
+        * When the AC adaptor is unplugged, the host must drive LPN high
+        * to be able to communicate with the PM2301 over I2C and receive
+        * its I2C acknowledge.
+        */
+       mutex_lock(&pm2->lock);
+       set_lpn_pin(pm2);
+
+       ret = i2c_smbus_read_i2c_block_data(pm2->config.pm2xxx_i2c, reg,
+                               1, val);
+       if (ret < 0)
+               dev_err(pm2->dev, "Error reading register at 0x%x\n", reg);
+       else
+               ret = 0;
+       clear_lpn_pin(pm2);
+       mutex_unlock(&pm2->lock);
+
+       return ret;
+}
+
+static int pm2xxx_reg_write(struct pm2xxx_charger *pm2, int reg, u8 val)
+{
+       int ret;
+       /*
+        * When the AC adaptor is unplugged, the host must drive LPN high
+        * to be able to communicate with the PM2301 over I2C and receive
+        * its I2C acknowledge.
+        */
+       mutex_lock(&pm2->lock);
+       set_lpn_pin(pm2);
+
+       ret = i2c_smbus_write_i2c_block_data(pm2->config.pm2xxx_i2c, reg,
+                               1, &val);
+       if (ret < 0)
+               dev_err(pm2->dev, "Error writing register at 0x%x\n", reg);
+       else
+               ret = 0;
+       clear_lpn_pin(pm2);
+       mutex_unlock(&pm2->lock);
+
+       return ret;
+}
+
+static int pm2xxx_charging_enable_mngt(struct pm2xxx_charger *pm2)
+{
+       int ret;
+
+       /* Enable charging */
+       ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG2,
+                       (PM2XXX_CH_AUTO_RESUME_EN | PM2XXX_CHARGER_ENA));
+
+       return ret;
+}
+
+static int pm2xxx_charging_disable_mngt(struct pm2xxx_charger *pm2)
+{
+       int ret;
+
+       /* Disable charging */
+       ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG2,
+                       (PM2XXX_CH_AUTO_RESUME_DIS | PM2XXX_CHARGER_DIS));
+
+       return ret;
+}
+
+static int pm2xxx_charger_batt_therm_mngt(struct pm2xxx_charger *pm2, int val)
+{
+       queue_work(pm2->charger_wq, &pm2->check_main_thermal_prot_work);
+
+       return 0;
+}
+
+
+int pm2xxx_charger_die_therm_mngt(struct pm2xxx_charger *pm2, int val)
+{
+       queue_work(pm2->charger_wq, &pm2->check_main_thermal_prot_work);
+
+       return 0;
+}
+
+static int pm2xxx_charger_ovv_mngt(struct pm2xxx_charger *pm2, int val)
+{
+       int ret = 0;
+
+       pm2->failure_input_ovv++;
+       if (pm2->failure_input_ovv < 4) {
+               ret = pm2xxx_charging_enable_mngt(pm2);
+               goto out;
+       } else {
+               pm2->failure_input_ovv = 0;
+               dev_err(pm2->dev, "Overvoltage detected\n");
+               pm2->flags.ovv = true;
+               power_supply_changed(&pm2->ac_chg.psy);
+       }
+
+out:
+       return ret;
+}
+
+static int pm2xxx_charger_wd_exp_mngt(struct pm2xxx_charger *pm2, int val)
+{
+       dev_dbg(pm2->dev , "20 minutes watchdog occured\n");
+
+       pm2->ac.wd_expired = true;
+       power_supply_changed(&pm2->ac_chg.psy);
+
+       return 0;
+}
+
+static int pm2xxx_charger_vbat_lsig_mngt(struct pm2xxx_charger *pm2, int val)
+{
+       switch (val) {
+       case PM2XXX_INT1_ITVBATLOWR:
+               dev_dbg(pm2->dev, "VBAT grows above VBAT_LOW level\n");
+               break;
+
+       case PM2XXX_INT1_ITVBATLOWF:
+               dev_dbg(pm2->dev, "VBAT drops below VBAT_LOW level\n");
+               break;
+
+       default:
+               dev_err(pm2->dev, "Unknown VBAT level\n");
+       }
+
+       return 0;
+}
+
+static int pm2xxx_charger_bat_disc_mngt(struct pm2xxx_charger *pm2, int val)
+{
+       dev_dbg(pm2->dev, "battery disconnected\n");
+
+       return 0;
+}
+
+static int pm2xxx_charger_detection(struct pm2xxx_charger *pm2, u8 *val)
+{
+       int ret;
+
+       ret = pm2xxx_reg_read(pm2, PM2XXX_SRCE_REG_INT2, val);
+
+       if (ret < 0) {
+               dev_err(pm2->dev, "Charger detection failed\n");
+               goto out;
+       }
+
+       *val &= (PM2XXX_INT2_S_ITVPWR1PLUG | PM2XXX_INT2_S_ITVPWR2PLUG);
+
+out:
+       return ret;
+}
+
+static int pm2xxx_charger_itv_pwr_plug_mngt(struct pm2xxx_charger *pm2, int val)
+{
+
+       int ret;
+       u8 read_val;
+
+       /*
+        * Since we can't be sure that the events are received
+        * synchronously, we have to check if the main charger is
+        * connected by reading the interrupt source register.
+        */
+       ret = pm2xxx_charger_detection(pm2, &read_val);
+
+       if ((ret == 0) && read_val) {
+               pm2->ac.charger_connected = 1;
+               pm2->ac_conn = true;
+               queue_work(pm2->charger_wq, &pm2->ac_work);
+       }
+
+
+       return ret;
+}
+
+static int pm2xxx_charger_itv_pwr_unplug_mngt(struct pm2xxx_charger *pm2,
+                                                               int val)
+{
+       pm2->ac.charger_connected = 0;
+       queue_work(pm2->charger_wq, &pm2->ac_work);
+
+       return 0;
+}
+
+static int pm2_int_reg0(void *pm2_data, int val)
+{
+       struct pm2xxx_charger *pm2 = pm2_data;
+       int ret = 0;
+
+       if (val & (PM2XXX_INT1_ITVBATLOWR | PM2XXX_INT1_ITVBATLOWF)) {
+               ret = pm2xxx_charger_vbat_lsig_mngt(pm2, val &
+                       (PM2XXX_INT1_ITVBATLOWR | PM2XXX_INT1_ITVBATLOWF));
+       }
+
+       if (val & PM2XXX_INT1_ITVBATDISCONNECT) {
+               ret = pm2xxx_charger_bat_disc_mngt(pm2,
+                               PM2XXX_INT1_ITVBATDISCONNECT);
+       }
+
+       return ret;
+}
+
+static int pm2_int_reg1(void *pm2_data, int val)
+{
+       struct pm2xxx_charger *pm2 = pm2_data;
+       int ret = 0;
+
+       if (val & (PM2XXX_INT2_ITVPWR1PLUG | PM2XXX_INT2_ITVPWR2PLUG)) {
+               dev_dbg(pm2->dev , "Main charger plugged\n");
+               ret = pm2xxx_charger_itv_pwr_plug_mngt(pm2, val &
+                       (PM2XXX_INT2_ITVPWR1PLUG | PM2XXX_INT2_ITVPWR2PLUG));
+       }
+
+       if (val &
+               (PM2XXX_INT2_ITVPWR1UNPLUG | PM2XXX_INT2_ITVPWR2UNPLUG)) {
+               dev_dbg(pm2->dev , "Main charger unplugged\n");
+               ret = pm2xxx_charger_itv_pwr_unplug_mngt(pm2, val &
+                                               (PM2XXX_INT2_ITVPWR1UNPLUG |
+                                               PM2XXX_INT2_ITVPWR2UNPLUG));
+       }
+
+       return ret;
+}
+
+static int pm2_int_reg2(void *pm2_data, int val)
+{
+       struct pm2xxx_charger *pm2 = pm2_data;
+       int ret = 0;
+
+       if (val & PM2XXX_INT3_ITAUTOTIMEOUTWD)
+               ret = pm2xxx_charger_wd_exp_mngt(pm2, val);
+
+       if (val & (PM2XXX_INT3_ITCHPRECHARGEWD |
+                               PM2XXX_INT3_ITCHCCWD | PM2XXX_INT3_ITCHCVWD)) {
+               dev_dbg(pm2->dev,
+                       "Watchdog occured for precharge, CC and CV charge\n");
+       }
+
+       return ret;
+}
+
+static int pm2_int_reg3(void *pm2_data, int val)
+{
+       struct pm2xxx_charger *pm2 = pm2_data;
+       int ret = 0;
+
+       if (val & (PM2XXX_INT4_ITCHARGINGON)) {
+               dev_dbg(pm2->dev,
+                       "charging operation has started\n");
+       }
+
+       if (val & (PM2XXX_INT4_ITVRESUME)) {
+               dev_dbg(pm2->dev,
+                       "battery discharged down to VResume threshold\n");
+       }
+
+       if (val & (PM2XXX_INT4_ITBATTFULL)) {
+               dev_dbg(pm2->dev , "battery fully detected\n");
+       }
+
+       if (val & (PM2XXX_INT4_ITCVPHASE)) {
+               dev_dbg(pm2->dev, "CV phase enter with 0.5C charging\n");
+       }
+
+       if (val & (PM2XXX_INT4_ITVPWR2OVV | PM2XXX_INT4_ITVPWR1OVV)) {
+               pm2->failure_case = VPWR_OVV;
+               ret = pm2xxx_charger_ovv_mngt(pm2, val &
+                       (PM2XXX_INT4_ITVPWR2OVV | PM2XXX_INT4_ITVPWR1OVV));
+               dev_dbg(pm2->dev, "VPWR/VSYSTEM overvoltage detected\n");
+       }
+
+       if (val & (PM2XXX_INT4_S_ITBATTEMPCOLD |
+                               PM2XXX_INT4_S_ITBATTEMPHOT)) {
+               ret = pm2xxx_charger_batt_therm_mngt(pm2, val &
+                       (PM2XXX_INT4_S_ITBATTEMPCOLD |
+                       PM2XXX_INT4_S_ITBATTEMPHOT));
+               dev_dbg(pm2->dev, "BTEMP is too Low/High\n");
+       }
+
+       return ret;
+}
+
+static int pm2_int_reg4(void *pm2_data, int val)
+{
+       struct pm2xxx_charger *pm2 = pm2_data;
+       int ret = 0;
+
+       if (val & PM2XXX_INT5_ITVSYSTEMOVV) {
+               pm2->failure_case = VSYSTEM_OVV;
+               ret = pm2xxx_charger_ovv_mngt(pm2, val &
+                                               PM2XXX_INT5_ITVSYSTEMOVV);
+               dev_dbg(pm2->dev, "VSYSTEM overvoltage detected\n");
+       }
+
+       if (val & (PM2XXX_INT5_ITTHERMALWARNINGFALL |
+                               PM2XXX_INT5_ITTHERMALWARNINGRISE |
+                               PM2XXX_INT5_ITTHERMALSHUTDOWNFALL |
+                               PM2XXX_INT5_ITTHERMALSHUTDOWNRISE)) {
+               dev_dbg(pm2->dev, "BTEMP die temperature is too Low/High\n");
+               ret = pm2xxx_charger_die_therm_mngt(pm2, val &
+                       (PM2XXX_INT5_ITTHERMALWARNINGFALL |
+                       PM2XXX_INT5_ITTHERMALWARNINGRISE |
+                       PM2XXX_INT5_ITTHERMALSHUTDOWNFALL |
+                       PM2XXX_INT5_ITTHERMALSHUTDOWNRISE));
+       }
+
+       return ret;
+}
+
+static int pm2_int_reg5(void *pm2_data, int val)
+{
+       struct pm2xxx_charger *pm2 = pm2_data;
+       int ret = 0;
+
+
+       if (val & (PM2XXX_INT6_ITVPWR2DROP | PM2XXX_INT6_ITVPWR1DROP)) {
+               dev_dbg(pm2->dev, "VMPWR drop to VBAT level\n");
+       }
+
+       if (val & (PM2XXX_INT6_ITVPWR2VALIDRISE |
+                       PM2XXX_INT6_ITVPWR1VALIDRISE |
+                       PM2XXX_INT6_ITVPWR2VALIDFALL |
+                       PM2XXX_INT6_ITVPWR1VALIDFALL)) {
+               dev_dbg(pm2->dev, "Falling/Rising edge on WPWR1/2\n");
+       }
+
+       return ret;
+}
+
+static irqreturn_t pm2xxx_irq_int(int irq, void *data)
+{
+       struct pm2xxx_charger *pm2 = data;
+       struct pm2xxx_interrupts *interrupt = pm2->pm2_int;
+       int i;
+
+       for (i = 0; i < PM2XXX_NUM_INT_REG; i++) {
+                pm2xxx_reg_read(pm2,
+                               pm2xxx_interrupt_registers[i],
+                               &(interrupt->reg[i]));
+
+               if (interrupt->reg[i] > 0)
+                       interrupt->handler[i](pm2, interrupt->reg[i]);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static int pm2xxx_charger_get_ac_cv(struct pm2xxx_charger *pm2)
+{
+       int ret = 0;
+       u8 val;
+
+       if (pm2->ac.charger_connected && pm2->ac.charger_online) {
+
+               ret = pm2xxx_reg_read(pm2, PM2XXX_SRCE_REG_INT4, &val);
+               if (ret < 0) {
+                       dev_err(pm2->dev, "%s pm2xxx read failed\n", __func__);
+                       goto out;
+               }
+
+               if (val & PM2XXX_INT4_S_ITCVPHASE)
+                       ret = PM2XXX_CONST_VOLT;
+               else
+                       ret = PM2XXX_CONST_CURR;
+       }
+out:
+       return ret;
+}
+
+static int pm2xxx_current_to_regval(int curr)
+{
+       int i;
+
+       if (curr < pm2xxx_charger_current_map[0])
+               return 0;
+
+       for (i = 1; i < ARRAY_SIZE(pm2xxx_charger_current_map); i++) {
+               if (curr < pm2xxx_charger_current_map[i])
+                       return (i - 1);
+       }
+
+       i = ARRAY_SIZE(pm2xxx_charger_current_map) - 1;
+       if (curr == pm2xxx_charger_current_map[i])
+               return i;
+       else
+               return -EINVAL;
+}
+
+static int pm2xxx_voltage_to_regval(int curr)
+{
+       int i;
+
+       if (curr < pm2xxx_charger_voltage_map[0])
+               return 0;
+
+       for (i = 1; i < ARRAY_SIZE(pm2xxx_charger_voltage_map); i++) {
+               if (curr < pm2xxx_charger_voltage_map[i])
+                       return i - 1;
+       }
+
+       i = ARRAY_SIZE(pm2xxx_charger_voltage_map) - 1;
+       if (curr == pm2xxx_charger_voltage_map[i])
+               return i;
+       else
+               return -EINVAL;
+}
+
+static int pm2xxx_charger_update_charger_current(struct ux500_charger *charger,
+               int ich_out)
+{
+       int ret;
+       int curr_index;
+       struct pm2xxx_charger *pm2;
+       u8 val;
+
+       if (charger->psy.type == POWER_SUPPLY_TYPE_MAINS)
+               pm2 = to_pm2xxx_charger_ac_device_info(charger);
+       else
+               return -ENXIO;
+
+       curr_index = pm2xxx_current_to_regval(ich_out);
+       if (curr_index < 0) {
+               dev_err(pm2->dev,
+                       "Charger current too high, charging not started\n");
+               return -ENXIO;
+       }
+
+       ret = pm2xxx_reg_read(pm2, PM2XXX_BATT_CTRL_REG6, &val);
+       if (ret >= 0) {
+               val &= ~PM2XXX_DIR_CH_CC_CURRENT_MASK;
+               val |= curr_index;
+               ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG6, val);
+               if (ret < 0) {
+                       dev_err(pm2->dev,
+                               "%s write failed\n", __func__);
+               }
+       } else
+               dev_err(pm2->dev, "%s read failed\n", __func__);
+
+       return ret;
+}
+
+static int pm2xxx_charger_ac_get_property(struct power_supply *psy,
+       enum power_supply_property psp,
+       union power_supply_propval *val)
+{
+       struct pm2xxx_charger *pm2;
+
+       pm2 = to_pm2xxx_charger_ac_device_info(psy_to_ux500_charger(psy));
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_HEALTH:
+               if (pm2->flags.mainextchnotok)
+                       val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+               else if (pm2->ac.wd_expired)
+                       val->intval = POWER_SUPPLY_HEALTH_DEAD;
+               else if (pm2->flags.main_thermal_prot)
+                       val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+               else
+                       val->intval = POWER_SUPPLY_HEALTH_GOOD;
+               break;
+       case POWER_SUPPLY_PROP_ONLINE:
+               val->intval = pm2->ac.charger_online;
+               break;
+       case POWER_SUPPLY_PROP_PRESENT:
+               val->intval = pm2->ac.charger_connected;
+               break;
+       case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+               pm2->ac.cv_active = pm2xxx_charger_get_ac_cv(pm2);
+               val->intval = pm2->ac.cv_active;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int pm2xxx_charging_init(struct pm2xxx_charger *pm2)
+{
+       int ret = 0;
+
+       /* enable CC and CV watchdog */
+       ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG3,
+               (PM2XXX_CH_WD_CV_PHASE_60MIN | PM2XXX_CH_WD_CC_PHASE_60MIN));
+       if (ret < 0)
+               return ret;
+
+       /* enable precharge watchdog */
+       ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG4,
+                                       PM2XXX_CH_WD_PRECH_PHASE_60MIN);
+
+       /* Disable auto timeout */
+       ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG5,
+                                       PM2XXX_CH_WD_AUTO_TIMEOUT_20MIN);
+
+       /*
+        * EOC current level = 100mA
+        * Precharge current level = 100mA
+        * CC current level = 1000mA
+        */
+       ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG6,
+               (PM2XXX_DIR_CH_CC_CURRENT_1000MA |
+               PM2XXX_CH_PRECH_CURRENT_100MA |
+               PM2XXX_CH_EOC_CURRENT_100MA));
+
+       /*
+        * Recharge threshold = 3.8V
+        * Precharge to CC threshold = 2.9V
+        */
+       ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG7,
+               (PM2XXX_CH_PRECH_VOL_2_9 | PM2XXX_CH_VRESUME_VOL_3_8));
+
+       /* float voltage charger level = 4.2V */
+       ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG8,
+               PM2XXX_CH_VOLT_4_2);
+
+       /* Voltage drop between VBAT and VSYS in HW charging = 300mV */
+       ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG9,
+               (PM2XXX_CH_150MV_DROP_300MV | PM2XXX_CHARCHING_INFO_DIS |
+               PM2XXX_CH_CC_REDUCED_CURRENT_IDENT |
+               PM2XXX_CH_CC_MODEDROP_DIS));
+
+       /* Input charger overvoltage level = 10V */
+       ret = pm2xxx_reg_write(pm2, PM2XXX_INP_VOLT_VPWR2,
+                                       PM2XXX_VPWR2_OVV_10);
+       ret = pm2xxx_reg_write(pm2, PM2XXX_INP_VOLT_VPWR1,
+                                       PM2XXX_VPWR1_OVV_10);
+
+       /* Input charger drop */
+       ret = pm2xxx_reg_write(pm2, PM2XXX_INP_DROP_VPWR2,
+               (PM2XXX_VPWR2_HW_OPT_DIS | PM2XXX_VPWR2_VALID_DIS |
+               PM2XXX_VPWR2_DROP_DIS));
+       ret = pm2xxx_reg_write(pm2, PM2XXX_INP_DROP_VPWR1,
+               (PM2XXX_VPWR1_HW_OPT_DIS | PM2XXX_VPWR1_VALID_DIS |
+               PM2XXX_VPWR1_DROP_DIS));
+
+       /* Enable battery low monitoring */
+       ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_LOW_LEV_COMP_REG,
+               PM2XXX_VBAT_LOW_MONITORING_ENA);
+
+       /* Disable LED */
+       ret = pm2xxx_reg_write(pm2, PM2XXX_LED_CTRL_REG,
+               PM2XXX_LED_SELECT_DIS);
+
+       return ret;
+}
+
+static int pm2xxx_charger_ac_en(struct ux500_charger *charger,
+       int enable, int vset, int iset)
+{
+       int ret;
+       int volt_index;
+       int curr_index;
+       u8 val;
+
+       struct pm2xxx_charger *pm2 = to_pm2xxx_charger_ac_device_info(charger);
+
+       if (enable) {
+               if (!pm2->ac.charger_connected) {
+                       dev_dbg(pm2->dev, "AC charger not connected\n");
+                       return -ENXIO;
+               }
+
+               dev_dbg(pm2->dev, "Enable AC: %dmV %dmA\n", vset, iset);
+               if (!pm2->vddadc_en_ac) {
+                       regulator_enable(pm2->regu);
+                       pm2->vddadc_en_ac = true;
+               }
+
+               ret = pm2xxx_charging_init(pm2);
+               if (ret < 0) {
+                       dev_err(pm2->dev, "%s charging init failed\n",
+                                       __func__);
+                       goto error_occured;
+               }
+
+               volt_index = pm2xxx_voltage_to_regval(vset);
+               curr_index = pm2xxx_current_to_regval(iset);
+
+               if (volt_index < 0 || curr_index < 0) {
+                       dev_err(pm2->dev,
+                               "Charger voltage or current too high, "
+                               "charging not started\n");
+                       return -ENXIO;
+               }
+
+               ret = pm2xxx_reg_read(pm2, PM2XXX_BATT_CTRL_REG8, &val);
+               if (ret < 0) {
+                       dev_err(pm2->dev, "%s pm2xxx read failed\n", __func__);
+                       goto error_occured;
+               }
+               val &= ~PM2XXX_CH_VOLT_MASK;
+               val |= volt_index;
+               ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG8, val);
+               if (ret < 0) {
+                       dev_err(pm2->dev, "%s pm2xxx write failed\n", __func__);
+                       goto error_occured;
+               }
+
+               ret = pm2xxx_reg_read(pm2, PM2XXX_BATT_CTRL_REG6, &val);
+               if (ret < 0) {
+                       dev_err(pm2->dev, "%s pm2xxx read failed\n", __func__);
+                       goto error_occured;
+               }
+               val &= ~PM2XXX_DIR_CH_CC_CURRENT_MASK;
+               val |= curr_index;
+               ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG6, val);
+               if (ret < 0) {
+                       dev_err(pm2->dev, "%s pm2xxx write failed\n", __func__);
+                       goto error_occured;
+               }
+
+               if (!pm2->bat->enable_overshoot) {
+                       ret = pm2xxx_reg_read(pm2, PM2XXX_LED_CTRL_REG, &val);
+                       if (ret < 0) {
+                               dev_err(pm2->dev, "%s pm2xxx read failed\n",
+                                                               __func__);
+                               goto error_occured;
+                       }
+                       val |= PM2XXX_ANTI_OVERSHOOT_EN;
+                       ret = pm2xxx_reg_write(pm2, PM2XXX_LED_CTRL_REG, val);
+                       if (ret < 0) {
+                               dev_err(pm2->dev, "%s pm2xxx write failed\n",
+                                                               __func__);
+                               goto error_occured;
+                       }
+               }
+
+               ret = pm2xxx_charging_enable_mngt(pm2);
+               if (ret < 0) {
+                       dev_err(pm2->dev, "Failed to enable"
+                                               "pm2xxx ac charger\n");
+                       goto error_occured;
+               }
+
+               pm2->ac.charger_online = 1;
+       } else {
+               pm2->ac.charger_online = 0;
+               pm2->ac.wd_expired = false;
+
+               /* Disable regulator if enabled */
+               if (pm2->vddadc_en_ac) {
+                       regulator_disable(pm2->regu);
+                       pm2->vddadc_en_ac = false;
+               }
+
+               ret = pm2xxx_charging_disable_mngt(pm2);
+               if (ret < 0) {
+                       dev_err(pm2->dev, "failed to disable"
+                                               "pm2xxx ac charger\n");
+                       goto error_occured;
+               }
+
+               dev_dbg(pm2->dev, "PM2301: " "Disabled AC charging\n");
+       }
+       power_supply_changed(&pm2->ac_chg.psy);
+
+error_occured:
+       return ret;
+}
+
+static int pm2xxx_charger_watchdog_kick(struct ux500_charger *charger)
+{
+       int ret;
+       struct pm2xxx_charger *pm2;
+
+       if (charger->psy.type == POWER_SUPPLY_TYPE_MAINS)
+               pm2 = to_pm2xxx_charger_ac_device_info(charger);
+       else
+               return -ENXIO;
+
+       ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_WD_KICK, WD_TIMER);
+       if (ret)
+               dev_err(pm2->dev, "Failed to kick WD!\n");
+
+       return ret;
+}
+
+static void pm2xxx_charger_ac_work(struct work_struct *work)
+{
+       struct pm2xxx_charger *pm2 = container_of(work,
+               struct pm2xxx_charger, ac_work);
+
+
+       power_supply_changed(&pm2->ac_chg.psy);
+       sysfs_notify(&pm2->ac_chg.psy.dev->kobj, NULL, "present");
+}
+
+static void pm2xxx_charger_check_main_thermal_prot_work(
+       struct work_struct *work)
+{
+}
+
+static struct pm2xxx_interrupts pm2xxx_int = {
+       .handler[0] = pm2_int_reg0,
+       .handler[1] = pm2_int_reg1,
+       .handler[2] = pm2_int_reg2,
+       .handler[3] = pm2_int_reg3,
+       .handler[4] = pm2_int_reg4,
+       .handler[5] = pm2_int_reg5,
+};
+
+static struct pm2xxx_irq pm2xxx_charger_irq[] = {
+       {"PM2XXX_IRQ_INT", pm2xxx_irq_int},
+};
+
+static int pm2xxx_wall_charger_resume(struct i2c_client *i2c_client)
+{
+       return 0;
+}
+
+static int pm2xxx_wall_charger_suspend(struct i2c_client *i2c_client,
+       pm_message_t state)
+{
+       return 0;
+}
+
+static int __devinit pm2xxx_wall_charger_probe(struct i2c_client *i2c_client,
+               const struct i2c_device_id *id)
+{
+       struct pm2xxx_platform_data *pl_data = i2c_client->dev.platform_data;
+       struct pm2xxx_charger *pm2;
+       int ret = 0;
+       u8 val;
+
+       pm2 = kzalloc(sizeof(struct pm2xxx_charger), GFP_KERNEL);
+       if (!pm2) {
+               dev_err(pm2->dev, "pm2xxx_charger allocation failed\n");
+               return -ENOMEM;
+       }
+
+       /* get parent data */
+       pm2->dev = &i2c_client->dev;
+       pm2->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+
+       pm2->pm2_int = &pm2xxx_int;
+
+       /* get charger specific platform data */
+       if (!pl_data->wall_charger) {
+               dev_err(pm2->dev, "no charger platform data supplied\n");
+               ret = -EINVAL;
+               goto free_device_info;
+       }
+
+       pm2->pdata = pl_data->wall_charger;
+
+       /* get battery specific platform data */
+       if (!pl_data->battery) {
+               dev_err(pm2->dev, "no battery platform data supplied\n");
+               ret = -EINVAL;
+               goto free_device_info;
+       }
+
+       pm2->bat = pl_data->battery;
+
+       /* get lpn GPIO from platform data */
+       if (!pm2->pdata->lpn_gpio) {
+               dev_err(pm2->dev, "no lpn gpio data supplied\n");
+               ret = -EINVAL;
+               goto free_device_info;
+       }
+       pm2->lpn_pin = pm2->pdata->lpn_gpio;
+
+       if (!i2c_check_functionality(i2c_client->adapter,
+                       I2C_FUNC_SMBUS_BYTE_DATA |
+                       I2C_FUNC_SMBUS_READ_WORD_DATA)) {
+               ret = -ENODEV;
+               dev_info(pm2->dev, "pm2301 i2c_check_functionality failed\n");
+               goto free_device_info;
+       }
+
+       pm2->config.pm2xxx_i2c = i2c_client;
+       pm2->config.pm2xxx_id = (struct i2c_device_id *) id;
+       i2c_set_clientdata(i2c_client, pm2);
+
+       /* AC supply */
+       /* power_supply base class */
+       pm2->ac_chg.psy.name = pm2->pdata->label;
+       pm2->ac_chg.psy.type = POWER_SUPPLY_TYPE_MAINS;
+       pm2->ac_chg.psy.properties = pm2xxx_charger_ac_props;
+       pm2->ac_chg.psy.num_properties = ARRAY_SIZE(pm2xxx_charger_ac_props);
+       pm2->ac_chg.psy.get_property = pm2xxx_charger_ac_get_property;
+       pm2->ac_chg.psy.supplied_to = pm2->pdata->supplied_to;
+       pm2->ac_chg.psy.num_supplicants = pm2->pdata->num_supplicants;
+       /* pm2xxx_charger sub-class */
+       pm2->ac_chg.ops.enable = &pm2xxx_charger_ac_en;
+       pm2->ac_chg.ops.kick_wd = &pm2xxx_charger_watchdog_kick;
+       pm2->ac_chg.ops.update_curr = &pm2xxx_charger_update_charger_current;
+       pm2->ac_chg.max_out_volt = pm2xxx_charger_voltage_map[
+               ARRAY_SIZE(pm2xxx_charger_voltage_map) - 1];
+       pm2->ac_chg.max_out_curr = pm2xxx_charger_current_map[
+               ARRAY_SIZE(pm2xxx_charger_current_map) - 1];
+       pm2->ac_chg.wdt_refresh = WD_KICK_INTERVAL;
+       pm2->ac_chg.enabled = true;
+       pm2->ac_chg.external = true;
+
+       /* Create a work queue for the charger */
+       pm2->charger_wq =
+               create_singlethread_workqueue("pm2xxx_charger_wq");
+       if (pm2->charger_wq == NULL) {
+               dev_err(pm2->dev, "failed to create work queue\n");
+               goto free_device_info;
+       }
+
+       /* Init work for charger detection */
+       INIT_WORK(&pm2->ac_work, pm2xxx_charger_ac_work);
+
+       /* Init work for checking HW status */
+       INIT_WORK(&pm2->check_main_thermal_prot_work,
+               pm2xxx_charger_check_main_thermal_prot_work);
+
+       /*
+        * VDD ADC supply needs to be enabled from this driver when there
+        * is a charger connected to avoid erroneous BTEMP_HIGH/LOW
+        * interrupts during charging
+        */
+       pm2->regu = regulator_get(pm2->dev, "vddadc");
+       if (IS_ERR(pm2->regu)) {
+               ret = PTR_ERR(pm2->regu);
+               dev_err(pm2->dev, "failed to get vddadc regulator\n");
+               goto free_charger_wq;
+       }
+
+       /* Register AC charger class */
+       ret = power_supply_register(pm2->dev, &pm2->ac_chg.psy);
+       if (ret) {
+               dev_err(pm2->dev, "failed to register AC charger\n");
+               goto free_regulator;
+       }
+
+       /* Register interrupts */
+       ret = request_threaded_irq(pm2->pdata->irq_number, NULL,
+                               pm2xxx_charger_irq[0].isr,
+                               pm2->pdata->irq_type,
+                               pm2xxx_charger_irq[0].name, pm2);
+
+       if (ret != 0) {
+               dev_err(pm2->dev, "failed to request %s IRQ %d: %d\n",
+               pm2xxx_charger_irq[0].name, pm2->pdata->irq_number, ret);
+               goto unregister_pm2xxx_charger;
+       }
+
+       /* Initialize lock */
+       mutex_init(&pm2->lock);
+
+       /*
+        * The charger detection mechanism requires the LPN pin to be pulled
+        * high during I2C communication when the charger is not connected.
+        * The LPN pin of the PM2301 is GPIO60 of the AB9540.
+        */
+       ret = gpio_request(pm2->lpn_pin, "pm2301_lpm_gpio");
+       if (ret < 0) {
+               dev_err(pm2->dev, "pm2301_lpm_gpio request failed\n");
+               goto unregister_pm2xxx_charger;
+       }
+       ret = gpio_direction_output(pm2->lpn_pin, 0);
+       if (ret < 0) {
+               dev_err(pm2->dev, "pm2301_lpm_gpio direction failed\n");
+               goto free_gpio;
+       }
+
+       ret = pm2xxx_charger_detection(pm2, &val);
+
+       if ((ret == 0) && val) {
+               pm2->ac.charger_connected = 1;
+               pm2->ac_conn = true;
+               power_supply_changed(&pm2->ac_chg.psy);
+               sysfs_notify(&pm2->ac_chg.psy.dev->kobj, NULL, "present");
+       }
+
+       return 0;
+
+free_gpio:
+       gpio_free(pm2->lpn_pin);
+unregister_pm2xxx_charger:
+       /* unregister power supply */
+       power_supply_unregister(&pm2->ac_chg.psy);
+free_regulator:
+       /* release the vddadc regulator */
+       regulator_put(pm2->regu);
+free_charger_wq:
+       destroy_workqueue(pm2->charger_wq);
+free_device_info:
+       kfree(pm2);
+       return ret;
+}
+
+static int __devexit pm2xxx_wall_charger_remove(struct i2c_client *i2c_client)
+{
+       struct pm2xxx_charger *pm2 = i2c_get_clientdata(i2c_client);
+
+       /* Disable AC charging */
+       pm2xxx_charger_ac_en(&pm2->ac_chg, false, 0, 0);
+
+       /* Disable interrupts */
+       free_irq(pm2->pdata->irq_number, pm2);
+
+       /* Delete the work queue */
+       destroy_workqueue(pm2->charger_wq);
+
+       flush_scheduled_work();
+
+       /* release the vddadc regulator */
+       regulator_put(pm2->regu);
+
+       power_supply_unregister(&pm2->ac_chg.psy);
+
+       /* Free GPIO60 */
+       gpio_free(pm2->lpn_pin);
+
+       kfree(pm2);
+
+       return 0;
+}
+
+static const struct i2c_device_id pm2xxx_id[] = {
+       { "pm2301", 0 },
+       { }
+};
+
+MODULE_DEVICE_TABLE(i2c, pm2xxx_id);
+
+static struct i2c_driver pm2xxx_charger_driver = {
+       .probe = pm2xxx_wall_charger_probe,
+       .remove = __devexit_p(pm2xxx_wall_charger_remove),
+       .suspend = pm2xxx_wall_charger_suspend,
+       .resume = pm2xxx_wall_charger_resume,
+       .driver = {
+               .name = "pm2xxx-wall_charger",
+               .owner = THIS_MODULE,
+       },
+       .id_table = pm2xxx_id,
+};
+
+static int __init pm2xxx_charger_init(void)
+{
+       return i2c_add_driver(&pm2xxx_charger_driver);
+}
+
+static void __exit pm2xxx_charger_exit(void)
+{
+       i2c_del_driver(&pm2xxx_charger_driver);
+}
+
+subsys_initcall_sync(pm2xxx_charger_init);
+module_exit(pm2xxx_charger_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Rajkumar kasirajan, Olivier Launay");
+MODULE_ALIAS("platform:pm2xxx-charger");
+MODULE_DESCRIPTION("PM2xxx charger management driver");
+
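
As a worked check of the two lookup helpers defined earlier in this file, the sketch below (hypothetical, not part of the patch, and it would have to live alongside the static helpers in pm2301_charger.c) shows how requested millivolt/milliampere values map onto register indexes: values between table entries round down to the next lower step, and requests above the last entry are rejected.

/* Hypothetical self-test sketch, kept out of the actual patch. */
static void pm2xxx_regval_selftest(void)
{
	WARN_ON(pm2xxx_voltage_to_regval(4200) != 0x1C);	/* PM2XXX_CH_VOLT_4_2 */
	WARN_ON(pm2xxx_voltage_to_regval(4210) != 0x1C);	/* rounds down to 4200 mV */
	WARN_ON(pm2xxx_current_to_regval(1000) != 0x05);	/* ..CC_CURRENT_1000MA */
	WARN_ON(pm2xxx_current_to_regval(3100) != -EINVAL);	/* above the last table entry */
}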
diff --git a/drivers/power/pm2301_charger.h b/drivers/power/pm2301_charger.h
new file mode 100644 (file)
index 0000000..e6319cd
--- /dev/null
@@ -0,0 +1,513 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * PM2301 power supply interface
+ *
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#ifndef PM2301_CHARGER_H
+#define PM2301_CHARGER_H
+
+#define MAIN_WDOG_ENA                  0x01
+#define MAIN_WDOG_KICK                 0x02
+#define MAIN_WDOG_DIS                  0x00
+#define CHARG_WD_KICK                  0x01
+#define MAIN_CH_ENA                    0x01
+#define MAIN_CH_NO_OVERSHOOT_ENA_N     0x02
+#define MAIN_CH_DET                    0x01
+#define MAIN_CH_CV_ON                  0x04
+#define OTP_ENABLE_WD                  0x01
+
+#define MAIN_CH_INPUT_CURR_SHIFT       4
+
+#define LED_INDICATOR_PWM_ENA          0x01
+#define LED_INDICATOR_PWM_DIS          0x00
+#define LED_IND_CUR_5MA                        0x04
+#define LED_INDICATOR_PWM_DUTY_252_256 0xBF
+
+/* HW failure constants */
+#define MAIN_CH_TH_PROT                        0x02
+#define MAIN_CH_NOK                    0x01
+
+/* Watchdog timeout constant */
+#define WD_TIMER                       0x30 /* 4min */
+#define WD_KICK_INTERVAL               (30 * HZ)
+
+#define PM2XXX_NUM_INT_REG             0x6
+
+/* Constant voltage/current */
+#define PM2XXX_CONST_CURR              0x0
+#define PM2XXX_CONST_VOLT              0x1
+
+/* Lowest charger voltage is 3.39V -> 0x4E */
+#define LOW_VOLT_REG                   0x4E
+
+#define PM2XXX_BATT_CTRL_REG1          0x00
+#define PM2XXX_BATT_CTRL_REG2          0x01
+#define PM2XXX_BATT_CTRL_REG3          0x02
+#define PM2XXX_BATT_CTRL_REG4          0x03
+#define PM2XXX_BATT_CTRL_REG5          0x04
+#define PM2XXX_BATT_CTRL_REG6          0x05
+#define PM2XXX_BATT_CTRL_REG7          0x06
+#define PM2XXX_BATT_CTRL_REG8          0x07
+#define PM2XXX_NTC_CTRL_REG1           0x08
+#define PM2XXX_NTC_CTRL_REG2           0x09
+#define PM2XXX_BATT_CTRL_REG9          0x0A
+#define PM2XXX_BATT_STAT_REG1          0x0B
+#define PM2XXX_INP_VOLT_VPWR2          0x11
+#define PM2XXX_INP_DROP_VPWR2          0x13
+#define PM2XXX_INP_VOLT_VPWR1          0x15
+#define PM2XXX_INP_DROP_VPWR1          0x17
+#define PM2XXX_INP_MODE_VPWR           0x18
+#define PM2XXX_BATT_WD_KICK            0x70
+#define PM2XXX_DEV_VER_STAT            0x0C
+#define PM2XXX_THERM_WARN_CTRL_REG     0x20
+#define PM2XXX_BATT_DISC_REG           0x21
+#define PM2XXX_BATT_LOW_LEV_COMP_REG   0x22
+#define PM2XXX_BATT_LOW_LEV_VAL_REG    0x23
+#define PM2XXX_I2C_PAD_CTRL_REG                0x24
+#define PM2XXX_SW_CTRL_REG             0x26
+#define PM2XXX_LED_CTRL_REG            0x28
+
+#define PM2XXX_REG_INT1                        0x40
+#define PM2XXX_MASK_REG_INT1           0x50
+#define PM2XXX_SRCE_REG_INT1           0x60
+#define PM2XXX_REG_INT2                        0x41
+#define PM2XXX_MASK_REG_INT2           0x51
+#define PM2XXX_SRCE_REG_INT2           0x61
+#define PM2XXX_REG_INT3                        0x42
+#define PM2XXX_MASK_REG_INT3           0x52
+#define PM2XXX_SRCE_REG_INT3           0x62
+#define PM2XXX_REG_INT4                        0x43
+#define PM2XXX_MASK_REG_INT4           0x53
+#define PM2XXX_SRCE_REG_INT4           0x63
+#define PM2XXX_REG_INT5                        0x44
+#define PM2XXX_MASK_REG_INT5           0x54
+#define PM2XXX_SRCE_REG_INT5           0x64
+#define PM2XXX_REG_INT6                        0x45
+#define PM2XXX_MASK_REG_INT6           0x55
+#define PM2XXX_SRCE_REG_INT6           0x65
+
+#define VPWR_OVV                       0x0
+#define VSYSTEM_OVV                    0x1
+
+/* control Reg 1 */
+#define PM2XXX_CH_RESUME_EN            0x1
+#define PM2XXX_CH_RESUME_DIS           0x0
+
+/* control Reg 2 */
+#define PM2XXX_CH_AUTO_RESUME_EN       0X2
+#define PM2XXX_CH_AUTO_RESUME_DIS      0X0
+#define PM2XXX_CHARGER_ENA             0x4
+#define PM2XXX_CHARGER_DIS             0x0
+
+/* control Reg 3 */
+#define PM2XXX_CH_WD_CC_PHASE_OFF      0x0
+#define PM2XXX_CH_WD_CC_PHASE_5MIN     0x1
+#define PM2XXX_CH_WD_CC_PHASE_10MIN    0x2
+#define PM2XXX_CH_WD_CC_PHASE_30MIN    0x3
+#define PM2XXX_CH_WD_CC_PHASE_60MIN    0x4
+#define PM2XXX_CH_WD_CC_PHASE_120MIN   0x5
+#define PM2XXX_CH_WD_CC_PHASE_240MIN   0x6
+#define PM2XXX_CH_WD_CC_PHASE_360MIN   0x7
+
+#define PM2XXX_CH_WD_CV_PHASE_OFF      (0x0<<3)
+#define PM2XXX_CH_WD_CV_PHASE_5MIN     (0x1<<3)
+#define PM2XXX_CH_WD_CV_PHASE_10MIN    (0x2<<3)
+#define PM2XXX_CH_WD_CV_PHASE_30MIN    (0x3<<3)
+#define PM2XXX_CH_WD_CV_PHASE_60MIN    (0x4<<3)
+#define PM2XXX_CH_WD_CV_PHASE_120MIN   (0x5<<3)
+#define PM2XXX_CH_WD_CV_PHASE_240MIN   (0x6<<3)
+#define PM2XXX_CH_WD_CV_PHASE_360MIN   (0x7<<3)
+
+/* control Reg 4 */
+#define PM2XXX_CH_WD_PRECH_PHASE_OFF   0x0
+#define PM2XXX_CH_WD_PRECH_PHASE_1MIN  0x1
+#define PM2XXX_CH_WD_PRECH_PHASE_5MIN  0x2
+#define PM2XXX_CH_WD_PRECH_PHASE_10MIN 0x3
+#define PM2XXX_CH_WD_PRECH_PHASE_30MIN 0x4
+#define PM2XXX_CH_WD_PRECH_PHASE_60MIN 0x5
+#define PM2XXX_CH_WD_PRECH_PHASE_120MIN        0x6
+#define PM2XXX_CH_WD_PRECH_PHASE_240MIN        0x7
+
+/* control Reg 5 */
+#define PM2XXX_CH_WD_AUTO_TIMEOUT_NONE 0x0
+#define PM2XXX_CH_WD_AUTO_TIMEOUT_20MIN        0x1
+
+/* control Reg 6 */
+#define PM2XXX_DIR_CH_CC_CURRENT_MASK  0x0F
+#define PM2XXX_DIR_CH_CC_CURRENT_200MA 0x0
+#define PM2XXX_DIR_CH_CC_CURRENT_400MA 0x2
+#define PM2XXX_DIR_CH_CC_CURRENT_600MA 0x3
+#define PM2XXX_DIR_CH_CC_CURRENT_800MA 0x4
+#define PM2XXX_DIR_CH_CC_CURRENT_1000MA        0x5
+#define PM2XXX_DIR_CH_CC_CURRENT_1200MA        0x6
+#define PM2XXX_DIR_CH_CC_CURRENT_1400MA        0x7
+#define PM2XXX_DIR_CH_CC_CURRENT_1600MA        0x8
+#define PM2XXX_DIR_CH_CC_CURRENT_1800MA        0x9
+#define PM2XXX_DIR_CH_CC_CURRENT_2000MA        0xA
+#define PM2XXX_DIR_CH_CC_CURRENT_2200MA        0xB
+#define PM2XXX_DIR_CH_CC_CURRENT_2400MA        0xC
+#define PM2XXX_DIR_CH_CC_CURRENT_2600MA        0xD
+#define PM2XXX_DIR_CH_CC_CURRENT_2800MA        0xE
+#define PM2XXX_DIR_CH_CC_CURRENT_3000MA        0xF
+
+#define PM2XXX_CH_PRECH_CURRENT_MASK   0x30
+#define PM2XXX_CH_PRECH_CURRENT_25MA   (0x0<<4)
+#define PM2XXX_CH_PRECH_CURRENT_50MA   (0x1<<4)
+#define PM2XXX_CH_PRECH_CURRENT_75MA   (0x2<<4)
+#define PM2XXX_CH_PRECH_CURRENT_100MA  (0x3<<4)
+
+#define PM2XXX_CH_EOC_CURRENT_MASK     0xC0
+#define PM2XXX_CH_EOC_CURRENT_100MA    (0x0<<6)
+#define PM2XXX_CH_EOC_CURRENT_150MA    (0x1<<6)
+#define PM2XXX_CH_EOC_CURRENT_300MA    (0x2<<6)
+#define PM2XXX_CH_EOC_CURRENT_400MA    (0x3<<6)
+
+/* control Reg 7 */
+#define PM2XXX_CH_PRECH_VOL_2_5                0x0
+#define PM2XXX_CH_PRECH_VOL_2_7                0x1
+#define PM2XXX_CH_PRECH_VOL_2_9                0x2
+#define PM2XXX_CH_PRECH_VOL_3_1                0x3
+
+#define PM2XXX_CH_VRESUME_VOL_3_2      (0x0<<2)
+#define PM2XXX_CH_VRESUME_VOL_3_4      (0x1<<2)
+#define PM2XXX_CH_VRESUME_VOL_3_6      (0x2<<2)
+#define PM2XXX_CH_VRESUME_VOL_3_8      (0x3<<2)
+
+/* control Reg 8 */
+#define PM2XXX_CH_VOLT_MASK            0x3F
+#define PM2XXX_CH_VOLT_3_5             0x0
+#define PM2XXX_CH_VOLT_3_5225          0x1
+#define PM2XXX_CH_VOLT_3_6             0x4
+#define PM2XXX_CH_VOLT_3_7             0x8
+#define PM2XXX_CH_VOLT_4_0             0x14
+#define PM2XXX_CH_VOLT_4_175           0x1B
+#define PM2XXX_CH_VOLT_4_2             0x1C
+#define PM2XXX_CH_VOLT_4_275           0x1F
+#define PM2XXX_CH_VOLT_4_3             0x20
+
+/*NTC control register 1*/
+#define PM2XXX_BTEMP_HIGH_TH_45                0x0
+#define PM2XXX_BTEMP_HIGH_TH_50                0x1
+#define PM2XXX_BTEMP_HIGH_TH_55                0x2
+#define PM2XXX_BTEMP_HIGH_TH_60                0x3
+#define PM2XXX_BTEMP_HIGH_TH_65                0x4
+
+#define PM2XXX_BTEMP_LOW_TH_N5         (0x0<<3)
+#define PM2XXX_BTEMP_LOW_TH_0          (0x1<<3)
+#define PM2XXX_BTEMP_LOW_TH_5          (0x2<<3)
+#define PM2XXX_BTEMP_LOW_TH_10         (0x3<<3)
+
+/*NTC control register 2*/
+#define PM2XXX_NTC_BETA_COEFF_3477     0x0
+#define PM2XXX_NTC_BETA_COEFF_3964     0x1
+
+#define PM2XXX_NTC_RES_10K             (0x0<<2)
+#define PM2XXX_NTC_RES_47K             (0x1<<2)
+#define PM2XXX_NTC_RES_100K            (0x2<<2)
+#define PM2XXX_NTC_RES_NO_NTC          (0x3<<2)
+
+/* control Reg 9 */
+#define PM2XXX_CH_CC_MODEDROP_EN       1
+#define PM2XXX_CH_CC_MODEDROP_DIS      0
+
+#define PM2XXX_CH_CC_REDUCED_CURRENT_100MA     (0x0<<1)
+#define PM2XXX_CH_CC_REDUCED_CURRENT_200MA     (0x1<<1)
+#define PM2XXX_CH_CC_REDUCED_CURRENT_400MA     (0x2<<1)
+#define PM2XXX_CH_CC_REDUCED_CURRENT_IDENT     (0x3<<1)
+
+#define PM2XXX_CHARCHING_INFO_DIS      (0<<3)
+#define PM2XXX_CHARCHING_INFO_EN       (1<<3)
+
+#define PM2XXX_CH_150MV_DROP_300MV     (0<<4)
+#define PM2XXX_CH_150MV_DROP_150MV     (1<<4)
+
+
+/* charger status register */
+#define PM2XXX_CHG_STATUS_OFF          0x0
+#define PM2XXX_CHG_STATUS_ON           0x1
+#define PM2XXX_CHG_STATUS_FULL         0x2
+#define PM2XXX_CHG_STATUS_ERR          0x3
+#define PM2XXX_CHG_STATUS_WAIT         0x4
+#define PM2XXX_CHG_STATUS_NOBAT                0x5
+
+/* Input charger voltage VPWR2 */
+#define PM2XXX_VPWR2_OVV_6_0           0x0
+#define PM2XXX_VPWR2_OVV_6_3           0x1
+#define PM2XXX_VPWR2_OVV_10            0x2
+#define PM2XXX_VPWR2_OVV_NONE          0x3
+
+/* Input charger drop VPWR2 */
+#define PM2XXX_VPWR2_HW_OPT_EN         (0x1<<4)
+#define PM2XXX_VPWR2_HW_OPT_DIS                (0x0<<4)
+
+#define PM2XXX_VPWR2_VALID_EN          (0x1<<3)
+#define PM2XXX_VPWR2_VALID_DIS         (0x0<<3)
+
+#define PM2XXX_VPWR2_DROP_EN           (0x1<<2)
+#define PM2XXX_VPWR2_DROP_DIS          (0x0<<2)
+
+/* Input charger voltage VPWR1 */
+#define PM2XXX_VPWR1_OVV_6_0           0x0
+#define PM2XXX_VPWR1_OVV_6_3           0x1
+#define PM2XXX_VPWR1_OVV_10            0x2
+#define PM2XXX_VPWR1_OVV_NONE          0x3
+
+/* Input charger drop VPWR1 */
+#define PM2XXX_VPWR1_HW_OPT_EN         (0x1<<4)
+#define PM2XXX_VPWR1_HW_OPT_DIS                (0x0<<4)
+
+#define PM2XXX_VPWR1_VALID_EN          (0x1<<3)
+#define PM2XXX_VPWR1_VALID_DIS         (0x0<<3)
+
+#define PM2XXX_VPWR1_DROP_EN           (0x1<<2)
+#define PM2XXX_VPWR1_DROP_DIS          (0x0<<2)
+
+/* Battery low level comparator control register */
+#define PM2XXX_VBAT_LOW_MONITORING_DIS 0x0
+#define PM2XXX_VBAT_LOW_MONITORING_ENA 0x1
+
+/* Battery low level value control register */
+#define PM2XXX_VBAT_LOW_LEVEL_2_3      0x0
+#define PM2XXX_VBAT_LOW_LEVEL_2_4      0x1
+#define PM2XXX_VBAT_LOW_LEVEL_2_5      0x2
+#define PM2XXX_VBAT_LOW_LEVEL_2_6      0x3
+#define PM2XXX_VBAT_LOW_LEVEL_2_7      0x4
+#define PM2XXX_VBAT_LOW_LEVEL_2_8      0x5
+#define PM2XXX_VBAT_LOW_LEVEL_2_9      0x6
+#define PM2XXX_VBAT_LOW_LEVEL_3_0      0x7
+#define PM2XXX_VBAT_LOW_LEVEL_3_1      0x8
+#define PM2XXX_VBAT_LOW_LEVEL_3_2      0x9
+#define PM2XXX_VBAT_LOW_LEVEL_3_3      0xA
+#define PM2XXX_VBAT_LOW_LEVEL_3_4      0xB
+#define PM2XXX_VBAT_LOW_LEVEL_3_5      0xC
+#define PM2XXX_VBAT_LOW_LEVEL_3_6      0xD
+#define PM2XXX_VBAT_LOW_LEVEL_3_7      0xE
+#define PM2XXX_VBAT_LOW_LEVEL_3_8      0xF
+#define PM2XXX_VBAT_LOW_LEVEL_3_9      0x10
+#define PM2XXX_VBAT_LOW_LEVEL_4_0      0x11
+#define PM2XXX_VBAT_LOW_LEVEL_4_1      0x12
+#define PM2XXX_VBAT_LOW_LEVEL_4_2      0x13
+
+/* SW CTRL */
+#define PM2XXX_SWCTRL_HW               0x0
+#define PM2XXX_SWCTRL_SW               0x1
+
+
+/* LED Driver Control */
+#define PM2XXX_LED_CURRENT_MASK                0x0C
+#define PM2XXX_LED_CURRENT_2_5MA       (0X0<<2)
+#define PM2XXX_LED_CURRENT_1MA         (0X1<<2)
+#define PM2XXX_LED_CURRENT_5MA         (0X2<<2)
+#define PM2XXX_LED_CURRENT_10MA                (0X3<<2)
+
+#define PM2XXX_LED_SELECT_MASK         0x02
+#define PM2XXX_LED_SELECT_EN           (0X0<<1)
+#define PM2XXX_LED_SELECT_DIS          (0X1<<1)
+
+#define PM2XXX_ANTI_OVERSHOOT_MASK     0x01
+#define PM2XXX_ANTI_OVERSHOOT_DIS      0X0
+#define PM2XXX_ANTI_OVERSHOOT_EN       0X1
+
+enum pm2xxx_reg_int1 {
+       PM2XXX_INT1_ITVBATDISCONNECT    = 0x02,
+       PM2XXX_INT1_ITVBATLOWR          = 0x04,
+       PM2XXX_INT1_ITVBATLOWF          = 0x08,
+};
+
+enum pm2xxx_mask_reg_int1 {
+       PM2XXX_INT1_M_ITVBATDISCONNECT  = 0x02,
+       PM2XXX_INT1_M_ITVBATLOWR        = 0x04,
+       PM2XXX_INT1_M_ITVBATLOWF        = 0x08,
+};
+
+enum pm2xxx_source_reg_int1 {
+       PM2XXX_INT1_S_ITVBATDISCONNECT  = 0x02,
+       PM2XXX_INT1_S_ITVBATLOWR        = 0x04,
+       PM2XXX_INT1_S_ITVBATLOWF        = 0x08,
+};
+
+enum pm2xxx_reg_int2 {
+       PM2XXX_INT2_ITVPWR2PLUG         = 0x01,
+       PM2XXX_INT2_ITVPWR2UNPLUG       = 0x02,
+       PM2XXX_INT2_ITVPWR1PLUG         = 0x04,
+       PM2XXX_INT2_ITVPWR1UNPLUG       = 0x08,
+};
+
+enum pm2xxx_mask_reg_int2 {
+       PM2XXX_INT2_M_ITVPWR2PLUG       = 0x01,
+       PM2XXX_INT2_M_ITVPWR2UNPLUG     = 0x02,
+       PM2XXX_INT2_M_ITVPWR1PLUG       = 0x04,
+       PM2XXX_INT2_M_ITVPWR1UNPLUG     = 0x08,
+};
+
+enum pm2xxx_source_reg_int2 {
+       PM2XXX_INT2_S_ITVPWR2PLUG       = 0x03,
+       PM2XXX_INT2_S_ITVPWR1PLUG       = 0x0c,
+};
+
+enum pm2xxx_reg_int3 {
+       PM2XXX_INT3_ITCHPRECHARGEWD     = 0x01,
+       PM2XXX_INT3_ITCHCCWD            = 0x02,
+       PM2XXX_INT3_ITCHCVWD            = 0x04,
+       PM2XXX_INT3_ITAUTOTIMEOUTWD     = 0x08,
+};
+
+enum pm2xxx_mask_reg_int3 {
+       PM2XXX_INT3_M_ITCHPRECHARGEWD   = 0x01,
+       PM2XXX_INT3_M_ITCHCCWD          = 0x02,
+       PM2XXX_INT3_M_ITCHCVWD          = 0x04,
+       PM2XXX_INT3_M_ITAUTOTIMEOUTWD   = 0x08,
+};
+
+enum pm2xxx_source_reg_int3 {
+       PM2XXX_INT3_S_ITCHPRECHARGEWD   = 0x01,
+       PM2XXX_INT3_S_ITCHCCWD          = 0x02,
+       PM2XXX_INT3_S_ITCHCVWD          = 0x04,
+       PM2XXX_INT3_S_ITAUTOTIMEOUTWD   = 0x08,
+};
+
+enum pm2xxx_reg_int4 {
+       PM2XXX_INT4_ITBATTEMPCOLD       = 0x01,
+       PM2XXX_INT4_ITBATTEMPHOT        = 0x02,
+       PM2XXX_INT4_ITVPWR2OVV          = 0x04,
+       PM2XXX_INT4_ITVPWR1OVV          = 0x08,
+       PM2XXX_INT4_ITCHARGINGON        = 0x10,
+       PM2XXX_INT4_ITVRESUME           = 0x20,
+       PM2XXX_INT4_ITBATTFULL          = 0x40,
+       PM2XXX_INT4_ITCVPHASE           = 0x80,
+};
+
+enum pm2xxx_mask_reg_int4 {
+       PM2XXX_INT4_M_ITBATTEMPCOLD     = 0x01,
+       PM2XXX_INT4_M_ITBATTEMPHOT      = 0x02,
+       PM2XXX_INT4_M_ITVPWR2OVV        = 0x04,
+       PM2XXX_INT4_M_ITVPWR1OVV        = 0x08,
+       PM2XXX_INT4_M_ITCHARGINGON      = 0x10,
+       PM2XXX_INT4_M_ITVRESUME         = 0x20,
+       PM2XXX_INT4_M_ITBATTFULL        = 0x40,
+       PM2XXX_INT4_M_ITCVPHASE         = 0x80,
+};
+
+enum pm2xxx_source_reg_int4 {
+       PM2XXX_INT4_S_ITBATTEMPCOLD     = 0x01,
+       PM2XXX_INT4_S_ITBATTEMPHOT      = 0x02,
+       PM2XXX_INT4_S_ITVPWR2OVV        = 0x04,
+       PM2XXX_INT4_S_ITVPWR1OVV        = 0x08,
+       PM2XXX_INT4_S_ITCHARGINGON      = 0x10,
+       PM2XXX_INT4_S_ITVRESUME         = 0x20,
+       PM2XXX_INT4_S_ITBATTFULL        = 0x40,
+       PM2XXX_INT4_S_ITCVPHASE         = 0x80,
+};
+
+enum pm2xxx_reg_int5 {
+       PM2XXX_INT5_ITTHERMALSHUTDOWNRISE       = 0x01,
+       PM2XXX_INT5_ITTHERMALSHUTDOWNFALL       = 0x02,
+       PM2XXX_INT5_ITTHERMALWARNINGRISE        = 0x04,
+       PM2XXX_INT5_ITTHERMALWARNINGFALL        = 0x08,
+       PM2XXX_INT5_ITVSYSTEMOVV                = 0x10,
+};
+
+enum pm2xxx_mask_reg_int5 {
+       PM2XXX_INT5_M_ITTHERMALSHUTDOWNRISE     = 0x01,
+       PM2XXX_INT5_M_ITTHERMALSHUTDOWNFALL     = 0x02,
+       PM2XXX_INT5_M_ITTHERMALWARNINGRISE      = 0x04,
+       PM2XXX_INT5_M_ITTHERMALWARNINGFALL      = 0x08,
+       PM2XXX_INT5_M_ITVSYSTEMOVV              = 0x10,
+};
+
+enum pm2xxx_source_reg_int5 {
+       PM2XXX_INT5_S_ITTHERMALSHUTDOWNRISE     = 0x01,
+       PM2XXX_INT5_S_ITTHERMALSHUTDOWNFALL     = 0x02,
+       PM2XXX_INT5_S_ITTHERMALWARNINGRISE      = 0x04,
+       PM2XXX_INT5_S_ITTHERMALWARNINGFALL      = 0x08,
+       PM2XXX_INT5_S_ITVSYSTEMOVV              = 0x10,
+};
+
+enum pm2xxx_reg_int6 {
+       PM2XXX_INT6_ITVPWR2DROP         = 0x01,
+       PM2XXX_INT6_ITVPWR1DROP         = 0x02,
+       PM2XXX_INT6_ITVPWR2VALIDRISE    = 0x04,
+       PM2XXX_INT6_ITVPWR2VALIDFALL    = 0x08,
+       PM2XXX_INT6_ITVPWR1VALIDRISE    = 0x10,
+       PM2XXX_INT6_ITVPWR1VALIDFALL    = 0x20,
+};
+
+enum pm2xxx_mask_reg_int6 {
+       PM2XXX_INT6_M_ITVPWR2DROP       = 0x01,
+       PM2XXX_INT6_M_ITVPWR1DROP       = 0x02,
+       PM2XXX_INT6_M_ITVPWR2VALIDRISE  = 0x04,
+       PM2XXX_INT6_M_ITVPWR2VALIDFALL  = 0x08,
+       PM2XXX_INT6_M_ITVPWR1VALIDRISE  = 0x10,
+       PM2XXX_INT6_M_ITVPWR1VALIDFALL  = 0x20,
+};
+
+enum pm2xxx_source_reg_int6 {
+       PM2XXX_INT6_S_ITVPWR2DROP       = 0x01,
+       PM2XXX_INT6_S_ITVPWR1DROP       = 0x02,
+       PM2XXX_INT6_S_ITVPWR2VALIDRISE  = 0x04,
+       PM2XXX_INT6_S_ITVPWR2VALIDFALL  = 0x08,
+       PM2XXX_INT6_S_ITVPWR1VALIDRISE  = 0x10,
+       PM2XXX_INT6_S_ITVPWR1VALIDFALL  = 0x20,
+};
+
+struct pm2xxx_charger_info {
+       int charger_connected;
+       int charger_online;
+       int cv_active;
+       bool wd_expired;
+};
+
+struct pm2xxx_charger_event_flags {
+       bool mainextchnotok;
+       bool main_thermal_prot;
+       bool ovv;
+       bool chgwdexp;
+};
+
+struct pm2xxx_interrupts {
+       u8 reg[PM2XXX_NUM_INT_REG];
+       int (*handler[PM2XXX_NUM_INT_REG])(void *, int);
+};
+
+struct pm2xxx_config {
+       struct i2c_client *pm2xxx_i2c;
+       struct i2c_device_id *pm2xxx_id;
+};
+
+struct pm2xxx_irq {
+       char *name;
+       irqreturn_t (*isr)(int irq, void *data);
+};
+
+struct pm2xxx_charger {
+       struct device *dev;
+       u8 chip_id;
+       bool vddadc_en_ac;
+       struct pm2xxx_config config;
+       bool ac_conn;
+       unsigned int gpio_irq;
+       int vbat;
+       int old_vbat;
+       int failure_case;
+       int failure_input_ovv;
+       unsigned int lpn_pin;
+       struct pm2xxx_interrupts *pm2_int;
+       struct ab8500_gpadc *gpadc;
+       struct regulator *regu;
+       struct pm2xxx_bm_data *bat;
+       struct mutex lock;
+       struct ab8500 *parent;
+       struct pm2xxx_charger_info ac;
+       struct pm2xxx_charger_platform_data *pdata;
+       struct workqueue_struct *charger_wq;
+       struct delayed_work check_vbat_work;
+       struct work_struct ac_work;
+       struct work_struct check_main_thermal_prot_work;
+       struct ux500_charger ac_chg;
+       struct pm2xxx_charger_event_flags flags;
+};
+
+#endif /* PM2301_CHARGER_H */
index 40fa3b7cae548033d0d69e358574326e11a8f22c..29178f78d73cfb79868923bfee7ffbe30b393e1b 100644 (file)
@@ -55,7 +55,8 @@ static ssize_t power_supply_show_property(struct device *dev,
        };
        static char *health_text[] = {
                "Unknown", "Good", "Overheat", "Dead", "Over voltage",
-               "Unspecified failure", "Cold",
+               "Unspecified failure", "Cold", "Watchdog timer expire",
+               "Safety timer expire"
        };
        static char *technology_text[] = {
                "Unknown", "NiMH", "Li-ion", "Li-poly", "LiFe", "NiCd",
index 6461b489fb09f3eedbc6b3ff7b1e9b9002de920a..1ae65b822864b6b3eb5d4d36e2e1ccebd05fb0f4 100644 (file)
@@ -13,3 +13,20 @@ config POWER_RESET_GPIO
          This driver supports turning off your board via a GPIO line.
          If your board needs a GPIO high/low to power down, say Y and
          create a binding in your devicetree.
+
+config POWER_RESET_QNAP
+       bool "QNAP power-off driver"
+       depends on OF_GPIO && POWER_RESET && PLAT_ORION
+       help
+         This driver supports turning off QNAP NAS devices by sending
+         commands to the microcontroller which controls the main power.
+
+         Say Y if you have a QNAP NAS.
+
+config POWER_RESET_RESTART
+       bool "Restart power-off driver"
+       depends on ARM
+       help
+         Some boards don't actually have the ability to power off.
+         Instead they restart, and u-boot holds the SoC until the
+         user presses a key. u-boot then boots into Linux.
index 751488a4a0c5dbd95925c200f406aa1dfc2f8209..0f317f50c56fb90f5ef0b76ab1ef7d324fdbf16e 100644 (file)
@@ -1 +1,3 @@
 obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
+obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
+obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
\ No newline at end of file
diff --git a/drivers/power/reset/qnap-poweroff.c b/drivers/power/reset/qnap-poweroff.c
new file mode 100644 (file)
index 0000000..37f56f7
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * QNAP Turbo NAS Board power off
+ *
+ * Copyright (C) 2012 Andrew Lunn <andrew@lunn.ch>
+ *
+ * Based on the code from:
+ *
+ * Copyright (C) 2009  Martin Michlmayr <tbm@cyrius.com>
+ * Copyright (C) 2008  Byron Bradley <byron.bbradley@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/serial_reg.h>
+#include <linux/kallsyms.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+
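+/* standard 8250 register offsets (serial_reg.h), spaced four bytes apart on this SoC */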
+#define UART1_REG(x)   (base + ((UART_##x) << 2))
+
+static void __iomem *base;
+static unsigned long tclk;
+
+static void qnap_power_off(void)
+{
+       /* 19200 baud divisor, rounded to the nearest integer */
+       const unsigned divisor = ((tclk + (8 * 19200)) / (16 * 19200));
+
+       pr_err("%s: triggering power-off...\n", __func__);
+
+       /* hijack UART1 and reset into sane state (19200,8n1) */
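+       /* LCR = 0x83: DLAB set for divisor access, 8 data bits, no parity, 1 stop */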
+       writel(0x83, UART1_REG(LCR));
+       writel(divisor & 0xff, UART1_REG(DLL));
+       writel((divisor >> 8) & 0xff, UART1_REG(DLM));
+       writel(0x03, UART1_REG(LCR));
+       writel(0x00, UART1_REG(IER));
+       writel(0x00, UART1_REG(FCR));
+       writel(0x00, UART1_REG(MCR));
+
+       /* send the power-off command 'A' to PIC */
+       writel('A', UART1_REG(TX));
+}
+
+static int qnap_power_off_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       struct clk *clk;
+       char symname[KSYM_NAME_LEN];
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "Missing resource");
+               return -EINVAL;
+       }
+
+       base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+       if (!base) {
+               dev_err(&pdev->dev, "Unable to map resource");
+               return -EINVAL;
+       }
+
+       /* We need to know tclk in order to calculate the UART divisor */
+       clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(clk)) {
+               dev_err(&pdev->dev, "Clk missing");
+               return PTR_ERR(clk);
+       }
+
+       tclk = clk_get_rate(clk);
+
+       /* Check that nothing else has already set up a handler */
+       if (pm_power_off) {
+               lookup_symbol_name((ulong)pm_power_off, symname);
+               dev_err(&pdev->dev,
+                       "pm_power_off already claimed %p %s",
+                       pm_power_off, symname);
+               return -EBUSY;
+       }
+       pm_power_off = qnap_power_off;
+
+       return 0;
+}
+
+static int qnap_power_off_remove(struct platform_device *pdev)
+{
+       pm_power_off = NULL;
+       return 0;
+}
+
+static const struct of_device_id qnap_power_off_of_match_table[] = {
+       { .compatible = "qnap,power-off", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, qnap_power_off_of_match_table);
+
+static struct platform_driver qnap_power_off_driver = {
+       .probe  = qnap_power_off_probe,
+       .remove = qnap_power_off_remove,
+       .driver = {
+               .owner  = THIS_MODULE,
+               .name   = "qnap_power_off",
+               .of_match_table = of_match_ptr(qnap_power_off_of_match_table),
+       },
+};
+module_platform_driver(qnap_power_off_driver);
+
+MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
+MODULE_DESCRIPTION("QNAP Power off driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/reset/restart-poweroff.c b/drivers/power/reset/restart-poweroff.c
new file mode 100644 (file)
index 0000000..059cd15
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * Power off by restarting, letting u-boot keep hold of the machine
+ * until the user presses a button, for example.
+ *
+ * Andrew Lunn <andrew@lunn.ch>
+ *
+ * Copyright (C) 2012 Andrew Lunn
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/module.h>
+#include <asm/system_misc.h>
+
+static void restart_poweroff_do_poweroff(void)
+{
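+       /* 'h' requests the default (hard) restart; the bootloader then holds the machine */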
+       arm_pm_restart('h', NULL);
+}
+
+static int restart_poweroff_probe(struct platform_device *pdev)
+{
+       /* If a pm_power_off function has already been added, leave it alone */
+       if (pm_power_off != NULL) {
+               dev_err(&pdev->dev,
+                       "pm_power_off function already registered");
+               return -EBUSY;
+       }
+
+       pm_power_off = &restart_poweroff_do_poweroff;
+       return 0;
+}
+
+static int restart_poweroff_remove(struct platform_device *pdev)
+{
+       if (pm_power_off == &restart_poweroff_do_poweroff)
+               pm_power_off = NULL;
+
+       return 0;
+}
+
+static const struct of_device_id of_restart_poweroff_match[] = {
+       { .compatible = "restart-poweroff", },
+       {},
+};
+
+static struct platform_driver restart_poweroff_driver = {
+       .probe = restart_poweroff_probe,
+       .remove = restart_poweroff_remove,
+       .driver = {
+               .name = "poweroff-restart",
+               .owner = THIS_MODULE,
+               .of_match_table = of_restart_poweroff_match,
+       },
+};
+module_platform_driver(restart_poweroff_driver);
+
+MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
+MODULE_DESCRIPTION("restart poweroff driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:poweroff-restart");
index 2b557119adad4b11d63ec345c96b425183e691f3..c79ab843333ee9d3eea9b46067c1c777050154f4 100644 (file)
@@ -30,8 +30,6 @@ struct pm8607_regulator_info {
        unsigned int    *vol_table;
        unsigned int    *vol_suspend;
 
-       int     update_reg;
-       int     update_bit;
        int     slope_double;
 };
 
@@ -222,29 +220,6 @@ static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
        return ret;
 }
 
-static int pm8607_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
-{
-       struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
-       uint8_t val;
-       int ret;
-
-       val = (uint8_t)(selector << (ffs(rdev->desc->vsel_mask) - 1));
-
-       ret = pm860x_set_bits(info->i2c, rdev->desc->vsel_reg,
-                             rdev->desc->vsel_mask, val);
-       if (ret)
-               return ret;
-       switch (info->desc.id) {
-       case PM8607_ID_BUCK1:
-       case PM8607_ID_BUCK3:
-               ret = pm860x_set_bits(info->i2c, info->update_reg,
-                                     1 << info->update_bit,
-                                     1 << info->update_bit);
-               break;
-       }
-       return ret;
-}
-
 static int pm8606_preg_enable(struct regulator_dev *rdev)
 {
        struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
@@ -276,7 +251,7 @@ static int pm8606_preg_is_enabled(struct regulator_dev *rdev)
 
 static struct regulator_ops pm8607_regulator_ops = {
        .list_voltage   = pm8607_list_voltage,
-       .set_voltage_sel = pm8607_set_voltage_sel,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
@@ -313,11 +288,11 @@ static struct regulator_ops pm8606_preg_ops = {
                .n_voltages = ARRAY_SIZE(vreg##_table),                 \
                .vsel_reg = PM8607_##vreg,                              \
                .vsel_mask = ARRAY_SIZE(vreg##_table) - 1,              \
+               .apply_reg = PM8607_##ureg,                             \
+               .apply_bit = (ubit),                                    \
                .enable_reg = PM8607_##ereg,                            \
                .enable_mask = 1 << (ebit),                             \
        },                                                              \
-       .update_reg     = PM8607_##ureg,                                \
-       .update_bit     = (ubit),                                       \
        .slope_double   = (0),                                          \
        .vol_table      = (unsigned int *)&vreg##_table,                \
        .vol_suspend    = (unsigned int *)&vreg##_suspend_table,        \
@@ -343,9 +318,9 @@ static struct regulator_ops pm8606_preg_ops = {
 }
 
 static struct pm8607_regulator_info pm8607_regulator_info[] = {
-       PM8607_DVC(BUCK1, GO, 0, SUPPLIES_EN11, 0),
-       PM8607_DVC(BUCK2, GO, 1, SUPPLIES_EN11, 1),
-       PM8607_DVC(BUCK3, GO, 2, SUPPLIES_EN11, 2),
+       PM8607_DVC(BUCK1, GO, BIT(0), SUPPLIES_EN11, 0),
+       PM8607_DVC(BUCK2, GO, BIT(1), SUPPLIES_EN11, 1),
+       PM8607_DVC(BUCK3, GO, BIT(2), SUPPLIES_EN11, 2),
 
        PM8607_LDO(1,         LDO1, 0, SUPPLIES_EN11, 3),
        PM8607_LDO(2,         LDO2, 0, SUPPLIES_EN11, 4),
@@ -372,7 +347,7 @@ static int pm8607_regulator_dt_init(struct platform_device *pdev,
                                    struct regulator_config *config)
 {
        struct device_node *nproot, *np;
-       nproot = pdev->dev.parent->of_node;
+       nproot = of_node_get(pdev->dev.parent->of_node);
        if (!nproot)
                return -ENODEV;
        nproot = of_find_node_by_name(nproot, "regulators");
@@ -388,6 +363,7 @@ static int pm8607_regulator_dt_init(struct platform_device *pdev,
                        break;
                }
        }
+       of_node_put(nproot);
        return 0;
 }
 #else
index 551a22b075387a0641d37ff9dc1a34e25b65ee9d..a5d97eaee99e3085ad5dd8c5c5d294511b374c5f 100644 (file)
@@ -91,6 +91,7 @@ config REGULATOR_AAT2870
 config REGULATOR_ARIZONA
        tristate "Wolfson Arizona class devices"
        depends on MFD_ARIZONA
+       depends on SND_SOC
        help
          Support for the regulators found on Wolfson Arizona class
          devices.
@@ -277,6 +278,15 @@ config REGULATOR_LP872X
        help
          This driver supports LP8720/LP8725 PMIC
 
+config REGULATOR_LP8755
+       tristate "TI LP8755 High Performance PMU driver"
+       depends on I2C
+       select REGMAP_I2C
+       help
+         This driver supports the LP8755 High Performance PMU. The
+         chip contains six step-down DC/DC converters which can be
+         combined in nine multiphase configurations.
+
 config REGULATOR_LP8788
        bool "TI LP8788 Power Regulators"
        depends on MFD_LP8788
index b802b0c7fb02d7d7bb3bf3c1fa72b95aed899ce4..6e8250382defc5d4220c1e54be835bdb93dfe45a 100644 (file)
@@ -30,6 +30,7 @@ obj-$(CONFIG_REGULATOR_LP3972) += lp3972.o
 obj-$(CONFIG_REGULATOR_LP872X) += lp872x.o
 obj-$(CONFIG_REGULATOR_LP8788) += lp8788-buck.o
 obj-$(CONFIG_REGULATOR_LP8788) += lp8788-ldo.o
+obj-$(CONFIG_REGULATOR_LP8755) += lp8755.o
 obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
 obj-$(CONFIG_REGULATOR_MAX8649)        += max8649.o
 obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
index 8f39cac661d26deb4025e2787add57f22dc50db2..0d4a8ccbb53616275bda7d8f4ca817d8d68ad104 100644 (file)
 #include <linux/regulator/driver.h>
 #include <linux/regulator/of_regulator.h>
 
+#define LDO_RAMP_UP_UNIT_IN_CYCLES      64 /* 64 cycles per step */
+#define LDO_RAMP_UP_FREQ_IN_MHZ         24 /* cycles of the 24 MHz OSC */
+
 struct anatop_regulator {
        const char *name;
        u32 control_reg;
        struct regmap *anatop;
        int vol_bit_shift;
        int vol_bit_width;
+       u32 delay_reg;
+       int delay_bit_shift;
+       int delay_bit_width;
        int min_bit_val;
        int min_voltage;
        int max_voltage;
@@ -55,6 +61,32 @@ static int anatop_regmap_set_voltage_sel(struct regulator_dev *reg,
        return regulator_set_voltage_sel_regmap(reg, selector);
 }
 
+static int anatop_regmap_set_voltage_time_sel(struct regulator_dev *reg,
+       unsigned int old_sel,
+       unsigned int new_sel)
+{
+       struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
+       u32 val;
+       int ret = 0;
+
+       /* check whether we need to account for the LDO ramp-up time */
+       if (anatop_reg->delay_bit_width && new_sel > old_sel) {
+               /*
+                * The LDO ramp-up delay depends on the step-size register
+                * setting: each step takes (64 << setting) cycles of the
+                * 24 MHz oscillator, so the delay in microseconds is
+                * steps * (64 << setting) / 24, rounded up.
+                */
+               regmap_read(anatop_reg->anatop, anatop_reg->delay_reg, &val);
+               val = (val >> anatop_reg->delay_bit_shift) &
+                       ((1 << anatop_reg->delay_bit_width) - 1);
+               ret = (new_sel - old_sel) * (LDO_RAMP_UP_UNIT_IN_CYCLES <<
+                       val) / LDO_RAMP_UP_FREQ_IN_MHZ + 1;
+       }
+
+       return ret;
+}
+
 static int anatop_regmap_get_voltage_sel(struct regulator_dev *reg)
 {
        struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
@@ -67,6 +99,7 @@ static int anatop_regmap_get_voltage_sel(struct regulator_dev *reg)
 
 static struct regulator_ops anatop_rops = {
        .set_voltage_sel = anatop_regmap_set_voltage_sel,
+       .set_voltage_time_sel = anatop_regmap_set_voltage_time_sel,
        .get_voltage_sel = anatop_regmap_get_voltage_sel,
        .list_voltage = regulator_list_voltage_linear,
        .map_voltage = regulator_map_voltage_linear,
@@ -143,6 +176,14 @@ static int anatop_regulator_probe(struct platform_device *pdev)
                goto anatop_probe_end;
        }
 
+       /* read LDO ramp up setting, only for core reg */
+       of_property_read_u32(np, "anatop-delay-reg-offset",
+                            &sreg->delay_reg);
+       of_property_read_u32(np, "anatop-delay-bit-width",
+                            &sreg->delay_bit_width);
+       of_property_read_u32(np, "anatop-delay-bit-shift",
+                            &sreg->delay_bit_shift);
+
        rdesc->n_voltages = (sreg->max_voltage - sreg->min_voltage) / 25000 + 1
                            + sreg->min_bit_val;
        rdesc->min_uV = sreg->min_voltage;
index a6d040cbf8ac1dade86099eb0a71df4c2f94c4c8..e87536bf0bed038f09af9bc251c85d0b00fb6c97 100644 (file)
@@ -21,6 +21,8 @@
 #include <linux/regulator/machine.h>
 #include <linux/gpio.h>
 #include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <sound/soc.h>
 
 #include <linux/mfd/arizona/core.h>
 #include <linux/mfd/arizona/pdata.h>
@@ -34,6 +36,8 @@ struct arizona_micsupp {
 
        struct regulator_consumer_supply supply;
        struct regulator_init_data init_data;
+
+       struct work_struct check_cp_work;
 };
 
 static int arizona_micsupp_list_voltage(struct regulator_dev *rdev,
@@ -72,9 +76,73 @@ static int arizona_micsupp_map_voltage(struct regulator_dev *rdev,
        return selector;
 }
 
+static void arizona_micsupp_check_cp(struct work_struct *work)
+{
+       struct arizona_micsupp *micsupp =
+               container_of(work, struct arizona_micsupp, check_cp_work);
+       struct snd_soc_dapm_context *dapm = micsupp->arizona->dapm;
+       struct arizona *arizona = micsupp->arizona;
+       struct regmap *regmap = arizona->regmap;
+       unsigned int reg;
+       int ret;
+
+       ret = regmap_read(regmap, ARIZONA_MIC_CHARGE_PUMP_1, &reg);
+       if (ret != 0) {
+               dev_err(arizona->dev, "Failed to read CP state: %d\n", ret);
+               return;
+       }
+
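+       /* keep the MICSUPP DAPM widget in sync with the charge-pump enable/bypass state */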
+       if (dapm) {
+               if ((reg & (ARIZONA_CPMIC_ENA | ARIZONA_CPMIC_BYPASS)) ==
+                   ARIZONA_CPMIC_ENA)
+                       snd_soc_dapm_force_enable_pin(dapm, "MICSUPP");
+               else
+                       snd_soc_dapm_disable_pin(dapm, "MICSUPP");
+
+               snd_soc_dapm_sync(dapm);
+       }
+}
+
+static int arizona_micsupp_enable(struct regulator_dev *rdev)
+{
+       struct arizona_micsupp *micsupp = rdev_get_drvdata(rdev);
+       int ret;
+
+       ret = regulator_enable_regmap(rdev);
+
+       if (ret == 0)
+               schedule_work(&micsupp->check_cp_work);
+
+       return ret;
+}
+
+static int arizona_micsupp_disable(struct regulator_dev *rdev)
+{
+       struct arizona_micsupp *micsupp = rdev_get_drvdata(rdev);
+       int ret;
+
+       ret = regulator_disable_regmap(rdev);
+       if (ret == 0)
+               schedule_work(&micsupp->check_cp_work);
+
+       return ret;
+}
+
+static int arizona_micsupp_set_bypass(struct regulator_dev *rdev, bool ena)
+{
+       struct arizona_micsupp *micsupp = rdev_get_drvdata(rdev);
+       int ret;
+
+       ret = regulator_set_bypass_regmap(rdev, ena);
+       if (ret == 0)
+               schedule_work(&micsupp->check_cp_work);
+
+       return ret;
+}
+
 static struct regulator_ops arizona_micsupp_ops = {
-       .enable = regulator_enable_regmap,
-       .disable = regulator_disable_regmap,
+       .enable = arizona_micsupp_enable,
+       .disable = arizona_micsupp_disable,
        .is_enabled = regulator_is_enabled_regmap,
 
        .list_voltage = arizona_micsupp_list_voltage,
@@ -84,7 +152,7 @@ static struct regulator_ops arizona_micsupp_ops = {
        .set_voltage_sel = regulator_set_voltage_sel_regmap,
 
        .get_bypass = regulator_get_bypass_regmap,
-       .set_bypass = regulator_set_bypass_regmap,
+       .set_bypass = arizona_micsupp_set_bypass,
 };
 
 static const struct regulator_desc arizona_micsupp = {
@@ -109,7 +177,8 @@ static const struct regulator_desc arizona_micsupp = {
 static const struct regulator_init_data arizona_micsupp_default = {
        .constraints = {
                .valid_ops_mask = REGULATOR_CHANGE_STATUS |
-                               REGULATOR_CHANGE_VOLTAGE,
+                               REGULATOR_CHANGE_VOLTAGE |
+                               REGULATOR_CHANGE_BYPASS,
                .min_uV = 1700000,
                .max_uV = 3300000,
        },
@@ -131,6 +200,7 @@ static int arizona_micsupp_probe(struct platform_device *pdev)
        }
 
        micsupp->arizona = arizona;
+       INIT_WORK(&micsupp->check_cp_work, arizona_micsupp_check_cp);
 
        /*
         * Since the chip usually supplies itself we provide some
index 2f1341db38a0ad8d80802834244641a310a20399..f0ba8c4eefa95099ebb9a14912a3cf302f38365d 100644 (file)
@@ -303,7 +303,7 @@ static int as3711_regulator_probe(struct platform_device *pdev)
                reg_data = pdata ? pdata->init_data[id] : NULL;
 
                /* No need to register if there is no regulator data */
-               if (!ri->desc.name)
+               if (!reg_data)
                        continue;
 
                reg = &regs[id];
index 0f65b246cc0c8d24c559cdeafd340232d1c55e0c..da9782bd27d0a182d32578c4779481072d0657ef 100644 (file)
@@ -200,8 +200,8 @@ static int regulator_check_consumers(struct regulator_dev *rdev,
        }
 
        if (*min_uV > *max_uV) {
-               dev_err(regulator->dev, "Restricting voltage, %u-%uuV\n",
-                       regulator->min_uV, regulator->max_uV);
+               rdev_err(rdev, "Restricting voltage, %u-%uuV\n",
+                       *min_uV, *max_uV);
                return -EINVAL;
        }
 
@@ -1885,9 +1885,15 @@ int regulator_can_change_voltage(struct regulator *regulator)
        struct regulator_dev    *rdev = regulator->rdev;
 
        if (rdev->constraints &&
-           rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE &&
-           (rdev->desc->n_voltages - rdev->desc->linear_min_sel) > 1)
-               return 1;
+           (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
+               if (rdev->desc->n_voltages - rdev->desc->linear_min_sel > 1)
+                       return 1;
+
+               if (rdev->desc->continuous_voltage_range &&
+                   rdev->constraints->min_uV && rdev->constraints->max_uV &&
+                   rdev->constraints->min_uV != rdev->constraints->max_uV)
+                       return 1;
+       }
 
        return 0;
 }
@@ -2074,10 +2080,20 @@ EXPORT_SYMBOL_GPL(regulator_get_voltage_sel_regmap);
  */
 int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel)
 {
+       int ret;
+
        sel <<= ffs(rdev->desc->vsel_mask) - 1;
 
-       return regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
+       ret = regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
                                  rdev->desc->vsel_mask, sel);
+       if (ret)
+               return ret;
+
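+       /* some devices latch the new selector only after a separate apply/GO bit is set */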
+       if (rdev->desc->apply_bit)
+               ret = regmap_update_bits(rdev->regmap, rdev->desc->apply_reg,
+                                        rdev->desc->apply_bit,
+                                        rdev->desc->apply_bit);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(regulator_set_voltage_sel_regmap);
 
@@ -2223,8 +2239,11 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
                        best_val = rdev->desc->ops->list_voltage(rdev, ret);
                        if (min_uV <= best_val && max_uV >= best_val) {
                                selector = ret;
-                               ret = rdev->desc->ops->set_voltage_sel(rdev,
-                                                                      ret);
+                               if (old_selector == selector)
+                                       ret = 0;
+                               else
+                                       ret = rdev->desc->ops->set_voltage_sel(
+                                                               rdev, ret);
                        } else {
                                ret = -EINVAL;
                        }
@@ -2235,7 +2254,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
 
        /* Call set_voltage_time_sel if successfully obtained old_selector */
        if (ret == 0 && _regulator_is_enabled(rdev) && old_selector >= 0 &&
-           rdev->desc->ops->set_voltage_time_sel) {
+           old_selector != selector && rdev->desc->ops->set_voltage_time_sel) {
 
                delay = rdev->desc->ops->set_voltage_time_sel(rdev,
                                                old_selector, selector);
@@ -2288,6 +2307,7 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
 {
        struct regulator_dev *rdev = regulator->rdev;
        int ret = 0;
+       int old_min_uV, old_max_uV;
 
        mutex_lock(&rdev->mutex);
 
@@ -2309,18 +2329,29 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
        ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
        if (ret < 0)
                goto out;
+
+       /* restore original values in case of error */
+       old_min_uV = regulator->min_uV;
+       old_max_uV = regulator->max_uV;
        regulator->min_uV = min_uV;
        regulator->max_uV = max_uV;
 
        ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
        if (ret < 0)
-               goto out;
+               goto out2;
 
        ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
-
+       if (ret < 0)
+               goto out2;
+
 out:
        mutex_unlock(&rdev->mutex);
        return ret;
+out2:
+       regulator->min_uV = old_min_uV;
+       regulator->max_uV = old_max_uV;
+       mutex_unlock(&rdev->mutex);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(regulator_set_voltage);
 
@@ -3202,7 +3233,7 @@ static int add_regulator_attributes(struct regulator_dev *rdev)
                if (status < 0)
                        return status;
        }
-       if (ops->is_enabled) {
+       if (rdev->ena_gpio || ops->is_enabled) {
                status = device_create_file(dev, &dev_attr_state);
                if (status < 0)
                        return status;
@@ -3315,7 +3346,8 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
  * @config: runtime configuration for regulator
  *
  * Called by regulator drivers to register a regulator.
- * Returns 0 on success.
+ * Returns a valid pointer to struct regulator_dev on success
+ * or an ERR_PTR() on error.
  */
 struct regulator_dev *
 regulator_register(const struct regulator_desc *regulator_desc,
index d0963090442d220f69ebecb8c6dbbf22866b62f8..96b569abb46cfe055a50cdf35a4c362a15ba0a3a 100644 (file)
@@ -70,7 +70,6 @@ struct da9052_regulator_info {
        int step_uV;
        int min_uV;
        int max_uV;
-       unsigned char activate_bit;
 };
 
 struct da9052_regulator {
@@ -210,36 +209,6 @@ static int da9052_map_voltage(struct regulator_dev *rdev,
        return sel;
 }
 
-static int da9052_regulator_set_voltage_sel(struct regulator_dev *rdev,
-                                           unsigned int selector)
-{
-       struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
-       struct da9052_regulator_info *info = regulator->info;
-       int id = rdev_get_id(rdev);
-       int ret;
-
-       ret = da9052_reg_update(regulator->da9052, rdev->desc->vsel_reg,
-                               rdev->desc->vsel_mask, selector);
-       if (ret < 0)
-               return ret;
-
-       /* Some LDOs and DCDCs are DVC controlled which requires enabling of
-        * the activate bit to implment the changes on the output.
-        */
-       switch (id) {
-       case DA9052_ID_BUCK1:
-       case DA9052_ID_BUCK2:
-       case DA9052_ID_BUCK3:
-       case DA9052_ID_LDO2:
-       case DA9052_ID_LDO3:
-               ret = da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG,
-                                       info->activate_bit, info->activate_bit);
-               break;
-       }
-
-       return ret;
-}
-
 static struct regulator_ops da9052_dcdc_ops = {
        .get_current_limit = da9052_dcdc_get_current_limit,
        .set_current_limit = da9052_dcdc_set_current_limit,
@@ -247,7 +216,7 @@ static struct regulator_ops da9052_dcdc_ops = {
        .list_voltage = da9052_list_voltage,
        .map_voltage = da9052_map_voltage,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
-       .set_voltage_sel = da9052_regulator_set_voltage_sel,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
        .is_enabled = regulator_is_enabled_regmap,
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
@@ -257,7 +226,7 @@ static struct regulator_ops da9052_ldo_ops = {
        .list_voltage = da9052_list_voltage,
        .map_voltage = da9052_map_voltage,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
-       .set_voltage_sel = da9052_regulator_set_voltage_sel,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
        .is_enabled = regulator_is_enabled_regmap,
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
@@ -274,13 +243,14 @@ static struct regulator_ops da9052_ldo_ops = {
                .owner = THIS_MODULE,\
                .vsel_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
                .vsel_mask = (1 << (sbits)) - 1,\
+               .apply_reg = DA9052_SUPPLY_REG, \
+               .apply_bit = (abits), \
                .enable_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
                .enable_mask = 1 << (ebits),\
        },\
        .min_uV = (min) * 1000,\
        .max_uV = (max) * 1000,\
        .step_uV = (step) * 1000,\
-       .activate_bit = (abits),\
 }
 
 #define DA9052_DCDC(_id, step, min, max, sbits, ebits, abits) \
@@ -294,13 +264,14 @@ static struct regulator_ops da9052_ldo_ops = {
                .owner = THIS_MODULE,\
                .vsel_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
                .vsel_mask = (1 << (sbits)) - 1,\
+               .apply_reg = DA9052_SUPPLY_REG, \
+               .apply_bit = (abits), \
                .enable_reg = DA9052_BUCKCORE_REG + DA9052_ID_##_id, \
                .enable_mask = 1 << (ebits),\
        },\
        .min_uV = (min) * 1000,\
        .max_uV = (max) * 1000,\
        .step_uV = (step) * 1000,\
-       .activate_bit = (abits),\
 }
 
 static struct da9052_regulator_info da9052_regulator_info[] = {
@@ -395,9 +366,9 @@ static int da9052_regulator_probe(struct platform_device *pdev)
                config.init_data = pdata->regulators[pdev->id];
        } else {
 #ifdef CONFIG_OF
-               struct device_node *nproot = da9052->dev->of_node;
-               struct device_node *np;
+               struct device_node *nproot, *np;
 
+               nproot = of_node_get(da9052->dev->of_node);
                if (!nproot)
                        return -ENODEV;
 
@@ -414,6 +385,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
                                break;
                        }
                }
+               of_node_put(nproot);
 #endif
        }
 
index 1a05ac66878f7933a71d926cf574c0ba65db51b4..30221099d09c4d09bc3f406c94e5fa089dadc5b8 100644 (file)
@@ -58,7 +58,6 @@ struct da9055_volt_reg {
        int reg_b;
        int sl_shift;
        int v_mask;
-       int v_shift;
 };
 
 struct da9055_mode_reg {
@@ -388,7 +387,6 @@ static struct regulator_ops da9055_ldo_ops = {
                .reg_b = DA9055_REG_VBCORE_B + DA9055_ID_##_id, \
                .sl_shift = 7,\
                .v_mask = (1 << (vbits)) - 1,\
-               .v_shift = (vbits),\
        },\
 }
 
@@ -417,7 +415,6 @@ static struct regulator_ops da9055_ldo_ops = {
                .reg_b = DA9055_REG_VBCORE_B + DA9055_ID_##_id, \
                .sl_shift = 7,\
                .v_mask = (1 << (vbits)) - 1,\
-               .v_shift = (vbits),\
        },\
        .mode = {\
                .reg = DA9055_REG_BCORE_MODE,\
index 261f3d2299bc0a5d2d074824211172be7a6b8448..89bd2faaef8cf627cddcd9f1b7a03c9b4cab5fae 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 
 #include "dbx500-prcmu.h"
 
index bae681ccd3ea73a24c0c0d3f98a948d9a7fd06f1..9d39eb4aafa3634762678dcf65d460d66b9de8a7 100644 (file)
@@ -132,7 +132,7 @@ static struct regulator_ops gpio_regulator_voltage_ops = {
        .list_voltage = gpio_regulator_list_voltage,
 };
 
-struct gpio_regulator_config *
+static struct gpio_regulator_config *
 of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
 {
        struct gpio_regulator_config *config;
@@ -163,10 +163,7 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
        config->enable_gpio = of_get_named_gpio(np, "enable-gpio", 0);
 
        /* Fetch GPIOs. */
-       for (i = 0; ; i++)
-               if (of_get_named_gpio(np, "gpios", i) < 0)
-                       break;
-       config->nr_gpios = i;
+       config->nr_gpios = of_gpio_count(np);
 
        config->gpios = devm_kzalloc(dev,
                                sizeof(struct gpio) * config->nr_gpios,
index 5f68ff11a2985bb6519d0b5d4f2a8c1e89ce9e05..9cb2c0f34515298d33782b256504334863f0e474 100644 (file)
@@ -73,8 +73,6 @@ static const unsigned int buck_voltage_map[] = {
 };
 
 #define BUCK_TARGET_VOL_MASK 0x3f
-#define BUCK_TARGET_VOL_MIN_IDX 0x01
-#define BUCK_TARGET_VOL_MAX_IDX 0x19
 
 #define LP3971_BUCK_RAMP_REG(x)        (buck_base_addr[x]+2)
 
@@ -140,7 +138,7 @@ static int lp3971_ldo_disable(struct regulator_dev *dev)
        return lp3971_set_bits(lp3971, LP3971_LDO_ENABLE_REG, mask, 0);
 }
 
-static int lp3971_ldo_get_voltage(struct regulator_dev *dev)
+static int lp3971_ldo_get_voltage_sel(struct regulator_dev *dev)
 {
        struct lp3971 *lp3971 = rdev_get_drvdata(dev);
        int ldo = rdev_get_id(dev) - LP3971_LDO1;
@@ -149,7 +147,7 @@ static int lp3971_ldo_get_voltage(struct regulator_dev *dev)
        reg = lp3971_reg_read(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo));
        val = (reg >> LDO_VOL_CONTR_SHIFT(ldo)) & LDO_VOL_CONTR_MASK;
 
-       return dev->desc->volt_table[val];
+       return val;
 }
 
 static int lp3971_ldo_set_voltage_sel(struct regulator_dev *dev,
@@ -168,7 +166,7 @@ static struct regulator_ops lp3971_ldo_ops = {
        .is_enabled = lp3971_ldo_is_enabled,
        .enable = lp3971_ldo_enable,
        .disable = lp3971_ldo_disable,
-       .get_voltage = lp3971_ldo_get_voltage,
+       .get_voltage_sel = lp3971_ldo_get_voltage_sel,
        .set_voltage_sel = lp3971_ldo_set_voltage_sel,
 };
 
@@ -201,24 +199,16 @@ static int lp3971_dcdc_disable(struct regulator_dev *dev)
        return lp3971_set_bits(lp3971, LP3971_BUCK_VOL_ENABLE_REG, mask, 0);
 }
 
-static int lp3971_dcdc_get_voltage(struct regulator_dev *dev)
+static int lp3971_dcdc_get_voltage_sel(struct regulator_dev *dev)
 {
        struct lp3971 *lp3971 = rdev_get_drvdata(dev);
        int buck = rdev_get_id(dev) - LP3971_DCDC1;
        u16 reg;
-       int val;
 
        reg = lp3971_reg_read(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck));
        reg &= BUCK_TARGET_VOL_MASK;
 
-       if (reg <= BUCK_TARGET_VOL_MAX_IDX)
-               val = buck_voltage_map[reg];
-       else {
-               val = 0;
-               dev_warn(&dev->dev, "chip reported incorrect voltage value.\n");
-       }
-
-       return val;
+       return reg;
 }
 
 static int lp3971_dcdc_set_voltage_sel(struct regulator_dev *dev,
@@ -249,7 +239,7 @@ static struct regulator_ops lp3971_dcdc_ops = {
        .is_enabled = lp3971_dcdc_is_enabled,
        .enable = lp3971_dcdc_enable,
        .disable = lp3971_dcdc_disable,
-       .get_voltage = lp3971_dcdc_get_voltage,
+       .get_voltage_sel = lp3971_dcdc_get_voltage_sel,
        .set_voltage_sel = lp3971_dcdc_set_voltage_sel,
 };
 
index 69c42c318b87c5506a4f1c9548bf1a303ddc3ae3..0baabcfb578afb728d86b3a6eb84c5abe04801c6 100644 (file)
@@ -165,8 +165,6 @@ static const int buck_base_addr[] = {
 #define LP3972_BUCK_VOL_ENABLE_REG(x) (buck_vol_enable_addr[x])
 #define LP3972_BUCK_VOL1_REG(x) (buck_base_addr[x])
 #define LP3972_BUCK_VOL_MASK 0x1f
-#define LP3972_BUCK_VOL_MIN_IDX(x) ((x) ? 0x01 : 0x00)
-#define LP3972_BUCK_VOL_MAX_IDX(x) ((x) ? 0x19 : 0x1f)
 
 static int lp3972_i2c_read(struct i2c_client *i2c, char reg, int count,
        u16 *dest)
@@ -257,7 +255,7 @@ static int lp3972_ldo_disable(struct regulator_dev *dev)
                                mask, 0);
 }
 
-static int lp3972_ldo_get_voltage(struct regulator_dev *dev)
+static int lp3972_ldo_get_voltage_sel(struct regulator_dev *dev)
 {
        struct lp3972 *lp3972 = rdev_get_drvdata(dev);
        int ldo = rdev_get_id(dev) - LP3972_LDO1;
@@ -267,7 +265,7 @@ static int lp3972_ldo_get_voltage(struct regulator_dev *dev)
        reg = lp3972_reg_read(lp3972, LP3972_LDO_VOL_CONTR_REG(ldo));
        val = (reg >> LP3972_LDO_VOL_CONTR_SHIFT(ldo)) & mask;
 
-       return dev->desc->volt_table[val];
+       return val;
 }
 
 static int lp3972_ldo_set_voltage_sel(struct regulator_dev *dev,
@@ -314,7 +312,7 @@ static struct regulator_ops lp3972_ldo_ops = {
        .is_enabled = lp3972_ldo_is_enabled,
        .enable = lp3972_ldo_enable,
        .disable = lp3972_ldo_disable,
-       .get_voltage = lp3972_ldo_get_voltage,
+       .get_voltage_sel = lp3972_ldo_get_voltage_sel,
        .set_voltage_sel = lp3972_ldo_set_voltage_sel,
 };
 
@@ -353,24 +351,16 @@ static int lp3972_dcdc_disable(struct regulator_dev *dev)
        return val;
 }
 
-static int lp3972_dcdc_get_voltage(struct regulator_dev *dev)
+static int lp3972_dcdc_get_voltage_sel(struct regulator_dev *dev)
 {
        struct lp3972 *lp3972 = rdev_get_drvdata(dev);
        int buck = rdev_get_id(dev) - LP3972_DCDC1;
        u16 reg;
-       int val;
 
        reg = lp3972_reg_read(lp3972, LP3972_BUCK_VOL1_REG(buck));
        reg &= LP3972_BUCK_VOL_MASK;
-       if (reg <= LP3972_BUCK_VOL_MAX_IDX(buck))
-               val = dev->desc->volt_table[reg];
-       else {
-               val = 0;
-               dev_warn(&dev->dev, "chip reported incorrect voltage value."
-                                   " reg = %d\n", reg);
-       }
 
-       return val;
+       return reg;
 }
 
 static int lp3972_dcdc_set_voltage_sel(struct regulator_dev *dev,
@@ -402,7 +392,7 @@ static struct regulator_ops lp3972_dcdc_ops = {
        .is_enabled = lp3972_dcdc_is_enabled,
        .enable = lp3972_dcdc_enable,
        .disable = lp3972_dcdc_disable,
-       .get_voltage = lp3972_dcdc_get_voltage,
+       .get_voltage_sel = lp3972_dcdc_get_voltage_sel,
        .set_voltage_sel = lp3972_dcdc_set_voltage_sel,
 };
 
index 9289ead715cab59d749c56222f605195051395db..8e3c7ae0047f01fbe77871c79cf36e6e34e0b166 100644 (file)
@@ -181,20 +181,6 @@ static inline int lp872x_update_bits(struct lp872x *lp, u8 addr,
        return regmap_update_bits(lp->regmap, addr, mask, data);
 }
 
-static int _rdev_to_offset(struct regulator_dev *rdev)
-{
-       enum lp872x_regulator_id id = rdev_get_id(rdev);
-
-       switch (id) {
-       case LP8720_ID_LDO1 ... LP8720_ID_BUCK:
-               return id;
-       case LP8725_ID_LDO1 ... LP8725_ID_BUCK2:
-               return id - LP8725_ID_BASE;
-       default:
-               return -EINVAL;
-       }
-}
-
 static int lp872x_get_timestep_usec(struct lp872x *lp)
 {
        enum lp872x_id chip = lp->chipid;
@@ -234,28 +220,20 @@ static int lp872x_get_timestep_usec(struct lp872x *lp)
 static int lp872x_regulator_enable_time(struct regulator_dev *rdev)
 {
        struct lp872x *lp = rdev_get_drvdata(rdev);
-       enum lp872x_regulator_id regulator = rdev_get_id(rdev);
+       enum lp872x_regulator_id rid = rdev_get_id(rdev);
        int time_step_us = lp872x_get_timestep_usec(lp);
-       int ret, offset;
+       int ret;
        u8 addr, val;
 
        if (time_step_us < 0)
                return -EINVAL;
 
-       switch (regulator) {
-       case LP8720_ID_LDO1 ... LP8720_ID_LDO5:
-       case LP8725_ID_LDO1 ... LP8725_ID_LILO2:
-               offset = _rdev_to_offset(rdev);
-               if (offset < 0)
-                       return -EINVAL;
-
-               addr = LP872X_LDO1_VOUT + offset;
-               break;
-       case LP8720_ID_BUCK:
-               addr = LP8720_BUCK_VOUT1;
+       switch (rid) {
+       case LP8720_ID_LDO1 ... LP8720_ID_BUCK:
+               addr = LP872X_LDO1_VOUT + rid;
                break;
-       case LP8725_ID_BUCK1:
-               addr = LP8725_BUCK1_VOUT1;
+       case LP8725_ID_LDO1 ... LP8725_ID_BUCK1:
+               addr = LP872X_LDO1_VOUT + rid - LP8725_ID_BASE;
                break;
        case LP8725_ID_BUCK2:
                addr = LP8725_BUCK2_VOUT1;
diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c
new file mode 100644 (file)
index 0000000..f0f6ea0
--- /dev/null
@@ -0,0 +1,566 @@
+/*
+ * LP8755 High Performance Power Management Unit : System Interface Driver
+ * (based on rev. 0.26)
+ * Copyright 2012 Texas Instruments
+ *
+ * Author: Daniel(Geon Si) Jeong <daniel.jeong@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/platform_data/lp8755.h>
+
+#define LP8755_REG_BUCK0       0x00
+#define LP8755_REG_BUCK1       0x03
+#define LP8755_REG_BUCK2       0x04
+#define LP8755_REG_BUCK3       0x01
+#define LP8755_REG_BUCK4       0x05
+#define LP8755_REG_BUCK5       0x02
+#define LP8755_REG_MAX         0xFF
+
+#define LP8755_BUCK_EN_M       BIT(7)
+#define LP8755_BUCK_LINEAR_OUT_MAX     0x76
+#define LP8755_BUCK_VOUT_M     0x7F
+
+struct lp8755_mphase {
+       int nreg;
+       int buck_num[LP8755_BUCK_MAX];
+};
+
+struct lp8755_chip {
+       struct device *dev;
+       struct regmap *regmap;
+       struct lp8755_platform_data *pdata;
+
+       int irq;
+       unsigned int irqmask;
+
+       int mphase;
+       struct regulator_dev *rdev[LP8755_BUCK_MAX];
+};
+
+/**
+ * lp8755_read() - read a single register value from lp8755.
+ * @pchip: device to read from
+ * @reg: register to read from
+ * @val: pointer to store read value
+ */
+static int lp8755_read(struct lp8755_chip *pchip, unsigned int reg,
+                      unsigned int *val)
+{
+       return regmap_read(pchip->regmap, reg, val);
+}
+
+/**
+ * lp8755_write() - write a single register value to lp8755.
+ * @pchip: device to write to
+ * @reg: register to write to
+ * @val: value to be written
+ */
+static int lp8755_write(struct lp8755_chip *pchip, unsigned int reg,
+                       unsigned int val)
+{
+       return regmap_write(pchip->regmap, reg, val);
+}
+
+/**
+ * lp8755_update_bits() - set the values of bit fields in an lp8755 register.
+ * @pchip: device to update
+ * @reg: register to update
+ * @mask: bitmask to be changed
+ * @val: value for bitmask
+ */
+static int lp8755_update_bits(struct lp8755_chip *pchip, unsigned int reg,
+                             unsigned int mask, unsigned int val)
+{
+       return regmap_update_bits(pchip->regmap, reg, mask, val);
+}
+
+static int lp8755_buck_enable_time(struct regulator_dev *rdev)
+{
+       int ret;
+       unsigned int regval;
+       enum lp8755_bucks id = rdev_get_id(rdev);
+       struct lp8755_chip *pchip = rdev_get_drvdata(rdev);
+
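+       /* per-buck start-up time register at 0x12 + id; value * 100 gives microseconds */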
+       ret = lp8755_read(pchip, 0x12 + id, &regval);
+       if (ret < 0) {
+               dev_err(pchip->dev, "i2c access error %s\n", __func__);
+               return ret;
+       }
+       return (regval & 0xff) * 100;
+}
+
+static int lp8755_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+       int ret;
+       unsigned int regbval = 0x0;
+       enum lp8755_bucks id = rdev_get_id(rdev);
+       struct lp8755_chip *pchip = rdev_get_drvdata(rdev);
+
+       switch (mode) {
+       case REGULATOR_MODE_FAST:
+               /* forced pwm mode */
+               regbval = (0x01 << id);
+               break;
+       case REGULATOR_MODE_NORMAL:
+               /* enable automatic pwm/pfm mode */
+               ret = lp8755_update_bits(pchip, 0x08 + id, 0x20, 0x00);
+               if (ret < 0)
+                       goto err_i2c;
+               break;
+       case REGULATOR_MODE_IDLE:
+               /* enable automatic pwm/pfm/lppfm mode */
+               ret = lp8755_update_bits(pchip, 0x08 + id, 0x20, 0x20);
+               if (ret < 0)
+                       goto err_i2c;
+
+               ret = lp8755_update_bits(pchip, 0x10, 0x01, 0x01);
+               if (ret < 0)
+                       goto err_i2c;
+               break;
+       default:
+               dev_err(pchip->dev, "Unsupported buck mode %s\n", __func__);
+               /* forced pwm mode */
+               regbval = (0x01 << id);
+       }
+
+       ret = lp8755_update_bits(pchip, 0x06, 0x01 << id, regbval);
+       if (ret < 0)
+               goto err_i2c;
+       return ret;
+err_i2c:
+       dev_err(pchip->dev, "i2c access error %s\n", __func__);
+       return ret;
+}
+
+static unsigned int lp8755_buck_get_mode(struct regulator_dev *rdev)
+{
+       int ret;
+       unsigned int regval;
+       enum lp8755_bucks id = rdev_get_id(rdev);
+       struct lp8755_chip *pchip = rdev_get_drvdata(rdev);
+
+       ret = lp8755_read(pchip, 0x06, &regval);
+       if (ret < 0)
+               goto err_i2c;
+
+       /* mode fast means forced pwm mode */
+       if (regval & (0x01 << id))
+               return REGULATOR_MODE_FAST;
+
+       ret = lp8755_read(pchip, 0x08 + id, &regval);
+       if (ret < 0)
+               goto err_i2c;
+
+       /* mode idle means automatic pwm/pfm/lppfm mode */
+       if (regval & 0x20)
+               return REGULATOR_MODE_IDLE;
+
+       /* mode normal means automatic pwm/pfm mode */
+       return REGULATOR_MODE_NORMAL;
+
+err_i2c:
+       dev_err(pchip->dev, "i2c access error %s\n", __func__);
+       return 0;
+}
+
+static int lp8755_buck_set_ramp(struct regulator_dev *rdev, int ramp)
+{
+       int ret;
+       unsigned int regval = 0x00;
+       enum lp8755_bucks id = rdev_get_id(rdev);
+       struct lp8755_chip *pchip = rdev_get_drvdata(rdev);
+
+       /* uV/us */
+       switch (ramp) {
+       case 0 ... 230:
+               regval = 0x07;
+               break;
+       case 231 ... 470:
+               regval = 0x06;
+               break;
+       case 471 ... 940:
+               regval = 0x05;
+               break;
+       case 941 ... 1900:
+               regval = 0x04;
+               break;
+       case 1901 ... 3800:
+               regval = 0x03;
+               break;
+       case 3801 ... 7500:
+               regval = 0x02;
+               break;
+       case 7501 ... 15000:
+               regval = 0x01;
+               break;
+       case 15001 ... 30000:
+               regval = 0x00;
+               break;
+       default:
+               dev_err(pchip->dev,
+                       "Unsupported ramp value %d %s\n", ramp, __func__);
+               return -EINVAL;
+       }
+
+       ret = lp8755_update_bits(pchip, 0x07 + id, 0x07, regval);
+       if (ret < 0)
+               goto err_i2c;
+       return ret;
+err_i2c:
+       dev_err(pchip->dev, "i2c access error %s\n", __func__);
+       return ret;
+}
+
+static struct regulator_ops lp8755_buck_ops = {
+       .list_voltage = regulator_list_voltage_linear,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .enable_time = lp8755_buck_enable_time,
+       .set_mode = lp8755_buck_set_mode,
+       .get_mode = lp8755_buck_get_mode,
+       .set_ramp_delay = lp8755_buck_set_ramp,
+};
+
+#define lp8755_rail(_id) "lp8755_buck"#_id
+#define lp8755_buck_init(_id)\
+{\
+       .constraints = {\
+               .name = lp8755_rail(_id),\
+               .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,\
+               .min_uV = 500000,\
+               .max_uV = 1675000,\
+       },\
+}
+
+static struct regulator_init_data lp8755_reg_default[LP8755_BUCK_MAX] = {
+       [LP8755_BUCK0] = lp8755_buck_init(0),
+       [LP8755_BUCK1] = lp8755_buck_init(1),
+       [LP8755_BUCK2] = lp8755_buck_init(2),
+       [LP8755_BUCK3] = lp8755_buck_init(3),
+       [LP8755_BUCK4] = lp8755_buck_init(4),
+       [LP8755_BUCK5] = lp8755_buck_init(5),
+};
+
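+/* bucks that remain independently controllable in each multi-phase configuration */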
+static const struct lp8755_mphase mphase_buck[MPHASE_CONF_MAX] = {
+       { 3, { LP8755_BUCK0, LP8755_BUCK3, LP8755_BUCK5 } },
+       { 6, { LP8755_BUCK0, LP8755_BUCK1, LP8755_BUCK2, LP8755_BUCK3,
+              LP8755_BUCK4, LP8755_BUCK5 } },
+       { 5, { LP8755_BUCK0, LP8755_BUCK2, LP8755_BUCK3, LP8755_BUCK4,
+              LP8755_BUCK5} },
+       { 4, { LP8755_BUCK0, LP8755_BUCK3, LP8755_BUCK4, LP8755_BUCK5} },
+       { 3, { LP8755_BUCK0, LP8755_BUCK4, LP8755_BUCK5} },
+       { 2, { LP8755_BUCK0, LP8755_BUCK5} },
+       { 1, { LP8755_BUCK0} },
+       { 2, { LP8755_BUCK0, LP8755_BUCK3} },
+       { 4, { LP8755_BUCK0, LP8755_BUCK2, LP8755_BUCK3, LP8755_BUCK5} },
+};
+
+static int lp8755_init_data(struct lp8755_chip *pchip)
+{
+       unsigned int regval;
+       int ret, icnt, buck_num;
+       struct lp8755_platform_data *pdata = pchip->pdata;
+
+       /* read back the multi-phase configuration */
+       ret = lp8755_read(pchip, 0x3D, &regval);
+       if (ret < 0)
+               goto out_i2c_error;
+       pchip->mphase = regval & 0x0F;
+
+       /* set default data based on multi-phase config */
+       for (icnt = 0; icnt < mphase_buck[pchip->mphase].nreg; icnt++) {
+               buck_num = mphase_buck[pchip->mphase].buck_num[icnt];
+               pdata->buck_data[buck_num] = &lp8755_reg_default[buck_num];
+       }
+       return ret;
+
+out_i2c_error:
+       dev_err(pchip->dev, "i2c access error %s\n", __func__);
+       return ret;
+}
+
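+/* each buck uses one register: enable bit in bit 7, 7-bit linear voltage selector below */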
+#define lp8755_buck_desc(_id)\
+{\
+       .name = lp8755_rail(_id),\
+       .id   = LP8755_BUCK##_id,\
+       .ops  = &lp8755_buck_ops,\
+       .n_voltages = LP8755_BUCK_LINEAR_OUT_MAX+1,\
+       .uV_step = 10000,\
+       .min_uV = 500000,\
+       .type = REGULATOR_VOLTAGE,\
+       .owner = THIS_MODULE,\
+       .enable_reg = LP8755_REG_BUCK##_id,\
+       .enable_mask = LP8755_BUCK_EN_M,\
+       .vsel_reg = LP8755_REG_BUCK##_id,\
+       .vsel_mask = LP8755_BUCK_VOUT_M,\
+}
+
+static struct regulator_desc lp8755_regulators[] = {
+       lp8755_buck_desc(0),
+       lp8755_buck_desc(1),
+       lp8755_buck_desc(2),
+       lp8755_buck_desc(3),
+       lp8755_buck_desc(4),
+       lp8755_buck_desc(5),
+};
+
+static int lp8755_regulator_init(struct lp8755_chip *pchip)
+{
+       int ret, icnt, buck_num;
+       struct lp8755_platform_data *pdata = pchip->pdata;
+       struct regulator_config rconfig = { };
+
+       rconfig.regmap = pchip->regmap;
+       rconfig.dev = pchip->dev;
+       rconfig.driver_data = pchip;
+
+       for (icnt = 0; icnt < mphase_buck[pchip->mphase].nreg; icnt++) {
+               buck_num = mphase_buck[pchip->mphase].buck_num[icnt];
+               rconfig.init_data = pdata->buck_data[buck_num];
+               rconfig.of_node = pchip->dev->of_node;
+               pchip->rdev[buck_num] =
+                   regulator_register(&lp8755_regulators[buck_num], &rconfig);
+               if (IS_ERR(pchip->rdev[buck_num])) {
+                       ret = PTR_ERR(pchip->rdev[buck_num]);
+                       pchip->rdev[buck_num] = NULL;
+                       dev_err(pchip->dev, "regulator init failed: buck %d\n",
+                               buck_num);
+                       goto err_buck;
+               }
+       }
+
+       return 0;
+
+err_buck:
+       for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
+               regulator_unregister(pchip->rdev[icnt]);
+       return ret;
+}
+
+static irqreturn_t lp8755_irq_handler(int irq, void *data)
+{
+       int ret, icnt;
+       unsigned int flag0, flag1;
+       struct lp8755_chip *pchip = data;
+
+       /* read flag0 register */
+       ret = lp8755_read(pchip, 0x0D, &flag0);
+       if (ret < 0)
+               goto err_i2c;
+       /* clear flag register to pull up int. pin */
+       ret = lp8755_write(pchip, 0x0D, 0x00);
+       if (ret < 0)
+               goto err_i2c;
+
+       /* send power fault detection event to the specific regulator */
+       for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
+               if ((flag0 & (0x4 << icnt))
+                   && (pchip->irqmask & (0x04 << icnt))
+                   && (pchip->rdev[icnt] != NULL))
+                       regulator_notifier_call_chain(pchip->rdev[icnt],
+                                                     LP8755_EVENT_PWR_FAULT,
+                                                     NULL);
+
+       /* read flag1 register */
+       ret = lp8755_read(pchip, 0x0E, &flag1);
+       if (ret < 0)
+               goto err_i2c;
+       /* clear flag register to pull up int. pin */
+       ret = lp8755_write(pchip, 0x0E, 0x00);
+       if (ret < 0)
+               goto err_i2c;
+
+       /* send OCP event to all regulator devices */
+       if ((flag1 & 0x01) && (pchip->irqmask & 0x01))
+               for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
+                       if (pchip->rdev[icnt] != NULL)
+                               regulator_notifier_call_chain(pchip->rdev[icnt],
+                                                             LP8755_EVENT_OCP,
+                                                             NULL);
+
+       /* send OVP event to all regulator devices */
+       if ((flag1 & 0x02) && (pchip->irqmask & 0x02))
+               for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
+                       if (pchip->rdev[icnt] != NULL)
+                               regulator_notifier_call_chain(pchip->rdev[icnt],
+                                                             LP8755_EVENT_OVP,
+                                                             NULL);
+       return IRQ_HANDLED;
+
+err_i2c:
+       dev_err(pchip->dev, "i2c access error %s\n", __func__);
+       return IRQ_NONE;
+}
+
+static int lp8755_int_config(struct lp8755_chip *pchip)
+{
+       int ret;
+       unsigned int regval;
+
+       if (pchip->irq == 0) {
+               dev_warn(pchip->dev, "not using interrupt: %s\n", __func__);
+               return 0;
+       }
+
+       ret = lp8755_read(pchip, 0x0F, &regval);
+       if (ret < 0)
+               goto err_i2c;
+       pchip->irqmask = regval;
+       ret = request_threaded_irq(pchip->irq, NULL, lp8755_irq_handler,
+                                  IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                  "lp8755-irq", pchip);
+       if (ret)
+               return ret;
+
+       return ret;
+
+err_i2c:
+       dev_err(pchip->dev, "i2c access error %s\n", __func__);
+       return ret;
+}
+
+static const struct regmap_config lp8755_regmap = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .max_register = LP8755_REG_MAX,
+};
+
+static int lp8755_probe(struct i2c_client *client,
+                       const struct i2c_device_id *id)
+{
+       int ret, icnt;
+       struct lp8755_chip *pchip;
+       struct lp8755_platform_data *pdata = client->dev.platform_data;
+
+       if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+               dev_err(&client->dev, "i2c functionality check failed\n");
+               return -EOPNOTSUPP;
+       }
+
+       pchip = devm_kzalloc(&client->dev,
+                            sizeof(struct lp8755_chip), GFP_KERNEL);
+       if (!pchip)
+               return -ENOMEM;
+
+       pchip->dev = &client->dev;
+       pchip->regmap = devm_regmap_init_i2c(client, &lp8755_regmap);
+       if (IS_ERR(pchip->regmap)) {
+               ret = PTR_ERR(pchip->regmap);
+               dev_err(&client->dev, "failed to allocate regmap: %d\n", ret);
+               return ret;
+       }
+       i2c_set_clientdata(client, pchip);
+
+       if (pdata != NULL) {
+               pchip->pdata = pdata;
+               pchip->mphase = pdata->mphase;
+       } else {
+               pchip->pdata = devm_kzalloc(pchip->dev,
+                                           sizeof(struct lp8755_platform_data),
+                                           GFP_KERNEL);
+               if (!pchip->pdata)
+                       return -ENOMEM;
+               ret = lp8755_init_data(pchip);
+               if (ret < 0) {
+                       dev_err(&client->dev, "failed to initialize chip\n");
+                       return ret;
+               }
+       }
+
+       ret = lp8755_regulator_init(pchip);
+       if (ret < 0) {
+               dev_err(&client->dev, "failed to initialize regulators\n");
+               goto err_regulator;
+       }
+
+       pchip->irq = client->irq;
+       ret = lp8755_int_config(pchip);
+       if (ret < 0) {
+               dev_err(&client->dev, "failed to configure irq\n");
+               goto err_irq;
+       }
+
+       return ret;
+
+err_irq:
+       for (icnt = 0; icnt < mphase_buck[pchip->mphase].nreg; icnt++)
+               regulator_unregister(pchip->rdev[icnt]);
+
+err_regulator:
+       /* output disable */
+       for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
+               lp8755_write(pchip, icnt, 0x00);
+
+       return ret;
+}
+
+static int lp8755_remove(struct i2c_client *client)
+{
+       int icnt;
+       struct lp8755_chip *pchip = i2c_get_clientdata(client);
+
+       for (icnt = 0; icnt < mphase_buck[pchip->mphase].nreg; icnt++)
+               regulator_unregister(pchip->rdev[icnt]);
+
+       for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++)
+               lp8755_write(pchip, icnt, 0x00);
+
+       if (pchip->irq != 0)
+               free_irq(pchip->irq, pchip);
+
+       return 0;
+}
+
+static const struct i2c_device_id lp8755_id[] = {
+       {LP8755_NAME, 0},
+       {}
+};
+
+MODULE_DEVICE_TABLE(i2c, lp8755_id);
+
+static struct i2c_driver lp8755_i2c_driver = {
+       .driver = {
+                  .name = LP8755_NAME,
+                  },
+       .probe = lp8755_probe,
+       .remove = lp8755_remove,
+       .id_table = lp8755_id,
+};
+
+static int __init lp8755_init(void)
+{
+       return i2c_add_driver(&lp8755_i2c_driver);
+}
+
+subsys_initcall(lp8755_init);
+
+static void __exit lp8755_exit(void)
+{
+       i2c_del_driver(&lp8755_i2c_driver);
+}
+
+module_exit(lp8755_exit);
+
+MODULE_DESCRIPTION("Texas Instruments lp8755 driver");
+MODULE_AUTHOR("Daniel Jeong <daniel.jeong@ti.com>");
+MODULE_LICENSE("GPL v2");
index aef3f2b0c5ea433e45647f1c10a7a429d416a234..97891a7ea7b222b59c02420dcbecaadd61530b6c 100644 (file)
@@ -103,16 +103,6 @@ static const int lp8788_buck_vtbl[] = {
        1950000, 2000000,
 };
 
-static const u8 buck1_vout_addr[] = {
-       LP8788_BUCK1_VOUT0, LP8788_BUCK1_VOUT1,
-       LP8788_BUCK1_VOUT2, LP8788_BUCK1_VOUT3,
-};
-
-static const u8 buck2_vout_addr[] = {
-       LP8788_BUCK2_VOUT0, LP8788_BUCK2_VOUT1,
-       LP8788_BUCK2_VOUT2, LP8788_BUCK2_VOUT3,
-};
-
 static void lp8788_buck1_set_dvs(struct lp8788_buck *buck)
 {
        struct lp8788_buck1_dvs *dvs = (struct lp8788_buck1_dvs *)buck->dvs;
@@ -235,7 +225,7 @@ static u8 lp8788_select_buck_vout_addr(struct lp8788_buck *buck,
                        lp8788_read_byte(buck->lp, LP8788_BUCK_DVS_SEL, &val);
                        idx = (val & LP8788_BUCK1_DVS_M) >> LP8788_BUCK1_DVS_S;
                }
-               addr = buck1_vout_addr[idx];
+               addr = LP8788_BUCK1_VOUT0 + idx;
                break;
        case BUCK2:
                if (mode == EXTPIN) {
@@ -258,7 +248,7 @@ static u8 lp8788_select_buck_vout_addr(struct lp8788_buck *buck,
                        lp8788_read_byte(buck->lp, LP8788_BUCK_DVS_SEL, &val);
                        idx = (val & LP8788_BUCK2_DVS_M) >> LP8788_BUCK2_DVS_S;
                }
-               addr = buck2_vout_addr[idx];
+               addr = LP8788_BUCK2_VOUT0 + idx;
                break;
        default:
                goto err;
@@ -429,7 +419,8 @@ static struct regulator_desc lp8788_buck_desc[] = {
        },
 };
 
-static int lp8788_dvs_gpio_request(struct lp8788_buck *buck,
+static int lp8788_dvs_gpio_request(struct platform_device *pdev,
+                               struct lp8788_buck *buck,
                                enum lp8788_buck_id id)
 {
        struct lp8788_platform_data *pdata = buck->lp->pdata;
@@ -440,7 +431,7 @@ static int lp8788_dvs_gpio_request(struct lp8788_buck *buck,
        switch (id) {
        case BUCK1:
                gpio = pdata->buck1_dvs->gpio;
-               ret = devm_gpio_request_one(buck->lp->dev, gpio, DVS_LOW,
+               ret = devm_gpio_request_one(&pdev->dev, gpio, DVS_LOW,
                                            b1_name);
                if (ret)
                        return ret;
@@ -448,9 +439,9 @@ static int lp8788_dvs_gpio_request(struct lp8788_buck *buck,
                buck->dvs = pdata->buck1_dvs;
                break;
        case BUCK2:
-               for (i = 0 ; i < LP8788_NUM_BUCK2_DVS ; i++) {
+               for (i = 0; i < LP8788_NUM_BUCK2_DVS; i++) {
                        gpio = pdata->buck2_dvs->gpio[i];
-                       ret = devm_gpio_request_one(buck->lp->dev, gpio,
+                       ret = devm_gpio_request_one(&pdev->dev, gpio,
                                                    DVS_LOW, b2_name[i]);
                        if (ret)
                                return ret;
@@ -464,7 +455,8 @@ static int lp8788_dvs_gpio_request(struct lp8788_buck *buck,
        return 0;
 }
 
-static int lp8788_init_dvs(struct lp8788_buck *buck, enum lp8788_buck_id id)
+static int lp8788_init_dvs(struct platform_device *pdev,
+                       struct lp8788_buck *buck, enum lp8788_buck_id id)
 {
        struct lp8788_platform_data *pdata = buck->lp->pdata;
        u8 mask[] = { LP8788_BUCK1_DVS_SEL_M, LP8788_BUCK2_DVS_SEL_M };
@@ -472,7 +464,7 @@ static int lp8788_init_dvs(struct lp8788_buck *buck, enum lp8788_buck_id id)
        u8 default_dvs_mode[] = { LP8788_BUCK1_DVS_I2C, LP8788_BUCK2_DVS_I2C };
 
        /* no dvs for buck3, 4 */
-       if (id == BUCK3 || id == BUCK4)
+       if (id > BUCK2)
                return 0;
 
        /* no dvs platform data, then dvs will be selected by I2C registers */
@@ -483,7 +475,7 @@ static int lp8788_init_dvs(struct lp8788_buck *buck, enum lp8788_buck_id id)
                (id == BUCK2 && !pdata->buck2_dvs))
                goto set_default_dvs_mode;
 
-       if (lp8788_dvs_gpio_request(buck, id))
+       if (lp8788_dvs_gpio_request(pdev, buck, id))
                goto set_default_dvs_mode;
 
        return lp8788_update_bits(buck->lp, LP8788_BUCK_DVS_SEL, mask[id],
@@ -503,17 +495,20 @@ static int lp8788_buck_probe(struct platform_device *pdev)
        struct regulator_dev *rdev;
        int ret;
 
-       buck = devm_kzalloc(lp->dev, sizeof(struct lp8788_buck), GFP_KERNEL);
+       if (id >= LP8788_NUM_BUCKS)
+               return -EINVAL;
+
+       buck = devm_kzalloc(&pdev->dev, sizeof(struct lp8788_buck), GFP_KERNEL);
        if (!buck)
                return -ENOMEM;
 
        buck->lp = lp;
 
-       ret = lp8788_init_dvs(buck, id);
+       ret = lp8788_init_dvs(pdev, buck, id);
        if (ret)
                return ret;
 
-       cfg.dev = lp->dev;
+       cfg.dev = pdev->dev.parent;
        cfg.init_data = lp->pdata ? lp->pdata->buck_data[id] : NULL;
        cfg.driver_data = buck;
        cfg.regmap = lp->regmap;
@@ -521,7 +516,7 @@ static int lp8788_buck_probe(struct platform_device *pdev)
        rdev = regulator_register(&lp8788_buck_desc[id], &cfg);
        if (IS_ERR(rdev)) {
                ret = PTR_ERR(rdev);
-               dev_err(lp->dev, "BUCK%d regulator register err = %d\n",
+               dev_err(&pdev->dev, "BUCK%d regulator register err = %d\n",
                                id + 1, ret);
                return ret;
        }
index 3792741708ce65b4b0bbb3449643baef4a21b22c..cd5a14ad9263c7d0e095eab699620032d4286987 100644 (file)
 #define ENABLE                         GPIOF_OUT_INIT_HIGH
 #define DISABLE                                GPIOF_OUT_INIT_LOW
 
-enum lp8788_enable_mode {
-       REGISTER,
-       EXTPIN,
-};
-
 enum lp8788_ldo_id {
        DLDO1,
        DLDO2,
@@ -189,114 +184,38 @@ static enum lp8788_ldo_id lp8788_aldo_id[] = {
        ALDO10,
 };
 
-/* DLDO 7, 9 and 11, ALDO 1 ~ 5 and 7
-   : can be enabled either by external pin or by i2c register */
-static enum lp8788_enable_mode
-lp8788_get_ldo_enable_mode(struct lp8788_ldo *ldo, enum lp8788_ldo_id id)
-{
-       int ret;
-       u8 val, mask;
-
-       ret = lp8788_read_byte(ldo->lp, LP8788_EN_SEL, &val);
-       if (ret)
-               return ret;
-
-       switch (id) {
-       case DLDO7:
-               mask =  LP8788_EN_SEL_DLDO7_M;
-               break;
-       case DLDO9:
-       case DLDO11:
-               mask =  LP8788_EN_SEL_DLDO911_M;
-               break;
-       case ALDO1:
-               mask =  LP8788_EN_SEL_ALDO1_M;
-               break;
-       case ALDO2 ... ALDO4:
-               mask =  LP8788_EN_SEL_ALDO234_M;
-               break;
-       case ALDO5:
-               mask =  LP8788_EN_SEL_ALDO5_M;
-               break;
-       case ALDO7:
-               mask =  LP8788_EN_SEL_ALDO7_M;
-               break;
-       default:
-               return REGISTER;
-       }
-
-       return val & mask ? EXTPIN : REGISTER;
-}
-
-static int lp8788_ldo_ctrl_by_extern_pin(struct lp8788_ldo *ldo, int pinstate)
-{
-       struct lp8788_ldo_enable_pin *pin = ldo->en_pin;
-
-       if (!pin)
-               return -EINVAL;
-
-       if (gpio_is_valid(pin->gpio))
-               gpio_set_value(pin->gpio, pinstate);
-
-       return 0;
-}
-
-static int lp8788_ldo_is_enabled_by_extern_pin(struct lp8788_ldo *ldo)
-{
-       struct lp8788_ldo_enable_pin *pin = ldo->en_pin;
-
-       if (!pin)
-               return -EINVAL;
-
-       return gpio_get_value(pin->gpio) ? 1 : 0;
-}
-
 static int lp8788_ldo_enable(struct regulator_dev *rdev)
 {
        struct lp8788_ldo *ldo = rdev_get_drvdata(rdev);
-       enum lp8788_ldo_id id = rdev_get_id(rdev);
-       enum lp8788_enable_mode mode = lp8788_get_ldo_enable_mode(ldo, id);
 
-       switch (mode) {
-       case EXTPIN:
-               return lp8788_ldo_ctrl_by_extern_pin(ldo, ENABLE);
-       case REGISTER:
+       if (ldo->en_pin) {
+               gpio_set_value(ldo->en_pin->gpio, ENABLE);
+               return 0;
+       } else {
                return regulator_enable_regmap(rdev);
-       default:
-               return -EINVAL;
        }
 }
 
 static int lp8788_ldo_disable(struct regulator_dev *rdev)
 {
        struct lp8788_ldo *ldo = rdev_get_drvdata(rdev);
-       enum lp8788_ldo_id id = rdev_get_id(rdev);
-       enum lp8788_enable_mode mode = lp8788_get_ldo_enable_mode(ldo, id);
 
-       switch (mode) {
-       case EXTPIN:
-               return lp8788_ldo_ctrl_by_extern_pin(ldo, DISABLE);
-       case REGISTER:
+       if (ldo->en_pin) {
+               gpio_set_value(ldo->en_pin->gpio, DISABLE);
+               return 0;
+       } else {
                return regulator_disable_regmap(rdev);
-       default:
-               return -EINVAL;
        }
 }
 
 static int lp8788_ldo_is_enabled(struct regulator_dev *rdev)
 {
        struct lp8788_ldo *ldo = rdev_get_drvdata(rdev);
-       enum lp8788_ldo_id id = rdev_get_id(rdev);
-       enum lp8788_enable_mode mode = lp8788_get_ldo_enable_mode(ldo, id);
 
-       switch (mode) {
-       case EXTPIN:
-               return lp8788_ldo_is_enabled_by_extern_pin(ldo);
-       case REGISTER:
+       if (ldo->en_pin)
+               return gpio_get_value(ldo->en_pin->gpio) ? 1 : 0;
+       else
                return regulator_is_enabled_regmap(rdev);
-       default:
-               return -EINVAL;
-       }
 }
 
 static int lp8788_ldo_enable_time(struct regulator_dev *rdev)
@@ -616,10 +535,11 @@ static struct regulator_desc lp8788_aldo_desc[] = {
        },
 };
 
-static int lp8788_gpio_request_ldo_en(struct lp8788_ldo *ldo,
+static int lp8788_gpio_request_ldo_en(struct platform_device *pdev,
+                               struct lp8788_ldo *ldo,
                                enum lp8788_ext_ldo_en_id id)
 {
-       struct device *dev = ldo->lp->dev;
+       struct device *dev = &pdev->dev;
        struct lp8788_ldo_enable_pin *pin = ldo->en_pin;
        int ret, gpio, pinstate;
        char *name[] = {
@@ -647,7 +567,8 @@ static int lp8788_gpio_request_ldo_en(struct lp8788_ldo *ldo,
        return ret;
 }
 
-static int lp8788_config_ldo_enable_mode(struct lp8788_ldo *ldo,
+static int lp8788_config_ldo_enable_mode(struct platform_device *pdev,
+                                       struct lp8788_ldo *ldo,
                                        enum lp8788_ldo_id id)
 {
        int ret;
@@ -693,9 +614,11 @@ static int lp8788_config_ldo_enable_mode(struct lp8788_ldo *ldo,
 
        ldo->en_pin = pdata->ldo_pin[enable_id];
 
-       ret = lp8788_gpio_request_ldo_en(ldo, enable_id);
-       if (ret)
+       ret = lp8788_gpio_request_ldo_en(pdev, ldo, enable_id);
+       if (ret) {
+               ldo->en_pin = NULL;
                goto set_default_ldo_enable_mode;
+       }
 
        return ret;
 
@@ -712,16 +635,16 @@ static int lp8788_dldo_probe(struct platform_device *pdev)
        struct regulator_dev *rdev;
        int ret;
 
-       ldo = devm_kzalloc(lp->dev, sizeof(struct lp8788_ldo), GFP_KERNEL);
+       ldo = devm_kzalloc(&pdev->dev, sizeof(struct lp8788_ldo), GFP_KERNEL);
        if (!ldo)
                return -ENOMEM;
 
        ldo->lp = lp;
-       ret = lp8788_config_ldo_enable_mode(ldo, lp8788_dldo_id[id]);
+       ret = lp8788_config_ldo_enable_mode(pdev, ldo, lp8788_dldo_id[id]);
        if (ret)
                return ret;
 
-       cfg.dev = lp->dev;
+       cfg.dev = pdev->dev.parent;
        cfg.init_data = lp->pdata ? lp->pdata->dldo_data[id] : NULL;
        cfg.driver_data = ldo;
        cfg.regmap = lp->regmap;
@@ -729,7 +652,7 @@ static int lp8788_dldo_probe(struct platform_device *pdev)
        rdev = regulator_register(&lp8788_dldo_desc[id], &cfg);
        if (IS_ERR(rdev)) {
                ret = PTR_ERR(rdev);
-               dev_err(lp->dev, "DLDO%d regulator register err = %d\n",
+               dev_err(&pdev->dev, "DLDO%d regulator register err = %d\n",
                                id + 1, ret);
                return ret;
        }
@@ -768,16 +691,16 @@ static int lp8788_aldo_probe(struct platform_device *pdev)
        struct regulator_dev *rdev;
        int ret;
 
-       ldo = devm_kzalloc(lp->dev, sizeof(struct lp8788_ldo), GFP_KERNEL);
+       ldo = devm_kzalloc(&pdev->dev, sizeof(struct lp8788_ldo), GFP_KERNEL);
        if (!ldo)
                return -ENOMEM;
 
        ldo->lp = lp;
-       ret = lp8788_config_ldo_enable_mode(ldo, lp8788_aldo_id[id]);
+       ret = lp8788_config_ldo_enable_mode(pdev, ldo, lp8788_aldo_id[id]);
        if (ret)
                return ret;
 
-       cfg.dev = lp->dev;
+       cfg.dev = pdev->dev.parent;
        cfg.init_data = lp->pdata ? lp->pdata->aldo_data[id] : NULL;
        cfg.driver_data = ldo;
        cfg.regmap = lp->regmap;
@@ -785,7 +708,7 @@ static int lp8788_aldo_probe(struct platform_device *pdev)
        rdev = regulator_register(&lp8788_aldo_desc[id], &cfg);
        if (IS_ERR(rdev)) {
                ret = PTR_ERR(rdev);
-               dev_err(lp->dev, "ALDO%d regulator register err = %d\n",
+               dev_err(&pdev->dev, "ALDO%d regulator register err = %d\n",
                                id + 1, ret);
                return ret;
        }
index b85040caaea318b13154927bc1beb0e13aef6ad4..e4586ee8858d9cfb049db922e58ca92d1b4b7e48 100644 (file)
@@ -75,13 +75,14 @@ static int max77686_buck_set_suspend_disable(struct regulator_dev *rdev)
 {
        unsigned int val;
        struct max77686_data *max77686 = rdev_get_drvdata(rdev);
+       int id = rdev_get_id(rdev);
 
-       if (rdev->desc->id == MAX77686_BUCK1)
+       if (id == MAX77686_BUCK1)
                val = 0x1;
        else
                val = 0x1 << MAX77686_OPMODE_BUCK234_SHIFT;
 
-       max77686->opmode[rdev->desc->id] = val;
+       max77686->opmode[id] = val;
        return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
                                  rdev->desc->enable_mask,
                                  val);
@@ -93,9 +94,10 @@ static int max77686_set_suspend_mode(struct regulator_dev *rdev,
 {
        struct max77686_data *max77686 = rdev_get_drvdata(rdev);
        unsigned int val;
+       int id = rdev_get_id(rdev);
 
        /* BUCK[5-9] doesn't support this feature */
-       if (rdev->desc->id >= MAX77686_BUCK5)
+       if (id >= MAX77686_BUCK5)
                return 0;
 
        switch (mode) {
@@ -111,7 +113,7 @@ static int max77686_set_suspend_mode(struct regulator_dev *rdev,
                return -EINVAL;
        }
 
-       max77686->opmode[rdev->desc->id] = val;
+       max77686->opmode[id] = val;
        return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
                                  rdev->desc->enable_mask,
                                  val);
@@ -140,7 +142,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
                return -EINVAL;
        }
 
-       max77686->opmode[rdev->desc->id] = val;
+       max77686->opmode[rdev_get_id(rdev)] = val;
        return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
                                  rdev->desc->enable_mask,
                                  val);
@@ -152,7 +154,7 @@ static int max77686_enable(struct regulator_dev *rdev)
 
        return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
                                  rdev->desc->enable_mask,
-                                 max77686->opmode[rdev->desc->id]);
+                                 max77686->opmode[rdev_get_id(rdev)]);
 }
 
 static int max77686_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
@@ -379,9 +381,10 @@ static struct regulator_desc regulators[] = {
 };
 
 #ifdef CONFIG_OF
-static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
+static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
                                        struct max77686_platform_data *pdata)
 {
+       struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
        struct device_node *pmic_np, *regulators_np;
        struct max77686_regulator_data *rdata;
        struct of_regulator_match rmatch;
@@ -390,15 +393,15 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
        pmic_np = iodev->dev->of_node;
        regulators_np = of_find_node_by_name(pmic_np, "voltage-regulators");
        if (!regulators_np) {
-               dev_err(iodev->dev, "could not find regulators sub-node\n");
+               dev_err(&pdev->dev, "could not find regulators sub-node\n");
                return -EINVAL;
        }
 
        pdata->num_regulators = ARRAY_SIZE(regulators);
-       rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) *
+       rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
                             pdata->num_regulators, GFP_KERNEL);
        if (!rdata) {
-               dev_err(iodev->dev,
+               dev_err(&pdev->dev,
                        "could not allocate memory for regulator data\n");
                return -ENOMEM;
        }
@@ -407,7 +410,7 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
                rmatch.name = regulators[i].name;
                rmatch.init_data = NULL;
                rmatch.of_node = NULL;
-               of_regulator_match(iodev->dev, regulators_np, &rmatch, 1);
+               of_regulator_match(&pdev->dev, regulators_np, &rmatch, 1);
                rdata[i].initdata = rmatch.init_data;
                rdata[i].of_node = rmatch.of_node;
        }
@@ -417,7 +420,7 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
        return 0;
 }
 #else
-static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
+static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
                                        struct max77686_platform_data *pdata)
 {
        return 0;
@@ -440,7 +443,7 @@ static int max77686_pmic_probe(struct platform_device *pdev)
        }
 
        if (iodev->dev->of_node) {
-               ret = max77686_pmic_dt_parse_pdata(iodev, pdata);
+               ret = max77686_pmic_dt_parse_pdata(pdev, pdata);
                if (ret)
                        return ret;
        }
index d1a77512d83e69501c2a4a3a3e8c3f09b75f7ba6..4568c15fa78dea0eafda170f64f9fc86db251faa 100644 (file)
@@ -224,11 +224,11 @@ static struct of_regulator_match max8907_matches[] = {
 
 static int max8907_regulator_parse_dt(struct platform_device *pdev)
 {
-       struct device_node *np = pdev->dev.parent->of_node;
-       struct device_node *regulators;
+       struct device_node *np, *regulators;
        int ret;
 
-       if (!pdev->dev.parent->of_node)
+       np = of_node_get(pdev->dev.parent->of_node);
+       if (!np)
                return 0;
 
        regulators = of_find_node_by_name(np, "regulators");
@@ -237,9 +237,9 @@ static int max8907_regulator_parse_dt(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       ret = of_regulator_match(pdev->dev.parent, regulators,
-                                max8907_matches,
+       ret = of_regulator_match(&pdev->dev, regulators, max8907_matches,
                                 ARRAY_SIZE(max8907_matches));
+       of_node_put(regulators);
        if (ret < 0) {
                dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
                        ret);
index 446a854455535b4603fa2584f09498b0e557cc0e..0d5f64a805a039561390b975dbfb72551cc4f5d1 100644 (file)
@@ -252,7 +252,7 @@ static int max8925_regulator_dt_init(struct platform_device *pdev,
 {
        struct device_node *nproot, *np;
        int rcount;
-       nproot = pdev->dev.parent->of_node;
+       nproot = of_node_get(pdev->dev.parent->of_node);
        if (!nproot)
                return -ENODEV;
        np = of_find_node_by_name(nproot, "regulators");
@@ -263,6 +263,7 @@ static int max8925_regulator_dt_init(struct platform_device *pdev,
 
        rcount = of_regulator_match(&pdev->dev, np,
                                &max8925_regulator_matches[ridx], 1);
+       of_node_put(np);
        if (rcount < 0)
                return -ENODEV;
        config->init_data =     max8925_regulator_matches[ridx].init_data;
index df0eafb0dc7e63a1f4a9e8bfb989d91caa1d9f63..0ac7a87519b46515e3c8b360a95778eea28014fd 100644 (file)
@@ -54,6 +54,13 @@ struct max8997_data {
        u8 saved_states[MAX8997_REG_MAX];
 };
 
+static const unsigned int safeoutvolt[] = {
+       4850000,
+       4900000,
+       4950000,
+       3300000,
+};
+
 static inline void max8997_set_gpio(struct max8997_data *max8997)
 {
        int set3 = (max8997->buck125_gpioindex) & 0x1;
@@ -71,26 +78,26 @@ struct voltage_map_desc {
        int step;
 };
 
-/* Voltage maps in mV */
+/* Voltage maps in uV */
 static const struct voltage_map_desc ldo_voltage_map_desc = {
-       .min = 800,     .max = 3950,    .step = 50,
+       .min = 800000,  .max = 3950000, .step = 50000,
 }; /* LDO1 ~ 18, 21 all */
 
 static const struct voltage_map_desc buck1245_voltage_map_desc = {
-       .min = 650,     .max = 2225,    .step = 25,
+       .min = 650000,  .max = 2225000, .step = 25000,
 }; /* Buck1, 2, 4, 5 */
 
 static const struct voltage_map_desc buck37_voltage_map_desc = {
-       .min = 750,     .max = 3900,    .step = 50,
+       .min = 750000,  .max = 3900000, .step = 50000,
 }; /* Buck3, 7 */
 
-/* current map in mA */
+/* current map in uA */
 static const struct voltage_map_desc charger_current_map_desc = {
-       .min = 200,     .max = 950,     .step = 50,
+       .min = 200000,  .max = 950000,  .step = 50000,
 };
 
 static const struct voltage_map_desc topoff_current_map_desc = {
-       .min = 50,      .max = 200,     .step = 10,
+       .min = 50000,   .max = 200000,  .step = 10000,
 };
 
 static const struct voltage_map_desc *reg_voltage_map[] = {
@@ -130,29 +137,6 @@ static const struct voltage_map_desc *reg_voltage_map[] = {
        [MAX8997_CHARGER_TOPOFF] = &topoff_current_map_desc,
 };
 
-static int max8997_list_voltage_safeout(struct regulator_dev *rdev,
-               unsigned int selector)
-{
-       int rid = rdev_get_id(rdev);
-
-       if (rid == MAX8997_ESAFEOUT1 || rid == MAX8997_ESAFEOUT2) {
-               switch (selector) {
-               case 0:
-                       return 4850000;
-               case 1:
-                       return 4900000;
-               case 2:
-                       return 4950000;
-               case 3:
-                       return 3300000;
-               default:
-                       return -EINVAL;
-               }
-       }
-
-       return -EINVAL;
-}
-
 static int max8997_list_voltage_charger_cv(struct regulator_dev *rdev,
                unsigned int selector)
 {
@@ -194,7 +178,7 @@ static int max8997_list_voltage(struct regulator_dev *rdev,
        if (val > desc->max)
                return -EINVAL;
 
-       return val * 1000;
+       return val;
 }
 
 static int max8997_get_enable_register(struct regulator_dev *rdev,
@@ -485,7 +469,6 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
 {
        struct max8997_data *max8997 = rdev_get_drvdata(rdev);
        struct i2c_client *i2c = max8997->iodev->i2c;
-       int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
        const struct voltage_map_desc *desc;
        int rid = rdev_get_id(rdev);
        int i, reg, shift, mask, ret;
@@ -509,7 +492,7 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
 
        desc = reg_voltage_map[rid];
 
-       i = max8997_get_voltage_proper_val(desc, min_vol, max_vol);
+       i = max8997_get_voltage_proper_val(desc, min_uV, max_uV);
        if (i < 0)
                return i;
 
@@ -523,7 +506,7 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
        return ret;
 }
 
-static int max8997_set_voltage_ldobuck_time_sel(struct regulator_dev *rdev,
+static int max8997_set_voltage_buck_time_sel(struct regulator_dev *rdev,
                                                unsigned int old_selector,
                                                unsigned int new_selector)
 {
@@ -557,7 +540,7 @@ static int max8997_set_voltage_ldobuck_time_sel(struct regulator_dev *rdev,
        case MAX8997_BUCK4:
        case MAX8997_BUCK5:
                return DIV_ROUND_UP(desc->step * (new_selector - old_selector),
-                                   max8997->ramp_delay);
+                                   max8997->ramp_delay * 1000);
        }
 
        return 0;
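
The extra factor of 1000 in the divisor is there because desc->step is now in uV while ramp_delay stays in mV/us, as the original mV-based formula implied. A standalone arithmetic check with made-up selector values:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int step_uV = 25000;		/* buck1245 step, now in uV */
	int old_sel = 10, new_sel = 20;	/* 250 mV apart */
	int ramp_mV_per_us = 25;	/* assumed ramp rate */

	/* same expression as max8997_set_voltage_buck_time_sel() */
	int delay_us = DIV_ROUND_UP(step_uV * (new_sel - old_sel),
				    ramp_mV_per_us * 1000);

	printf("%d us\n", delay_us);	/* 250 mV at 25 mV/us -> 10 us */
	return 0;
}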
@@ -656,7 +639,6 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
        const struct voltage_map_desc *desc;
        int new_val, new_idx, damage, tmp_val, tmp_idx, tmp_dmg;
        bool gpio_dvs_mode = false;
-       int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
 
        if (rid < MAX8997_BUCK1 || rid > MAX8997_BUCK7)
                return -EINVAL;
@@ -681,7 +663,7 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
                                                selector);
 
        desc = reg_voltage_map[rid];
-       new_val = max8997_get_voltage_proper_val(desc, min_vol, max_vol);
+       new_val = max8997_get_voltage_proper_val(desc, min_uV, max_uV);
        if (new_val < 0)
                return new_val;
 
@@ -722,49 +704,23 @@ out:
        return 0;
 }
 
-static const int safeoutvolt[] = {
-       3300000,
-       4850000,
-       4900000,
-       4950000,
-};
-
 /* For SAFEOUT1 and SAFEOUT2 */
-static int max8997_set_voltage_safeout(struct regulator_dev *rdev,
-               int min_uV, int max_uV, unsigned *selector)
+static int max8997_set_voltage_safeout_sel(struct regulator_dev *rdev,
+                                          unsigned selector)
 {
        struct max8997_data *max8997 = rdev_get_drvdata(rdev);
        struct i2c_client *i2c = max8997->iodev->i2c;
        int rid = rdev_get_id(rdev);
        int reg, shift = 0, mask, ret;
-       int i = 0;
-       u8 val;
 
        if (rid != MAX8997_ESAFEOUT1 && rid != MAX8997_ESAFEOUT2)
                return -EINVAL;
 
-       for (i = 0; i < ARRAY_SIZE(safeoutvolt); i++) {
-               if (min_uV <= safeoutvolt[i] &&
-                               max_uV >= safeoutvolt[i])
-                       break;
-       }
-
-       if (i >= ARRAY_SIZE(safeoutvolt))
-               return -EINVAL;
-
-       if (i == 0)
-               val = 0x3;
-       else
-               val = i - 1;
-
        ret = max8997_get_voltage_register(rdev, &reg, &shift, &mask);
        if (ret)
                return ret;
 
-       ret = max8997_update_reg(i2c, reg, val << shift, mask << shift);
-       *selector = val;
-
-       return ret;
+       return max8997_update_reg(i2c, reg, selector << shift, mask << shift);
 }
 
 static int max8997_reg_disable_suspend(struct regulator_dev *rdev)
@@ -801,7 +757,6 @@ static struct regulator_ops max8997_ldo_ops = {
        .disable                = max8997_reg_disable,
        .get_voltage_sel        = max8997_get_voltage_sel,
        .set_voltage            = max8997_set_voltage_ldobuck,
-       .set_voltage_time_sel   = max8997_set_voltage_ldobuck_time_sel,
        .set_suspend_disable    = max8997_reg_disable_suspend,
 };
 
@@ -812,7 +767,7 @@ static struct regulator_ops max8997_buck_ops = {
        .disable                = max8997_reg_disable,
        .get_voltage_sel        = max8997_get_voltage_sel,
        .set_voltage            = max8997_set_voltage_buck,
-       .set_voltage_time_sel   = max8997_set_voltage_ldobuck_time_sel,
+       .set_voltage_time_sel   = max8997_set_voltage_buck_time_sel,
        .set_suspend_disable    = max8997_reg_disable_suspend,
 };
 
@@ -825,12 +780,12 @@ static struct regulator_ops max8997_fixedvolt_ops = {
 };
 
 static struct regulator_ops max8997_safeout_ops = {
-       .list_voltage           = max8997_list_voltage_safeout,
+       .list_voltage           = regulator_list_voltage_table,
        .is_enabled             = max8997_reg_is_enabled,
        .enable                 = max8997_reg_enable,
        .disable                = max8997_reg_disable,
        .get_voltage_sel        = max8997_get_voltage_sel,
-       .set_voltage            = max8997_set_voltage_safeout,
+       .set_voltage_sel        = max8997_set_voltage_safeout_sel,
        .set_suspend_disable    = max8997_reg_disable_suspend,
 };
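
With list_voltage served by regulator_list_voltage_table and set_voltage_sel writing the selector straight into the register, a SAFEOUT selector is simply an index into safeoutvolt[]. A rough standalone illustration of that mapping (not the regulator core's actual selection code; the requested range is made up):

#include <stdio.h>

static const unsigned int safeoutvolt[] = {
	4850000, 4900000, 4950000, 3300000,
};

int main(void)
{
	unsigned int min_uV = 3000000, max_uV = 3400000;	/* made-up request */
	int sel;

	for (sel = 0; sel < 4; sel++) {
		if (safeoutvolt[sel] >= min_uV && safeoutvolt[sel] <= max_uV) {
			printf("selector %d -> %u uV\n", sel, safeoutvolt[sel]);
			return 0;	/* selector 3 -> 3300000 uV */
		}
	}
	printf("no selector in range\n");
	return 0;
}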
 
@@ -936,7 +891,7 @@ static struct regulator_desc regulators[] = {
 };
 
 #ifdef CONFIG_OF
-static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev,
+static int max8997_pmic_dt_parse_dvs_gpio(struct platform_device *pdev,
                        struct max8997_platform_data *pdata,
                        struct device_node *pmic_np)
 {
@@ -946,7 +901,7 @@ static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev,
                gpio = of_get_named_gpio(pmic_np,
                                        "max8997,pmic-buck125-dvs-gpios", i);
                if (!gpio_is_valid(gpio)) {
-                       dev_err(iodev->dev, "invalid gpio[%d]: %d\n", i, gpio);
+                       dev_err(&pdev->dev, "invalid gpio[%d]: %d\n", i, gpio);
                        return -EINVAL;
                }
                pdata->buck125_gpios[i] = gpio;
@@ -954,35 +909,34 @@ static int max8997_pmic_dt_parse_dvs_gpio(struct max8997_dev *iodev,
        return 0;
 }
 
-static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
+static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
                                        struct max8997_platform_data *pdata)
 {
+       struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
        struct device_node *pmic_np, *regulators_np, *reg_np;
        struct max8997_regulator_data *rdata;
        unsigned int i, dvs_voltage_nr = 1, ret;
 
-       pmic_np = iodev->dev->of_node;
+       pmic_np = of_node_get(iodev->dev->of_node);
        if (!pmic_np) {
-               dev_err(iodev->dev, "could not find pmic sub-node\n");
+               dev_err(&pdev->dev, "could not find pmic sub-node\n");
                return -ENODEV;
        }
 
        regulators_np = of_find_node_by_name(pmic_np, "regulators");
        if (!regulators_np) {
-               dev_err(iodev->dev, "could not find regulators sub-node\n");
+               dev_err(&pdev->dev, "could not find regulators sub-node\n");
                return -EINVAL;
        }
 
        /* count the number of regulators to be supported in pmic */
-       pdata->num_regulators = 0;
-       for_each_child_of_node(regulators_np, reg_np)
-               pdata->num_regulators++;
+       pdata->num_regulators = of_get_child_count(regulators_np);
 
-       rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) *
+       rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
                                pdata->num_regulators, GFP_KERNEL);
        if (!rdata) {
-               dev_err(iodev->dev, "could not allocate memory for "
-                                               "regulator data\n");
+               of_node_put(regulators_np);
+               dev_err(&pdev->dev, "could not allocate memory for regulator data\n");
                return -ENOMEM;
        }
 
@@ -993,17 +947,18 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
                                break;
 
                if (i == ARRAY_SIZE(regulators)) {
-                       dev_warn(iodev->dev, "don't know how to configure "
-                               "regulator %s\n", reg_np->name);
+                       dev_warn(&pdev->dev, "don't know how to configure regulator %s\n",
+                                reg_np->name);
                        continue;
                }
 
                rdata->id = i;
-               rdata->initdata = of_get_regulator_init_data(
-                                               iodev->dev, reg_np);
+               rdata->initdata = of_get_regulator_init_data(&pdev->dev,
+                                                            reg_np);
                rdata->reg_node = reg_np;
                rdata++;
        }
+       of_node_put(regulators_np);
 
        if (of_get_property(pmic_np, "max8997,pmic-buck1-uses-gpio-dvs", NULL))
                pdata->buck1_gpiodvs = true;
@@ -1016,7 +971,7 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
 
        if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
                                                pdata->buck5_gpiodvs) {
-               ret = max8997_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np);
+               ret = max8997_pmic_dt_parse_dvs_gpio(pdev, pdata, pmic_np);
                if (ret)
                        return -EINVAL;
 
@@ -1027,8 +982,7 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
                } else {
                        if (pdata->buck125_default_idx >= 8) {
                                pdata->buck125_default_idx = 0;
-                               dev_info(iodev->dev, "invalid value for "
-                               "default dvs index, using 0 instead\n");
+                               dev_info(&pdev->dev, "invalid value for default dvs index, using 0 instead\n");
                        }
                }
 
@@ -1042,28 +996,28 @@ static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
        if (of_property_read_u32_array(pmic_np,
                                "max8997,pmic-buck1-dvs-voltage",
                                pdata->buck1_voltage, dvs_voltage_nr)) {
-               dev_err(iodev->dev, "buck1 voltages not specified\n");
+               dev_err(&pdev->dev, "buck1 voltages not specified\n");
                return -EINVAL;
        }
 
        if (of_property_read_u32_array(pmic_np,
                                "max8997,pmic-buck2-dvs-voltage",
                                pdata->buck2_voltage, dvs_voltage_nr)) {
-               dev_err(iodev->dev, "buck2 voltages not specified\n");
+               dev_err(&pdev->dev, "buck2 voltages not specified\n");
                return -EINVAL;
        }
 
        if (of_property_read_u32_array(pmic_np,
                                "max8997,pmic-buck5-dvs-voltage",
                                pdata->buck5_voltage, dvs_voltage_nr)) {
-               dev_err(iodev->dev, "buck5 voltages not specified\n");
+               dev_err(&pdev->dev, "buck5 voltages not specified\n");
                return -EINVAL;
        }
 
        return 0;
 }
 #else
-static int max8997_pmic_dt_parse_pdata(struct max8997_dev *iodev,
+static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
                                        struct max8997_platform_data *pdata)
 {
        return 0;
@@ -1087,7 +1041,7 @@ static int max8997_pmic_probe(struct platform_device *pdev)
        }
 
        if (iodev->dev->of_node) {
-               ret = max8997_pmic_dt_parse_pdata(iodev, pdata);
+               ret = max8997_pmic_dt_parse_pdata(pdev, pdata);
                if (ret)
                        return ret;
        }
@@ -1123,8 +1077,8 @@ static int max8997_pmic_probe(struct platform_device *pdev)
                max8997->buck1_vol[i] = ret =
                        max8997_get_voltage_proper_val(
                                        &buck1245_voltage_map_desc,
-                                       pdata->buck1_voltage[i] / 1000,
-                                       pdata->buck1_voltage[i] / 1000 +
+                                       pdata->buck1_voltage[i],
+                                       pdata->buck1_voltage[i] +
                                        buck1245_voltage_map_desc.step);
                if (ret < 0)
                        goto err_out;
@@ -1132,8 +1086,8 @@ static int max8997_pmic_probe(struct platform_device *pdev)
                max8997->buck2_vol[i] = ret =
                        max8997_get_voltage_proper_val(
                                        &buck1245_voltage_map_desc,
-                                       pdata->buck2_voltage[i] / 1000,
-                                       pdata->buck2_voltage[i] / 1000 +
+                                       pdata->buck2_voltage[i],
+                                       pdata->buck2_voltage[i] +
                                        buck1245_voltage_map_desc.step);
                if (ret < 0)
                        goto err_out;
@@ -1141,8 +1095,8 @@ static int max8997_pmic_probe(struct platform_device *pdev)
                max8997->buck5_vol[i] = ret =
                        max8997_get_voltage_proper_val(
                                        &buck1245_voltage_map_desc,
-                                       pdata->buck5_voltage[i] / 1000,
-                                       pdata->buck5_voltage[i] / 1000 +
+                                       pdata->buck5_voltage[i],
+                                       pdata->buck5_voltage[i] +
                                        buck1245_voltage_map_desc.step);
                if (ret < 0)
                        goto err_out;
@@ -1236,13 +1190,15 @@ static int max8997_pmic_probe(struct platform_device *pdev)
                int id = pdata->regulators[i].id;
 
                desc = reg_voltage_map[id];
-               if (desc)
+               if (desc) {
                        regulators[id].n_voltages =
                                (desc->max - desc->min) / desc->step + 1;
-               else if (id == MAX8997_ESAFEOUT1 || id == MAX8997_ESAFEOUT2)
-                       regulators[id].n_voltages = 4;
-               else if (id == MAX8997_CHARGER_CV)
+               } else if (id == MAX8997_ESAFEOUT1 || id == MAX8997_ESAFEOUT2) {
+                       regulators[id].volt_table = safeoutvolt;
+                       regulators[id].n_voltages = ARRAY_SIZE(safeoutvolt);
+               } else if (id == MAX8997_CHARGER_CV) {
                        regulators[id].n_voltages = 16;
+               }
 
                config.dev = max8997->dev;
                config.init_data = pdata->regulators[i].initdata;
index b821d08eb64ae84b643f2cfe93cd0764738d21c9..b588f07c7cad20b9d1f8f47ce03d4f9333d3e44b 100644 (file)
@@ -51,39 +51,39 @@ struct voltage_map_desc {
        int step;
 };
 
-/* Voltage maps */
+/* Voltage maps in uV */
 static const struct voltage_map_desc ldo23_voltage_map_desc = {
-       .min = 800,     .step = 50,     .max = 1300,
+       .min = 800000,  .step = 50000,  .max = 1300000,
 };
 static const struct voltage_map_desc ldo456711_voltage_map_desc = {
-       .min = 1600,    .step = 100,    .max = 3600,
+       .min = 1600000, .step = 100000, .max = 3600000,
 };
 static const struct voltage_map_desc ldo8_voltage_map_desc = {
-       .min = 3000,    .step = 100,    .max = 3600,
+       .min = 3000000, .step = 100000, .max = 3600000,
 };
 static const struct voltage_map_desc ldo9_voltage_map_desc = {
-       .min = 2800,    .step = 100,    .max = 3100,
+       .min = 2800000, .step = 100000, .max = 3100000,
 };
 static const struct voltage_map_desc ldo10_voltage_map_desc = {
-       .min = 950,     .step = 50,     .max = 1300,
+       .min = 950000,  .step = 50000,  .max = 1300000,
 };
 static const struct voltage_map_desc ldo1213_voltage_map_desc = {
-       .min = 800,     .step = 100,    .max = 3300,
+       .min = 800000,  .step = 100000, .max = 3300000,
 };
 static const struct voltage_map_desc ldo1415_voltage_map_desc = {
-       .min = 1200,    .step = 100,    .max = 3300,
+       .min = 1200000, .step = 100000, .max = 3300000,
 };
 static const struct voltage_map_desc ldo1617_voltage_map_desc = {
-       .min = 1600,    .step = 100,    .max = 3600,
+       .min = 1600000, .step = 100000, .max = 3600000,
 };
 static const struct voltage_map_desc buck12_voltage_map_desc = {
-       .min = 750,     .step = 25,     .max = 1525,
+       .min = 750000,  .step = 25000,  .max = 1525000,
 };
 static const struct voltage_map_desc buck3_voltage_map_desc = {
-       .min = 1600,    .step = 100,    .max = 3600,
+       .min = 1600000, .step = 100000, .max = 3600000,
 };
 static const struct voltage_map_desc buck4_voltage_map_desc = {
-       .min = 800,     .step = 100,    .max = 2300,
+       .min = 800000,  .step = 100000, .max = 2300000,
 };
 
 static const struct voltage_map_desc *ldo_voltage_map[] = {
@@ -311,25 +311,13 @@ static int max8998_set_voltage_buck_sel(struct regulator_dev *rdev,
                dev_get_platdata(max8998->iodev->dev);
        struct i2c_client *i2c = max8998->iodev->i2c;
        int buck = rdev_get_id(rdev);
-       int reg, shift = 0, mask, ret;
-       int j, previous_sel;
+       int reg, shift = 0, mask, ret, j;
        static u8 buck1_last_val;
 
        ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask);
        if (ret)
                return ret;
 
-       previous_sel = max8998_get_voltage_sel(rdev);
-
-       /* Check if voltage needs to be changed */
-       /* if previous_voltage equal new voltage, return */
-       if (previous_sel == selector) {
-               dev_dbg(max8998->dev, "No voltage change, old:%d, new:%d\n",
-                       regulator_list_voltage_linear(rdev, previous_sel),
-                       regulator_list_voltage_linear(rdev, selector));
-               return ret;
-       }
-
        switch (buck) {
        case MAX8998_BUCK1:
                dev_dbg(max8998->dev,
@@ -445,9 +433,9 @@ static int max8998_set_voltage_buck_time_sel(struct regulator_dev *rdev,
        if (max8998->iodev->type == TYPE_MAX8998 && !(val & MAX8998_ENRAMP))
                return 0;
 
-       difference = (new_selector - old_selector) * desc->step;
+       difference = (new_selector - old_selector) * desc->step / 1000;
        if (difference > 0)
-               return difference / ((val & 0x0f) + 1);
+               return DIV_ROUND_UP(difference, (val & 0x0f) + 1);
 
        return 0;
 }
@@ -702,7 +690,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
                i = 0;
                while (buck12_voltage_map_desc.min +
                       buck12_voltage_map_desc.step*i
-                      < (pdata->buck1_voltage1 / 1000))
+                      < pdata->buck1_voltage1)
                        i++;
                max8998->buck1_vol[0] = i;
                ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i);
@@ -713,7 +701,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
                i = 0;
                while (buck12_voltage_map_desc.min +
                       buck12_voltage_map_desc.step*i
-                      < (pdata->buck1_voltage2 / 1000))
+                      < pdata->buck1_voltage2)
                        i++;
 
                max8998->buck1_vol[1] = i;
@@ -725,7 +713,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
                i = 0;
                while (buck12_voltage_map_desc.min +
                       buck12_voltage_map_desc.step*i
-                      < (pdata->buck1_voltage3 / 1000))
+                      < pdata->buck1_voltage3)
                        i++;
 
                max8998->buck1_vol[2] = i;
@@ -737,7 +725,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
                i = 0;
                while (buck12_voltage_map_desc.min +
                       buck12_voltage_map_desc.step*i
-                      < (pdata->buck1_voltage4 / 1000))
+                      < pdata->buck1_voltage4)
                        i++;
 
                max8998->buck1_vol[3] = i;
@@ -763,7 +751,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
                i = 0;
                while (buck12_voltage_map_desc.min +
                       buck12_voltage_map_desc.step*i
-                      < (pdata->buck2_voltage1 / 1000))
+                      < pdata->buck2_voltage1)
                        i++;
                max8998->buck2_vol[0] = i;
                ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i);
@@ -774,7 +762,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
                i = 0;
                while (buck12_voltage_map_desc.min +
                       buck12_voltage_map_desc.step*i
-                      < (pdata->buck2_voltage2 / 1000))
+                      < pdata->buck2_voltage2)
                        i++;
                max8998->buck2_vol[1] = i;
                ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE2, i);
@@ -792,8 +780,8 @@ static int max8998_pmic_probe(struct platform_device *pdev)
                        int count = (desc->max - desc->min) / desc->step + 1;
 
                        regulators[index].n_voltages = count;
-                       regulators[index].min_uV = desc->min * 1000;
-                       regulators[index].uV_step = desc->step * 1000;
+                       regulators[index].min_uV = desc->min;
+                       regulators[index].uV_step = desc->step;
                }
 
                config.dev = max8998->dev;
index 0d84b1f33199ca5251c425ba898ffad4296da88f..9891aec47b57a02bcb17871e4c0caa57a152c1f7 100644 (file)
@@ -164,6 +164,14 @@ static const unsigned int mc13892_sw1[] = {
        1350000, 1375000
 };
 
+/*
+ * Note: this table is used to derive the SWx voltage selector by
+ * indexing into the array.  If the HI bit is to be set as well,
+ * offset the table index by the index of 1100000 uV
+ * (MC13892_SWxHI_SEL_OFFSET) to get the actual register value.
+ */
+#define MC13892_SWxHI_SEL_OFFSET               20
+
 static const unsigned int mc13892_sw[] = {
        600000,   625000,  650000,  675000,  700000,  725000,
        750000,   775000,  800000,  825000,  850000,  875000,
@@ -239,7 +247,6 @@ static const unsigned int mc13892_pwgtdrv[] = {
 };
 
 static struct regulator_ops mc13892_gpo_regulator_ops;
-/* sw regulators need special care due to the "hi bit" */
 static struct regulator_ops mc13892_sw_regulator_ops;
 
 
@@ -396,7 +403,7 @@ static int mc13892_sw_regulator_get_voltage_sel(struct regulator_dev *rdev)
 {
        struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
        int ret, id = rdev_get_id(rdev);
-       unsigned int val;
+       unsigned int val, selector;
 
        dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
 
@@ -407,12 +414,28 @@ static int mc13892_sw_regulator_get_voltage_sel(struct regulator_dev *rdev)
        if (ret)
                return ret;
 
-       val = (val & mc13892_regulators[id].vsel_mask)
-               >> mc13892_regulators[id].vsel_shift;
+       /*
+        * Figure out if the HI bit is set inside the switcher mode register
+        * since this means the selector value we return is at a different
+        * offset into the selector table.
+        *
+        * According to the MC13892 documentation note 59 (Table 47) the SW1
+        * buck switcher does not support output range programming, so the
+        * HI bit must always remain 0.  Do nothing special if our register
+        * is MC13892_SWITCHERS0.
+        */
+
+       selector = val & mc13892_regulators[id].vsel_mask;
+
+       if ((mc13892_regulators[id].vsel_reg != MC13892_SWITCHERS0) &&
+           (val & MC13892_SWITCHERS0_SWxHI)) {
+               selector += MC13892_SWxHI_SEL_OFFSET;
+       }
 
-       dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
+       dev_dbg(rdev_get_dev(rdev), "%s id: %d val: 0x%08x selector: %d\n",
+                       __func__, id, val, selector);
 
-       return val;
+       return selector;
 }
 
 static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev,
@@ -425,18 +448,35 @@ static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev,
 
        volt = rdev->desc->volt_table[selector];
        mask = mc13892_regulators[id].vsel_mask;
-       reg_value = selector << mc13892_regulators[id].vsel_shift;
-
-       if (volt > 1375000) {
-               mask |= MC13892_SWITCHERS0_SWxHI;
-               reg_value |= MC13892_SWITCHERS0_SWxHI;
-       } else if (volt < 1100000) {
-               mask |= MC13892_SWITCHERS0_SWxHI;
-               reg_value &= ~MC13892_SWITCHERS0_SWxHI;
+       reg_value = selector;
+
+       /*
+        * Don't mess with the HI bit or support HI voltage offsets for SW1.
+        *
+        * Since the get_voltage_sel callback reports a selector with the HI
+        * offset already applied, back that offset out when HI is to be set
+        * so the correct value is written to the register.
+        *
+        * The HI bit and selector offset handling could instead shift and
+        * mask off the voltage selector part of the register and OR it back
+        * in, but since the selector sits at bits 4:0 there is little point;
+        * this way is more readable and does far less work.
+        */
+
+       if (mc13892_regulators[id].vsel_reg != MC13892_SWITCHERS0) {
+               if (volt > 1375000) {
+                       reg_value -= MC13892_SWxHI_SEL_OFFSET;
+                       reg_value |= MC13892_SWITCHERS0_SWxHI;
+                       mask |= MC13892_SWITCHERS0_SWxHI;
+               } else if (volt < 1100000) {
+                       reg_value &= ~MC13892_SWITCHERS0_SWxHI;
+                       mask |= MC13892_SWITCHERS0_SWxHI;
+               }
        }
 
        mc13xxx_lock(priv->mc13xxx);
-       ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].reg, mask,
+       ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg, mask,
                              reg_value);
        mc13xxx_unlock(priv->mc13xxx);
 
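
The two comments above describe the same mapping from opposite directions: get_voltage_sel() adds the 20-step offset when SWxHI is set, and set_voltage_sel() subtracts it again before writing. A standalone sketch of that round trip, assuming a SWxHI bit position and taking only the 20-step offset and the bits-4:0 selector field from the hunks above:

#include <stdio.h>

#define SWXHI_BIT	(1u << 23)	/* assumed position of MC13892_SWITCHERS0_SWxHI */
#define SWXHI_OFFSET	20		/* MC13892_SWxHI_SEL_OFFSET */

/* set_voltage_sel() direction: table selector -> register value */
static unsigned int sel_to_reg(unsigned int sel, int hi_range)
{
	unsigned int reg = sel;

	if (hi_range) {			/* the driver keys this off volt > 1375000 */
		reg -= SWXHI_OFFSET;
		reg |= SWXHI_BIT;
	}
	return reg;
}

/* get_voltage_sel() direction: register value -> table selector */
static unsigned int reg_to_sel(unsigned int reg)
{
	unsigned int sel = reg & 0x1f;	/* the selector sits in bits 4:0 */

	if (reg & SWXHI_BIT)
		sel += SWXHI_OFFSET;
	return sel;
}

int main(void)
{
	unsigned int sel = 25;		/* a selector in the HI part of the table */
	unsigned int reg = sel_to_reg(sel, 1);

	printf("sel %u -> reg 0x%08x -> sel %u\n", sel, reg, reg_to_sel(reg));
	return 0;
}
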
@@ -495,15 +535,18 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
        struct mc13xxx_regulator_init_data *mc13xxx_data;
        struct regulator_config config = { };
        int i, ret;
-       int num_regulators = 0;
+       int num_regulators = 0, num_parsed;
        u32 val;
 
        num_regulators = mc13xxx_get_num_regulators_dt(pdev);
+
        if (num_regulators <= 0 && pdata)
                num_regulators = pdata->num_regulators;
        if (num_regulators <= 0)
                return -EINVAL;
 
+       num_parsed = num_regulators;
+
        priv = devm_kzalloc(&pdev->dev, sizeof(*priv) +
                num_regulators * sizeof(priv->regulators[0]),
                GFP_KERNEL);
@@ -520,7 +563,7 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
        if (ret)
                goto err_unlock;
 
-       /* enable switch auto mode */
+       /* enable switch auto mode (on 2.0A silicon only) */
        if ((val & 0x0000FFFF) == 0x45d0) {
                ret = mc13xxx_reg_rmw(mc13892, MC13892_SWITCHERS4,
                        MC13892_SWITCHERS4_SW1MODE_M |
@@ -546,7 +589,39 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
                = mc13892_vcam_get_mode;
 
        mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
-                                       ARRAY_SIZE(mc13892_regulators));
+                                       ARRAY_SIZE(mc13892_regulators),
+                                       &num_parsed);
+
+       /*
+        * Perform a little sanity check on the regulator tree - if we found
+        * a number of regulators from mc13xxx_get_num_regulators_dt and
+        * then parsed a smaller number in mc13xxx_parse_regulators_dt then
+        * there is a regulator defined in the regulators node which has
+        * not matched any usable regulator in the driver. In this case,
+        * there is one missing and what will happen is the first regulator
+        * will get registered again.
+        *
+        * Fix this by basically making our number of registerable regulators
+        * equal to the number of regulators we parsed. We are allocating
+        * too much memory for priv, but this is unavoidable at this point.
+        *
+        * As an example of how this can happen, try making a typo in your
+        * regulators node (vviohi {} instead of viohi {}) so that the name
+        * does not match.
+        *
+        * The check will basically pass for platform data (non-DT) because
+        * mc13xxx_parse_regulators_dt for !CONFIG_OF will not touch num_parsed.
+        *
+        */
+       if (num_parsed != num_regulators) {
+               dev_warn(&pdev->dev,
+               "parsed %d != regulators %d - check your device tree!\n",
+                       num_parsed, num_regulators);
+
+               num_regulators = num_parsed;
+               priv->num_regulators = num_regulators;
+       }
+
        for (i = 0; i < num_regulators; i++) {
                struct regulator_init_data *init_data;
                struct regulator_desc *desc;
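
The sanity check explained in the long comment above reduces to comparing the number of children of the regulators node against the number of children whose names matched a known regulator. A minimal standalone sketch of that comparison; the regulator names here are made up, only the warning text mirrors the hunk:

#include <stdio.h>
#include <string.h>

/* stand-ins for mc13892_regulators[].desc.name */
static const char *known[] = { "sw1", "sw2", "viohi", "vaudio" };

/* children of the DT "regulators" node, one of them mistyped */
static const char *children[] = { "sw1", "sw2", "vviohi", "vaudio" };

int main(void)
{
	unsigned int i, j, parsed = 0;
	unsigned int counted = sizeof(children) / sizeof(children[0]);

	for (i = 0; i < counted; i++)		/* what mc13xxx_parse_regulators_dt() does */
		for (j = 0; j < sizeof(known) / sizeof(known[0]); j++)
			if (!strcmp(children[i], known[j])) {
				parsed++;
				break;
			}

	if (parsed != counted)
		printf("parsed %u != regulators %u - check your device tree!\n",
		       parsed, counted);
	return 0;
}
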
index 4ed89c6541100e838e8d6c3afb5aea3e0ad5bebf..23cf9f9c383b557b7cb7119576c5618356addfc8 100644 (file)
@@ -164,29 +164,30 @@ EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_ops);
 #ifdef CONFIG_OF
 int mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
 {
-       struct device_node *parent, *child;
-       int num = 0;
+       struct device_node *parent;
+       int num;
 
        of_node_get(pdev->dev.parent->of_node);
        parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
        if (!parent)
                return -ENODEV;
 
-       for_each_child_of_node(parent, child)
-               num++;
-
+       num = of_get_child_count(parent);
+       of_node_put(parent);
        return num;
 }
 EXPORT_SYMBOL_GPL(mc13xxx_get_num_regulators_dt);
 
 struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
        struct platform_device *pdev, struct mc13xxx_regulator *regulators,
-       int num_regulators)
+       int num_regulators, int *num_parsed)
 {
        struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
        struct mc13xxx_regulator_init_data *data, *p;
        struct device_node *parent, *child;
-       int i;
+       int i, parsed = 0;
+
+       *num_parsed = 0;
 
        of_node_get(pdev->dev.parent->of_node);
        parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
@@ -195,24 +196,32 @@ struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
 
        data = devm_kzalloc(&pdev->dev, sizeof(*data) * priv->num_regulators,
                            GFP_KERNEL);
-       if (!data)
+       if (!data) {
+               of_node_put(parent);
                return NULL;
+       }
+
        p = data;
 
        for_each_child_of_node(parent, child) {
                for (i = 0; i < num_regulators; i++) {
                        if (!of_node_cmp(child->name,
                                         regulators[i].desc.name)) {
+
                                p->id = i;
                                p->init_data = of_get_regulator_init_data(
                                                        &pdev->dev, child);
                                p->node = child;
                                p++;
+
+                               parsed++;
                                break;
                        }
                }
        }
+       of_node_put(parent);
 
+       *num_parsed = parsed;
        return data;
 }
 EXPORT_SYMBOL_GPL(mc13xxx_parse_regulators_dt);
index 06c8903f182a680e7c0bb8464f227f3945cbc11f..007f83387fd6a6a0c209ceba25046144d6c80ce8 100644 (file)
@@ -39,7 +39,7 @@ extern int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev,
 extern int mc13xxx_get_num_regulators_dt(struct platform_device *pdev);
 extern struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
        struct platform_device *pdev, struct mc13xxx_regulator *regulators,
-       int num_regulators);
+       int num_regulators, int *num_parsed);
 #else
 static inline int mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
 {
@@ -48,7 +48,7 @@ static inline int mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
 
 static inline struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
        struct platform_device *pdev, struct mc13xxx_regulator *regulators,
-       int num_regulators)
+       int num_regulators, int *num_parsed)
 {
        return NULL;
 }
index 6f684916fd7914450bfea4170ee884f5538ee281..66ca769287ab3427aa9ac8695ad5a67ca5dc77c3 100644 (file)
@@ -120,6 +120,12 @@ int of_regulator_match(struct device *dev, struct device_node *node,
        if (!dev || !node)
                return -EINVAL;
 
+       for (i = 0; i < num_matches; i++) {
+               struct of_regulator_match *match = &matches[i];
+               match->init_data = NULL;
+               match->of_node = NULL;
+       }
+
        for_each_child_of_node(node, child) {
                name = of_get_property(child,
                                        "regulator-compatible", NULL);
index c9e912f583bc57485e09909bff01b58af315d2cb..cde13bb5a8fbc4163eb7769b9b7e09e90eaf1d75 100644 (file)
@@ -527,6 +527,7 @@ static void palmas_dt_to_pdata(struct device *dev,
        u32 prop;
        int idx, ret;
 
+       node = of_node_get(node);
        regulators = of_find_node_by_name(node, "regulators");
        if (!regulators) {
                dev_info(dev, "regulator node not found\n");
@@ -535,6 +536,7 @@ static void palmas_dt_to_pdata(struct device *dev,
 
        ret = of_regulator_match(dev, regulators, palmas_matches,
                        PALMAS_NUM_REGS);
+       of_node_put(regulators);
        if (ret < 0) {
                dev_err(dev, "Error parsing regulator init data: %d\n", ret);
                return;
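
The of_regulator_match() call above passes the file-scope palmas_matches table, and several drivers below do the same with their own static tables. That is why the reset loop added to of_regulator_match() earlier matters: without it, entries filled in by a previous call would survive into the next one. A small standalone sketch of the stale-entry problem, with hypothetical names:

#include <stddef.h>
#include <stdio.h>

struct match {
	const char *name;
	const void *init_data;	/* filled in only when the DT node is present */
};

/* file-scope table reused for every probed instance, like palmas_matches */
static struct match matches[] = { { "smps1" }, { "smps2" } };

static void parse(int node_has_smps2)
{
	size_t i;

	/* reset before matching, as of_regulator_match() now does */
	for (i = 0; i < sizeof(matches) / sizeof(matches[0]); i++)
		matches[i].init_data = NULL;

	matches[0].init_data = "smps1 data";
	if (node_has_smps2)
		matches[1].init_data = "smps2 data";
}

int main(void)
{
	parse(1);
	parse(0);	/* without the reset, matches[1] would still look populated */
	printf("smps2: %s\n", matches[1].init_data ? "stale!" : "absent");
	return 0;
}
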
@@ -565,11 +567,6 @@ static void palmas_dt_to_pdata(struct device *dev,
                if (!ret)
                        pdata->reg_init[idx]->mode_sleep = prop;
 
-               ret = of_property_read_u32(palmas_matches[idx].of_node,
-                               "ti,warm_reset", &prop);
-               if (!ret)
-                       pdata->reg_init[idx]->warm_reset = prop;
-
                ret = of_property_read_u32(palmas_matches[idx].of_node,
                                "ti,tstep", &prop);
                if (!ret)
index bd062a2ffbe235cb9e4c4636b59b8676738676e8..cd9ea2ea1826bc5e6b40de856b4884637eae3103 100644 (file)
@@ -174,9 +174,9 @@ static struct regulator_ops s2mps11_buck_ops = {
        .min_uV         = S2MPS11_BUCK_MIN2,                    \
        .uV_step        = S2MPS11_BUCK_STEP2,                   \
        .n_voltages     = S2MPS11_BUCK_N_VOLTAGES,              \
-       .vsel_reg       = S2MPS11_REG_B9CTRL2,                  \
+       .vsel_reg       = S2MPS11_REG_B10CTRL2,                 \
        .vsel_mask      = S2MPS11_BUCK_VSEL_MASK,               \
-       .enable_reg     = S2MPS11_REG_B9CTRL1,                  \
+       .enable_reg     = S2MPS11_REG_B10CTRL1,                 \
        .enable_mask    = S2MPS11_ENABLE_MASK                   \
 }
 
index 9f991f2c525af531ec0b5450523bb2e0e75a5b54..8a831947c351152bb52b886d81eef232eb5794c6 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/bug.h>
 #include <linux/err.h>
 #include <linux/gpio.h>
+#include <linux/of_gpio.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -21,6 +22,9 @@
 #include <linux/regulator/machine.h>
 #include <linux/mfd/samsung/core.h>
 #include <linux/mfd/samsung/s5m8767.h>
+#include <linux/regulator/of_regulator.h>
+
+#define S5M8767_OPMODE_NORMAL_MODE 0x1
 
 struct s5m8767_info {
        struct device *dev;
@@ -214,7 +218,7 @@ static int s5m8767_reg_is_enabled(struct regulator_dev *rdev)
        struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
        int ret, reg;
        int mask = 0xc0, enable_ctrl;
-       u8 val;
+       unsigned int val;
 
        ret = s5m8767_get_register(rdev, &reg, &enable_ctrl);
        if (ret == -EINVAL)
@@ -255,10 +259,8 @@ static int s5m8767_reg_disable(struct regulator_dev *rdev)
        return sec_reg_update(s5m8767->iodev, reg, ~mask, mask);
 }
 
-static int s5m8767_get_voltage_register(struct regulator_dev *rdev, int *_reg)
+static int s5m8767_get_vsel_reg(int reg_id, struct s5m8767_info *s5m8767)
 {
-       struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
-       int reg_id = rdev_get_id(rdev);
        int reg;
 
        switch (reg_id) {
@@ -296,43 +298,18 @@ static int s5m8767_get_voltage_register(struct regulator_dev *rdev, int *_reg)
                return -EINVAL;
        }
 
-       *_reg = reg;
-
-       return 0;
+       return reg;
 }
 
-static int s5m8767_get_voltage_sel(struct regulator_dev *rdev)
-{
-       struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
-       int reg, mask, ret;
-       int reg_id = rdev_get_id(rdev);
-       u8 val;
-
-       ret = s5m8767_get_voltage_register(rdev, &reg);
-       if (ret)
-               return ret;
-
-       mask = (reg_id < S5M8767_BUCK1) ? 0x3f : 0xff;
-
-       ret = sec_reg_read(s5m8767->iodev, reg, &val);
-       if (ret)
-               return ret;
-
-       val &= mask;
-
-       return val;
-}
-
-static int s5m8767_convert_voltage_to_sel(
-               const struct sec_voltage_desc *desc,
-               int min_vol, int max_vol)
+static int s5m8767_convert_voltage_to_sel(const struct sec_voltage_desc *desc,
+                                         int min_vol)
 {
        int selector = 0;
 
        if (desc == NULL)
                return -EINVAL;
 
-       if (max_vol < desc->min || min_vol > desc->max)
+       if (min_vol > desc->max)
                return -EINVAL;
 
        if (min_vol < desc->min)
@@ -340,7 +317,7 @@ static int s5m8767_convert_voltage_to_sel(
 
        selector = DIV_ROUND_UP(min_vol - desc->min, desc->step);
 
-       if (desc->min + desc->step * selector > max_vol)
+       if (desc->min + desc->step * selector > desc->max)
                return -EINVAL;
 
        return selector;
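
With the max_vol argument gone, the conversion simply rounds the requested minimum up to the next step and rejects it only if that lands above the descriptor maximum. A standalone arithmetic check of the same formula, using illustrative values (600 mV minimum, 6.25 mV step) rather than the driver's real tables:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int min = 600000, step = 6250, max = 1600000;	/* illustrative descriptor */
	int min_vol = 1100000;				/* requested minimum, in uV */
	int sel = DIV_ROUND_UP(min_vol - min, step);

	if (min + step * sel > max)
		return 1;				/* the driver returns -EINVAL here */

	printf("selector %d -> %d uV\n", sel, min + step * sel);
	return 0;
}
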
@@ -373,15 +350,13 @@ static int s5m8767_set_voltage_sel(struct regulator_dev *rdev,
 {
        struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
        int reg_id = rdev_get_id(rdev);
-       int reg, mask, ret = 0, old_index, index = 0;
+       int old_index, index = 0;
        u8 *buck234_vol = NULL;
 
        switch (reg_id) {
        case S5M8767_LDO1 ... S5M8767_LDO28:
-               mask = 0x3f;
                break;
        case S5M8767_BUCK1 ... S5M8767_BUCK6:
-               mask = 0xff;
                if (reg_id == S5M8767_BUCK2 && s5m8767->buck2_gpiodvs)
                        buck234_vol = &s5m8767->buck2_vol[0];
                else if (reg_id == S5M8767_BUCK3 && s5m8767->buck3_gpiodvs)
@@ -392,7 +367,6 @@ static int s5m8767_set_voltage_sel(struct regulator_dev *rdev,
        case S5M8767_BUCK7 ... S5M8767_BUCK8:
                return -EINVAL;
        case S5M8767_BUCK9:
-               mask = 0xff;
                break;
        default:
                return -EINVAL;
@@ -412,11 +386,7 @@ static int s5m8767_set_voltage_sel(struct regulator_dev *rdev,
                else
                        return s5m8767_set_low(s5m8767);
        } else {
-               ret = s5m8767_get_voltage_register(rdev, &reg);
-               if (ret)
-                       return ret;
-
-               return sec_reg_update(s5m8767->iodev, reg, selector, mask);
+               return regulator_set_voltage_sel_regmap(rdev, selector);
        }
 }
 
@@ -441,7 +411,7 @@ static struct regulator_ops s5m8767_ops = {
        .is_enabled             = s5m8767_reg_is_enabled,
        .enable                 = s5m8767_reg_enable,
        .disable                = s5m8767_reg_disable,
-       .get_voltage_sel        = s5m8767_get_voltage_sel,
+       .get_voltage_sel        = regulator_get_voltage_sel_regmap,
        .set_voltage_sel        = s5m8767_set_voltage_sel,
        .set_voltage_time_sel   = s5m8767_set_voltage_time_sel,
 };
@@ -508,10 +478,182 @@ static struct regulator_desc regulators[] = {
        s5m8767_regulator_desc(BUCK9),
 };
 
+#ifdef CONFIG_OF
+static int s5m8767_pmic_dt_parse_dvs_gpio(struct sec_pmic_dev *iodev,
+                       struct sec_platform_data *pdata,
+                       struct device_node *pmic_np)
+{
+       int i, gpio;
+
+       for (i = 0; i < 3; i++) {
+               gpio = of_get_named_gpio(pmic_np,
+                                       "s5m8767,pmic-buck-dvs-gpios", i);
+               if (!gpio_is_valid(gpio)) {
+                       dev_err(iodev->dev, "invalid gpio[%d]: %d\n", i, gpio);
+                       return -EINVAL;
+               }
+               pdata->buck_gpios[i] = gpio;
+       }
+       return 0;
+}
+
+static int s5m8767_pmic_dt_parse_ds_gpio(struct sec_pmic_dev *iodev,
+                       struct sec_platform_data *pdata,
+                       struct device_node *pmic_np)
+{
+       int i, gpio;
+
+       for (i = 0; i < 3; i++) {
+               gpio = of_get_named_gpio(pmic_np,
+                                       "s5m8767,pmic-buck-ds-gpios", i);
+               if (!gpio_is_valid(gpio)) {
+                       dev_err(iodev->dev, "invalid gpio[%d]: %d\n", i, gpio);
+                       return -EINVAL;
+               }
+               pdata->buck_ds[i] = gpio;
+       }
+       return 0;
+}
+
+static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
+                                       struct sec_platform_data *pdata)
+{
+       struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+       struct device_node *pmic_np, *regulators_np, *reg_np;
+       struct sec_regulator_data *rdata;
+       struct sec_opmode_data *rmode;
+       unsigned int i, dvs_voltage_nr = 1, ret;
+
+       pmic_np = iodev->dev->of_node;
+       if (!pmic_np) {
+               dev_err(iodev->dev, "could not find pmic sub-node\n");
+               return -ENODEV;
+       }
+
+       regulators_np = of_find_node_by_name(pmic_np, "regulators");
+       if (!regulators_np) {
+               dev_err(iodev->dev, "could not find regulators sub-node\n");
+               return -EINVAL;
+       }
+
+       /* count the number of regulators to be supported in pmic */
+       pdata->num_regulators = of_get_child_count(regulators_np);
+
+       rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
+                               pdata->num_regulators, GFP_KERNEL);
+       if (!rdata) {
+               dev_err(iodev->dev,
+                       "could not allocate memory for regulator data\n");
+               return -ENOMEM;
+       }
+
+       rmode = devm_kzalloc(&pdev->dev, sizeof(*rmode) *
+                               pdata->num_regulators, GFP_KERNEL);
+       if (!rmode) {
+               dev_err(iodev->dev,
+                       "could not allocate memory for regulator mode\n");
+               return -ENOMEM;
+       }
+
+       pdata->regulators = rdata;
+       pdata->opmode = rmode;
+       for_each_child_of_node(regulators_np, reg_np) {
+               for (i = 0; i < ARRAY_SIZE(regulators); i++)
+                       if (!of_node_cmp(reg_np->name, regulators[i].name))
+                               break;
+
+               if (i == ARRAY_SIZE(regulators)) {
+                       dev_warn(iodev->dev,
+                       "don't know how to configure regulator %s\n",
+                       reg_np->name);
+                       continue;
+               }
+
+               rdata->id = i;
+               rdata->initdata = of_get_regulator_init_data(
+                                               &pdev->dev, reg_np);
+               rdata->reg_node = reg_np;
+               rdata++;
+               rmode->id = i;
+               if (of_property_read_u32(reg_np, "op_mode",
+                               &rmode->mode)) {
+                       dev_warn(iodev->dev,
+                               "no op_mode property at %s\n",
+                               reg_np->full_name);
+
+                       rmode->mode = S5M8767_OPMODE_NORMAL_MODE;
+               }
+               rmode++;
+       }
+
+       if (of_get_property(pmic_np, "s5m8767,pmic-buck2-uses-gpio-dvs", NULL))
+               pdata->buck2_gpiodvs = true;
+
+       if (of_get_property(pmic_np, "s5m8767,pmic-buck3-uses-gpio-dvs", NULL))
+               pdata->buck3_gpiodvs = true;
+
+       if (of_get_property(pmic_np, "s5m8767,pmic-buck4-uses-gpio-dvs", NULL))
+               pdata->buck4_gpiodvs = true;
+
+       if (pdata->buck2_gpiodvs || pdata->buck3_gpiodvs ||
+                                               pdata->buck4_gpiodvs) {
+               ret = s5m8767_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np);
+               if (ret)
+                       return -EINVAL;
+
+               if (of_property_read_u32(pmic_np,
+                               "s5m8767,pmic-buck-default-dvs-idx",
+                               &pdata->buck_default_idx)) {
+                       pdata->buck_default_idx = 0;
+               } else {
+                       if (pdata->buck_default_idx >= 8) {
+                               pdata->buck_default_idx = 0;
+                               dev_info(iodev->dev,
+                               "invalid value for default dvs index, use 0\n");
+                       }
+               }
+               dvs_voltage_nr = 8;
+       }
+
+       ret = s5m8767_pmic_dt_parse_ds_gpio(iodev, pdata, pmic_np);
+       if (ret)
+               return -EINVAL;
+
+       if (of_property_read_u32_array(pmic_np,
+                               "s5m8767,pmic-buck2-dvs-voltage",
+                               pdata->buck2_voltage, dvs_voltage_nr)) {
+               dev_err(iodev->dev, "buck2 voltages not specified\n");
+               return -EINVAL;
+       }
+
+       if (of_property_read_u32_array(pmic_np,
+                               "s5m8767,pmic-buck3-dvs-voltage",
+                               pdata->buck3_voltage, dvs_voltage_nr)) {
+               dev_err(iodev->dev, "buck3 voltages not specified\n");
+               return -EINVAL;
+       }
+
+       if (of_property_read_u32_array(pmic_np,
+                               "s5m8767,pmic-buck4-dvs-voltage",
+                               pdata->buck4_voltage, dvs_voltage_nr)) {
+               dev_err(iodev->dev, "buck4 voltages not specified\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+#else
+static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
+                                       struct sec_platform_data *pdata)
+{
+       return 0;
+}
+#endif /* CONFIG_OF */
+
 static int s5m8767_pmic_probe(struct platform_device *pdev)
 {
        struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
-       struct sec_platform_data *pdata = dev_get_platdata(iodev->dev);
+       struct sec_platform_data *pdata = iodev->pdata;
        struct regulator_config config = { };
        struct regulator_dev **rdev;
        struct s5m8767_info *s5m8767;
@@ -522,6 +664,12 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
+       if (iodev->dev->of_node) {
+               ret = s5m8767_pmic_dt_parse_pdata(pdev, pdata);
+               if (ret)
+                       return ret;
+       }
+
        if (pdata->buck2_gpiodvs) {
                if (pdata->buck3_gpiodvs || pdata->buck4_gpiodvs) {
                        dev_err(&pdev->dev, "S5M8767 GPIO DVS NOT VALID\n");
@@ -577,23 +725,17 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
        s5m8767->opmode = pdata->opmode;
 
        buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
-                                               pdata->buck2_init,
-                                               pdata->buck2_init +
-                                               buck_voltage_val2.step);
+                                                  pdata->buck2_init);
 
        sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK2DVS2, buck_init);
 
        buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
-                                               pdata->buck3_init,
-                                               pdata->buck3_init +
-                                               buck_voltage_val2.step);
+                                                  pdata->buck3_init);
 
        sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK3DVS2, buck_init);
 
        buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
-                                               pdata->buck4_init,
-                                               pdata->buck4_init +
-                                               buck_voltage_val2.step);
+                                                  pdata->buck4_init);
 
        sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK4DVS2, buck_init);
 
@@ -602,27 +744,21 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
                        s5m8767->buck2_vol[i] =
                                s5m8767_convert_voltage_to_sel(
                                                &buck_voltage_val2,
-                                               pdata->buck2_voltage[i],
-                                               pdata->buck2_voltage[i] +
-                                               buck_voltage_val2.step);
+                                               pdata->buck2_voltage[i]);
                }
 
                if (s5m8767->buck3_gpiodvs) {
                        s5m8767->buck3_vol[i] =
                                s5m8767_convert_voltage_to_sel(
                                                &buck_voltage_val2,
-                                               pdata->buck3_voltage[i],
-                                               pdata->buck3_voltage[i] +
-                                               buck_voltage_val2.step);
+                                               pdata->buck3_voltage[i]);
                }
 
                if (s5m8767->buck4_gpiodvs) {
                        s5m8767->buck4_vol[i] =
                                s5m8767_convert_voltage_to_sel(
                                                &buck_voltage_val2,
-                                               pdata->buck4_voltage[i],
-                                               pdata->buck4_voltage[i] +
-                                               buck_voltage_val2.step);
+                                               pdata->buck4_voltage[i]);
                }
        }
 
@@ -760,11 +896,19 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
                                (desc->max - desc->min) / desc->step + 1;
                        regulators[id].min_uV = desc->min;
                        regulators[id].uV_step = desc->step;
+                       regulators[id].vsel_reg =
+                               s5m8767_get_vsel_reg(id, s5m8767);
+                       if (id < S5M8767_BUCK1)
+                               regulators[id].vsel_mask = 0x3f;
+                       else
+                               regulators[id].vsel_mask = 0xff;
                }
 
                config.dev = s5m8767->dev;
                config.init_data = pdata->regulators[i].initdata;
                config.driver_data = s5m8767;
+               config.regmap = iodev->regmap;
+               config.of_node = pdata->regulators[i].reg_node;
 
                rdev[i] = regulator_register(&regulators[id], &config);
                if (IS_ERR(rdev[i])) {
index ab21133e6784dc43d5990de54427b4a64db9ca52..6e67be75ea1b5fc63095980fd24d68591d7b5389 100644 (file)
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
 #include <linux/regulator/tps51632-regulator.h>
 #include <linux/slab.h>
 
@@ -85,49 +88,8 @@ struct tps51632_chip {
        struct regulator_desc desc;
        struct regulator_dev *rdev;
        struct regmap *regmap;
-       bool enable_pwm_dvfs;
 };
 
-static int tps51632_dcdc_get_voltage_sel(struct regulator_dev *rdev)
-{
-       struct tps51632_chip *tps = rdev_get_drvdata(rdev);
-       unsigned int data;
-       int ret;
-       unsigned int reg = TPS51632_VOLTAGE_SELECT_REG;
-       int vsel;
-
-       if (tps->enable_pwm_dvfs)
-               reg = TPS51632_VOLTAGE_BASE_REG;
-
-       ret = regmap_read(tps->regmap, reg, &data);
-       if (ret < 0) {
-               dev_err(tps->dev, "reg read failed, err %d\n", ret);
-               return ret;
-       }
-
-       vsel = data & TPS51632_VOUT_MASK;
-       return vsel;
-}
-
-static int tps51632_dcdc_set_voltage_sel(struct regulator_dev *rdev,
-               unsigned selector)
-{
-       struct tps51632_chip *tps = rdev_get_drvdata(rdev);
-       int ret;
-       unsigned int reg = TPS51632_VOLTAGE_SELECT_REG;
-
-       if (tps->enable_pwm_dvfs)
-               reg = TPS51632_VOLTAGE_BASE_REG;
-
-       if (selector > TPS51632_MAX_VSEL)
-               return -EINVAL;
-
-       ret = regmap_write(tps->regmap, reg, selector);
-       if (ret < 0)
-               dev_err(tps->dev, "reg write failed, err %d\n", ret);
-       return ret;
-}
-
 static int tps51632_dcdc_set_ramp_delay(struct regulator_dev *rdev,
                int ramp_delay)
 {
@@ -144,8 +106,8 @@ static int tps51632_dcdc_set_ramp_delay(struct regulator_dev *rdev,
 }
 
 static struct regulator_ops tps51632_dcdc_ops = {
-       .get_voltage_sel        = tps51632_dcdc_get_voltage_sel,
-       .set_voltage_sel        = tps51632_dcdc_set_voltage_sel,
+       .get_voltage_sel        = regulator_get_voltage_sel_regmap,
+       .set_voltage_sel        = regulator_set_voltage_sel_regmap,
        .list_voltage           = regulator_list_voltage_linear,
        .set_voltage_time_sel   = regulator_set_voltage_time_sel,
        .set_ramp_delay         = tps51632_dcdc_set_ramp_delay,
@@ -162,7 +124,6 @@ static int tps51632_init_dcdc(struct tps51632_chip *tps,
                goto skip_pwm_config;
 
        control |= TPS51632_DVFS_PWMEN;
-       tps->enable_pwm_dvfs = pdata->enable_pwm_dvfs;
        vsel = TPS51632_VOLT_VSEL(pdata->base_voltage_uV);
        ret = regmap_write(tps->regmap, TPS51632_VOLTAGE_BASE_REG, vsel);
        if (ret < 0) {
@@ -205,22 +166,96 @@ skip_pwm_config:
        return ret;
 }
 
-static bool rd_wr_reg(struct device *dev, unsigned int reg)
+static bool is_volatile_reg(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case TPS51632_OFFSET_REG:
+       case TPS51632_FAULT_REG:
+       case TPS51632_IMON_REG:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static bool is_read_reg(struct device *dev, unsigned int reg)
 {
-       if ((reg >= 0x8) && (reg <= 0x10))
+       switch (reg) {
+       case 0x08 ... 0x0F:
                return false;
-       return true;
+       default:
+               return true;
+       }
+}
+
+static bool is_write_reg(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case TPS51632_VOLTAGE_SELECT_REG:
+       case TPS51632_VOLTAGE_BASE_REG:
+       case TPS51632_VMAX_REG:
+       case TPS51632_DVFS_CONTROL_REG:
+       case TPS51632_POWER_STATE_REG:
+       case TPS51632_SLEW_REGS:
+               return true;
+       default:
+               return false;
+       }
 }
 
 static const struct regmap_config tps51632_regmap_config = {
        .reg_bits               = 8,
        .val_bits               = 8,
-       .writeable_reg          = rd_wr_reg,
-       .readable_reg           = rd_wr_reg,
+       .writeable_reg          = is_write_reg,
+       .readable_reg           = is_read_reg,
+       .volatile_reg           = is_volatile_reg,
        .max_register           = TPS51632_MAX_REG - 1,
        .cache_type             = REGCACHE_RBTREE,
 };
 
+#if defined(CONFIG_OF)
+static const struct of_device_id tps51632_of_match[] = {
+       { .compatible = "ti,tps51632",},
+       {},
+};
+MODULE_DEVICE_TABLE(of, tps51632_of_match);
+
+static struct tps51632_regulator_platform_data *
+       of_get_tps51632_platform_data(struct device *dev)
+{
+       struct tps51632_regulator_platform_data *pdata;
+       struct device_node *np = dev->of_node;
+
+       pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+       if (!pdata) {
+               dev_err(dev, "Memory alloc failed for platform data\n");
+               return NULL;
+       }
+
+       pdata->reg_init_data = of_get_regulator_init_data(dev, dev->of_node);
+       if (!pdata->reg_init_data) {
+               dev_err(dev, "Not able to get OF regulator init data\n");
+               return NULL;
+       }
+
+       pdata->enable_pwm_dvfs =
+                       of_property_read_bool(np, "ti,enable-pwm-dvfs");
+       pdata->dvfs_step_20mV = of_property_read_bool(np, "ti,dvfs-step-20mV");
+
+       pdata->base_voltage_uV = pdata->reg_init_data->constraints.min_uV ? :
+                                       TPS51632_MIN_VOLATGE;
+       pdata->max_voltage_uV = pdata->reg_init_data->constraints.max_uV ? :
+                                       TPS51632_MAX_VOLATGE;
+       return pdata;
+}
+#else
+static struct tps51632_regulator_platform_data *
+       of_get_tps51632_platform_data(struct device *dev)
+{
+       return NULL;
+}
+#endif
+
 static int tps51632_probe(struct i2c_client *client,
                                const struct i2c_device_id *id)
 {
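
The defaults at the bottom of of_get_tps51632_platform_data() use the GNU conditional with an omitted middle operand ("x ? : y"), which yields x unless x is zero, in which case it falls back to y. A minimal standalone equivalent; the 500000 uV fallback is an assumed value, not necessarily TPS51632_MIN_VOLATGE:

#include <stdio.h>

int main(void)
{
	int constraint_min_uv = 0;			/* e.g. not set in the DT */
	int base_uv = constraint_min_uv ? : 500000;	/* GNU extension: a ? : b == a ? a : b */

	printf("base voltage: %d uV\n", base_uv);	/* prints the fallback */
	return 0;
}
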
@@ -230,7 +265,19 @@ static int tps51632_probe(struct i2c_client *client,
        int ret;
        struct regulator_config config = { };
 
+       if (client->dev.of_node) {
+               const struct of_device_id *match;
+               match = of_match_device(of_match_ptr(tps51632_of_match),
+                               &client->dev);
+               if (!match) {
+                       dev_err(&client->dev, "Error: No device match found\n");
+                       return -ENODEV;
+               }
+       }
+
        pdata = client->dev.platform_data;
+       if (!pdata && client->dev.of_node)
+               pdata = of_get_tps51632_platform_data(&client->dev);
        if (!pdata) {
                dev_err(&client->dev, "No Platform data\n");
                return -EINVAL;
@@ -269,6 +316,12 @@ static int tps51632_probe(struct i2c_client *client,
        tps->desc.type = REGULATOR_VOLTAGE;
        tps->desc.owner = THIS_MODULE;
 
+       if (pdata->enable_pwm_dvfs)
+               tps->desc.vsel_reg = TPS51632_VOLTAGE_BASE_REG;
+       else
+               tps->desc.vsel_reg = TPS51632_VOLTAGE_SELECT_REG;
+       tps->desc.vsel_mask = TPS51632_VOUT_MASK;
+
        tps->regmap = devm_regmap_init_i2c(client, &tps51632_regmap_config);
        if (IS_ERR(tps->regmap)) {
                ret = PTR_ERR(tps->regmap);
@@ -319,6 +372,7 @@ static struct i2c_driver tps51632_i2c_driver = {
        .driver = {
                .name = "tps51632",
                .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(tps51632_of_match),
        },
        .probe = tps51632_probe,
        .remove = tps51632_remove,
index 0233cfb5656058a57ccd0dce40591701c16ce8b6..54aa2da7283bd6c0e898a5f90b67310877009a9a 100644 (file)
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
 #include <linux/regulator/tps6507x.h>
+#include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/mfd/tps6507x.h>
+#include <linux/regulator/of_regulator.h>
 
 /* DCDC's */
 #define TPS6507X_DCDC_1                                0
@@ -356,6 +358,80 @@ static struct regulator_ops tps6507x_pmic_ops = {
        .list_voltage = regulator_list_voltage_table,
 };
 
+#ifdef CONFIG_OF
+static struct of_regulator_match tps6507x_matches[] = {
+       { .name = "VDCDC1"},
+       { .name = "VDCDC2"},
+       { .name = "VDCDC3"},
+       { .name = "LDO1"},
+       { .name = "LDO2"},
+};
+
+static struct tps6507x_board *tps6507x_parse_dt_reg_data(
+               struct platform_device *pdev,
+               struct of_regulator_match **tps6507x_reg_matches)
+{
+       struct tps6507x_board *tps_board;
+       struct device_node *np = pdev->dev.parent->of_node;
+       struct device_node *regulators;
+       struct of_regulator_match *matches;
+       struct regulator_init_data *reg_data;
+       int idx = 0, count, ret;
+
+       tps_board = devm_kzalloc(&pdev->dev, sizeof(*tps_board),
+                                       GFP_KERNEL);
+       if (!tps_board) {
+               dev_err(&pdev->dev, "Failure to alloc pdata for regulators.\n");
+               return NULL;
+       }
+
+       regulators = of_find_node_by_name(np, "regulators");
+       if (!regulators) {
+               dev_err(&pdev->dev, "regulator node not found\n");
+               return NULL;
+       }
+
+       count = ARRAY_SIZE(tps6507x_matches);
+       matches = tps6507x_matches;
+
+       ret = of_regulator_match(&pdev->dev, regulators, matches, count);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
+                       ret);
+               return NULL;
+       }
+
+       *tps6507x_reg_matches = matches;
+
+       reg_data = devm_kzalloc(&pdev->dev, (sizeof(struct regulator_init_data)
+                                       * TPS6507X_NUM_REGULATOR), GFP_KERNEL);
+       if (!reg_data) {
+               dev_err(&pdev->dev, "Failure to alloc init data for regulators.\n");
+               return NULL;
+       }
+
+       tps_board->tps6507x_pmic_init_data = reg_data;
+
+       for (idx = 0; idx < count; idx++) {
+               if (!matches[idx].init_data || !matches[idx].of_node)
+                       continue;
+
+               memcpy(&reg_data[idx], matches[idx].init_data,
+                               sizeof(struct regulator_init_data));
+
+       }
+
+       return tps_board;
+}
+#else
+static inline struct tps6507x_board *tps6507x_parse_dt_reg_data(
+                       struct platform_device *pdev,
+                       struct of_regulator_match **tps6507x_reg_matches)
+{
+       *tps6507x_reg_matches = NULL;
+       return NULL;
+}
+#endif
 static int tps6507x_pmic_probe(struct platform_device *pdev)
 {
        struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
@@ -365,8 +441,10 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
        struct regulator_dev *rdev;
        struct tps6507x_pmic *tps;
        struct tps6507x_board *tps_board;
+       struct of_regulator_match *tps6507x_reg_matches = NULL;
        int i;
        int error;
+       unsigned int prop;
 
        /**
         * tps_board points to pmic related constants
@@ -374,6 +452,9 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
         */
 
        tps_board = dev_get_platdata(tps6507x_dev->dev);
+       if (!tps_board && tps6507x_dev->dev->of_node)
+               tps_board = tps6507x_parse_dt_reg_data(pdev,
+                                               &tps6507x_reg_matches);
        if (!tps_board)
                return -EINVAL;
 
@@ -415,6 +496,17 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
                config.init_data = init_data;
                config.driver_data = tps;
 
+               if (tps6507x_reg_matches) {
+                       error = of_property_read_u32(
+                               tps6507x_reg_matches[i].of_node,
+                                       "ti,defdcdc_default", &prop);
+
+                       if (!error)
+                               tps->info[i]->defdcdc_default = prop;
+
+                       config.of_node = tps6507x_reg_matches[i].of_node;
+               }
+
                rdev = regulator_register(&tps->desc[i], &config);
                if (IS_ERR(rdev)) {
                        dev_err(tps6507x_dev->dev,
index 41c391789c9790a59678dc1232bf0938e978a47e..c8e70451df38864ffbe2ffb3b9fb3d67817fedc4 100644 (file)
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/gpio.h>
+#include <linux/of_gpio.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
 #include <linux/mfd/tps65090.h>
 
 struct tps65090_regulator {
@@ -67,8 +69,8 @@ static struct regulator_desc tps65090_regulator_desc[] = {
        tps65090_REG_DESC(FET5,  "infet5",  0x13, tps65090_reg_contol_ops),
        tps65090_REG_DESC(FET6,  "infet6",  0x14, tps65090_reg_contol_ops),
        tps65090_REG_DESC(FET7,  "infet7",  0x15, tps65090_reg_contol_ops),
-       tps65090_REG_DESC(LDO1,  "vsys_l1", 0,    tps65090_ldo_ops),
-       tps65090_REG_DESC(LDO2,  "vsys_l2", 0,    tps65090_ldo_ops),
+       tps65090_REG_DESC(LDO1,  "vsys-l1", 0,    tps65090_ldo_ops),
+       tps65090_REG_DESC(LDO2,  "vsys-l2", 0,    tps65090_ldo_ops),
 };
 
 static inline bool is_dcdc(int id)
@@ -138,6 +140,92 @@ static void tps65090_configure_regulator_config(
        }
 }
 
+#ifdef CONFIG_OF
+static struct of_regulator_match tps65090_matches[] = {
+       { .name = "dcdc1", },
+       { .name = "dcdc2", },
+       { .name = "dcdc3", },
+       { .name = "fet1",  },
+       { .name = "fet2",  },
+       { .name = "fet3",  },
+       { .name = "fet4",  },
+       { .name = "fet5",  },
+       { .name = "fet6",  },
+       { .name = "fet7",  },
+       { .name = "ldo1",  },
+       { .name = "ldo2",  },
+};
+
+static struct tps65090_platform_data *tps65090_parse_dt_reg_data(
+               struct platform_device *pdev,
+               struct of_regulator_match **tps65090_reg_matches)
+{
+       struct tps65090_platform_data *tps65090_pdata;
+       struct device_node *np = pdev->dev.parent->of_node;
+       struct device_node *regulators;
+       int idx = 0, ret;
+       struct tps65090_regulator_plat_data *reg_pdata;
+
+       tps65090_pdata = devm_kzalloc(&pdev->dev, sizeof(*tps65090_pdata),
+                               GFP_KERNEL);
+       if (!tps65090_pdata) {
+               dev_err(&pdev->dev, "Memory alloc for tps65090_pdata failed\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       reg_pdata = devm_kzalloc(&pdev->dev, TPS65090_REGULATOR_MAX *
+                               sizeof(*reg_pdata), GFP_KERNEL);
+       if (!reg_pdata) {
+               dev_err(&pdev->dev, "Memory alloc for reg_pdata failed\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       regulators = of_find_node_by_name(np, "regulators");
+       if (!regulators) {
+               dev_err(&pdev->dev, "regulator node not found\n");
+               return ERR_PTR(-ENODEV);
+       }
+
+       ret = of_regulator_match(&pdev->dev, regulators, tps65090_matches,
+                       ARRAY_SIZE(tps65090_matches));
+       if (ret < 0) {
+               dev_err(&pdev->dev,
+                       "Error parsing regulator init data: %d\n", ret);
+               return ERR_PTR(ret);
+       }
+
+       *tps65090_reg_matches = tps65090_matches;
+       for (idx = 0; idx < ARRAY_SIZE(tps65090_matches); idx++) {
+               struct regulator_init_data *ri_data;
+               struct tps65090_regulator_plat_data *rpdata;
+
+               rpdata = &reg_pdata[idx];
+               ri_data = tps65090_matches[idx].init_data;
+               if (!ri_data || !tps65090_matches[idx].of_node)
+                       continue;
+
+               rpdata->reg_init_data = ri_data;
+               rpdata->enable_ext_control = of_property_read_bool(
+                                       tps65090_matches[idx].of_node,
+                                       "ti,enable-ext-control");
+               if (rpdata->enable_ext_control)
+                       rpdata->gpio = of_get_named_gpio(np,
+                                       "dcdc-ext-control-gpios", 0);
+
+               tps65090_pdata->reg_pdata[idx] = rpdata;
+       }
+       return tps65090_pdata;
+}
+#else
+static inline struct tps65090_platform_data *tps65090_parse_dt_reg_data(
+                       struct platform_device *pdev,
+                       struct of_regulator_match **tps65090_reg_matches)
+{
+       *tps65090_reg_matches = NULL;
+       return NULL;
+}
+#endif
+
 static int tps65090_regulator_probe(struct platform_device *pdev)
 {
        struct tps65090 *tps65090_mfd = dev_get_drvdata(pdev->dev.parent);
@@ -147,15 +235,19 @@ static int tps65090_regulator_probe(struct platform_device *pdev)
        struct tps65090_regulator_plat_data *tps_pdata;
        struct tps65090_regulator *pmic;
        struct tps65090_platform_data *tps65090_pdata;
+       struct of_regulator_match *tps65090_reg_matches = NULL;
        int num;
        int ret;
 
        dev_dbg(&pdev->dev, "Probing regulator\n");
 
        tps65090_pdata = dev_get_platdata(pdev->dev.parent);
-       if (!tps65090_pdata) {
+       if (!tps65090_pdata && tps65090_mfd->dev->of_node)
+               tps65090_pdata = tps65090_parse_dt_reg_data(pdev,
+                                       &tps65090_reg_matches);
+       if (IS_ERR_OR_NULL(tps65090_pdata)) {
                dev_err(&pdev->dev, "Platform data missing\n");
-               return -EINVAL;
+               return tps65090_pdata ? PTR_ERR(tps65090_pdata) : -EINVAL;
        }
 
        pmic = devm_kzalloc(&pdev->dev, TPS65090_REGULATOR_MAX * sizeof(*pmic),
@@ -192,13 +284,17 @@ static int tps65090_regulator_probe(struct platform_device *pdev)
                        }
                }
 
-               config.dev = &pdev->dev;
+               config.dev = pdev->dev.parent;
                config.driver_data = ri;
                config.regmap = tps65090_mfd->rmap;
                if (tps_pdata)
                        config.init_data = tps_pdata->reg_init_data;
                else
                        config.init_data = NULL;
+               if (tps65090_reg_matches)
+                       config.of_node = tps65090_reg_matches[num].of_node;
+               else
+                       config.of_node = NULL;
 
                rdev = regulator_register(ri->desc, &config);
                if (IS_ERR(rdev)) {
index 73dce76641265590179cb2440e75ad71ffd722d0..df395187c06301114c144305b19cdd29dedb5a22 100644 (file)
@@ -305,8 +305,8 @@ static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev)
        if (!regs)
                return NULL;
 
-       count = of_regulator_match(pdev->dev.parent, regs,
-                               reg_matches, TPS65217_NUM_REGULATOR);
+       count = of_regulator_match(&pdev->dev, regs, reg_matches,
+                                  TPS65217_NUM_REGULATOR);
        of_node_put(regs);
        if ((count < 0) || (count > TPS65217_NUM_REGULATOR))
                return NULL;
index f86da672c758b4ab1070facacdf291cabc482c66..e68382d0e1ea1c8025377f1755f90d95a33f172d 100644 (file)
@@ -61,10 +61,6 @@ struct tps6586x_regulator {
 
        int enable_bit[2];
        int enable_reg[2];
-
-       /* for DVM regulators */
-       int go_reg;
-       int go_bit;
 };
 
 static inline struct device *to_tps6586x_dev(struct regulator_dev *rdev)
@@ -72,37 +68,10 @@ static inline struct device *to_tps6586x_dev(struct regulator_dev *rdev)
        return rdev_get_dev(rdev)->parent;
 }
 
-static int tps6586x_set_voltage_sel(struct regulator_dev *rdev,
-                                   unsigned selector)
-{
-       struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
-       struct device *parent = to_tps6586x_dev(rdev);
-       int ret, val, rid = rdev_get_id(rdev);
-       uint8_t mask;
-
-       val = selector << (ffs(rdev->desc->vsel_mask) - 1);
-       mask = rdev->desc->vsel_mask;
-
-       ret = tps6586x_update(parent, rdev->desc->vsel_reg, val, mask);
-       if (ret)
-               return ret;
-
-       /* Update go bit for DVM regulators */
-       switch (rid) {
-       case TPS6586X_ID_LDO_2:
-       case TPS6586X_ID_LDO_4:
-       case TPS6586X_ID_SM_0:
-       case TPS6586X_ID_SM_1:
-               ret = tps6586x_set_bits(parent, ri->go_reg, 1 << ri->go_bit);
-               break;
-       }
-       return ret;
-}
-
 static struct regulator_ops tps6586x_regulator_ops = {
        .list_voltage = regulator_list_voltage_table,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
-       .set_voltage_sel = tps6586x_set_voltage_sel,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
 
        .is_enabled = regulator_is_enabled_regmap,
        .enable = regulator_enable_regmap,
@@ -142,7 +111,7 @@ static const unsigned int tps6586x_dvm_voltages[] = {
 };
 
 #define TPS6586X_REGULATOR(_id, _pin_name, vdata, vreg, shift, nbits,  \
-                          ereg0, ebit0, ereg1, ebit1)                  \
+                          ereg0, ebit0, ereg1, ebit1, goreg, gobit)    \
        .desc   = {                                                     \
                .supply_name = _pin_name,                               \
                .name   = "REG-" #_id,                                  \
@@ -156,29 +125,26 @@ static const unsigned int tps6586x_dvm_voltages[] = {
                .enable_mask = 1 << (ebit0),                            \
                .vsel_reg = TPS6586X_##vreg,                            \
                .vsel_mask = ((1 << (nbits)) - 1) << (shift),           \
+               .apply_reg = (goreg),                           \
+               .apply_bit = (gobit),                           \
        },                                                              \
        .enable_reg[0]  = TPS6586X_SUPPLY##ereg0,                       \
        .enable_bit[0]  = (ebit0),                                      \
        .enable_reg[1]  = TPS6586X_SUPPLY##ereg1,                       \
        .enable_bit[1]  = (ebit1),
 
-#define TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit)                     \
-       .go_reg = TPS6586X_##goreg,                                     \
-       .go_bit = (gobit),
-
 #define TPS6586X_LDO(_id, _pname, vdata, vreg, shift, nbits,           \
                     ereg0, ebit0, ereg1, ebit1)                        \
 {                                                                      \
        TPS6586X_REGULATOR(_id, _pname, vdata, vreg, shift, nbits,      \
-                          ereg0, ebit0, ereg1, ebit1)                  \
+                          ereg0, ebit0, ereg1, ebit1, 0, 0)            \
 }
 
 #define TPS6586X_DVM(_id, _pname, vdata, vreg, shift, nbits,           \
                     ereg0, ebit0, ereg1, ebit1, goreg, gobit)          \
 {                                                                      \
        TPS6586X_REGULATOR(_id, _pname, vdata, vreg, shift, nbits,      \
-                          ereg0, ebit0, ereg1, ebit1)                  \
-       TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit)                      \
+                          ereg0, ebit0, ereg1, ebit1, goreg, gobit)    \
 }
 
 #define TPS6586X_SYS_REGULATOR()                                       \
@@ -207,13 +173,13 @@ static struct tps6586x_regulator tps6586x_regulator[] = {
        TPS6586X_LDO(SM_2, "vin-sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7),
 
        TPS6586X_DVM(LDO_2, "vinldo23", dvm, LDO2BV1, 0, 5, ENA, 3,
-                                       ENB, 3, VCC2, 6),
+                                       ENB, 3, TPS6586X_VCC2, BIT(6)),
        TPS6586X_DVM(LDO_4, "vinldo4", ldo4, LDO4V1, 0, 5, ENC, 3,
-                                       END, 3, VCC1, 6),
+                                       END, 3, TPS6586X_VCC1, BIT(6)),
        TPS6586X_DVM(SM_0, "vin-sm0", dvm, SM0V1, 0, 5, ENA, 1,
-                                       ENB, 1, VCC1, 2),
+                                       ENB, 1, TPS6586X_VCC1, BIT(2)),
        TPS6586X_DVM(SM_1, "vin-sm1", dvm, SM1V1, 0, 5, ENA, 0,
-                                       ENB, 0, VCC1, 0),
+                                       ENB, 0, TPS6586X_VCC1, BIT(0)),
 };
 
 /*
index 59c3770fa77dbe5b77ae1b3c96c1e105cae8f58e..6ba6931ac8557c7ee902438cfd4679017c559121 100644 (file)
@@ -964,8 +964,7 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(
 {
        struct tps65910_board *pmic_plat_data;
        struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
-       struct device_node *np = pdev->dev.parent->of_node;
-       struct device_node *regulators;
+       struct device_node *np, *regulators;
        struct of_regulator_match *matches;
        unsigned int prop;
        int idx = 0, ret, count;
@@ -978,6 +977,7 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(
                return NULL;
        }
 
+       np = of_node_get(pdev->dev.parent->of_node);
        regulators = of_find_node_by_name(np, "regulators");
        if (!regulators) {
                dev_err(&pdev->dev, "regulator node not found\n");
@@ -994,11 +994,13 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(
                matches = tps65911_matches;
                break;
        default:
+               of_node_put(regulators);
                dev_err(&pdev->dev, "Invalid tps chip version\n");
                return NULL;
        }
 
-       ret = of_regulator_match(pdev->dev.parent, regulators, matches, count);
+       ret = of_regulator_match(&pdev->dev, regulators, matches, count);
+       of_node_put(regulators);
        if (ret < 0) {
                dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
                        ret);
index b15d711bc8c66634c9792c26e15885e055aabe11..9019d0e7ecb6ad9868fa96e23fa0f77a1275fc75 100644 (file)
@@ -728,7 +728,7 @@ static int tps80031_regulator_probe(struct platform_device *pdev)
                        }
                }
                rdev = regulator_register(&ri->rinfo->desc, &config);
-               if (IS_ERR_OR_NULL(rdev)) {
+               if (IS_ERR(rdev)) {
                        dev_err(&pdev->dev,
                                "register regulator failed %s\n",
                                        ri->rinfo->desc.name);
index 923a9da9c829d7be9a3fe95bac77b0b7e4431de6..5e44eaabf45753f11cbfecb87d72627d56985606 100644 (file)
@@ -20,14 +20,24 @@ if RTC_CLASS
 config RTC_HCTOSYS
        bool "Set system time from RTC on startup and resume"
        default y
+       depends on !ALWAYS_USE_PERSISTENT_CLOCK
        help
          If you say yes here, the system time (wall clock) will be set using
          the value read from a specified RTC device. This is useful to avoid
          unnecessary fsck runs at boot time, and to network better.
 
+config RTC_SYSTOHC
+       bool "Set the RTC time based on NTP synchronization"
+       default y
+       depends on !ALWAYS_USE_PERSISTENT_CLOCK
+       help
+         If you say yes here, the system time (wall clock) will be stored
+         in the RTC specified by RTC_HCTOSYS_DEVICE approximately every 11
+         minutes if userspace reports synchronized NTP status.
+
 config RTC_HCTOSYS_DEVICE
        string "RTC used to set the system time"
-       depends on RTC_HCTOSYS = y
+       depends on RTC_HCTOSYS = y || RTC_SYSTOHC = y
        default "rtc0"
        help
          The RTC device that will be used to (re)initialize the system
index 4418ef3f9ecc9278b53aae5dd5177cff522b24ba..ec2988b00a44cb8aa4f271f9d79d5c7a6c9551a8 100644 (file)
@@ -6,6 +6,7 @@ ccflags-$(CONFIG_RTC_DEBUG)     := -DDEBUG
 
 obj-$(CONFIG_RTC_LIB)          += rtc-lib.o
 obj-$(CONFIG_RTC_HCTOSYS)      += hctosys.o
+obj-$(CONFIG_RTC_SYSTOHC)      += systohc.o
 obj-$(CONFIG_RTC_CLASS)                += rtc-core.o
 rtc-core-y                     := class.o interface.o
 
index 5143629dedbdcf9262240e98f6a9637d249c5679..26388f1825941329b28168a36b338c7405a6a4f7 100644 (file)
@@ -50,6 +50,10 @@ static int rtc_suspend(struct device *dev, pm_message_t mesg)
        struct rtc_device       *rtc = to_rtc_device(dev);
        struct rtc_time         tm;
        struct timespec         delta, delta_delta;
+
+       if (has_persistent_clock())
+               return 0;
+
        if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
                return 0;
 
@@ -88,6 +92,9 @@ static int rtc_resume(struct device *dev)
        struct timespec         new_system, new_rtc;
        struct timespec         sleep_time;
 
+       if (has_persistent_clock())
+               return 0;
+
        rtc_hctosys_ret = -ENODEV;
        if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
                return 0;
index 96bafc5c3bf87c2426892cdc0ffa46756e7baf36..8f0dcfedb83cdfb9c613083574e8e591826beaf5 100644 (file)
@@ -227,7 +227,7 @@ static const struct rtc_class_ops da9055_rtc_ops = {
        .alarm_irq_enable = da9055_rtc_alarm_irq_enable,
 };
 
-static int __init da9055_rtc_device_init(struct da9055 *da9055,
+static int da9055_rtc_device_init(struct da9055 *da9055,
                                        struct da9055_pdata *pdata)
 {
        int ret;
index afb7cfa85ccc04ac637268ee0bba4240f05c12d3..c016ad81767a9d1be4697c267072a124f4398fc6 100644 (file)
@@ -506,6 +506,7 @@ isl1208_rtc_interrupt(int irq, void *data)
 {
        unsigned long timeout = jiffies + msecs_to_jiffies(1000);
        struct i2c_client *client = data;
+       struct rtc_device *rtc = i2c_get_clientdata(client);
        int handled = 0, sr, err;
 
        /*
@@ -528,6 +529,8 @@ isl1208_rtc_interrupt(int irq, void *data)
        if (sr & ISL1208_REG_SR_ALM) {
                dev_dbg(&client->dev, "alarm!\n");
 
+               rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
+
                /* Clear the alarm */
                sr &= ~ISL1208_REG_SR_ALM;
                sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR, sr);
index 08378e3cc21cf8209796d50876ee64ae5c0a4aaa..81c5077feff32af9724da4e81f674b4d29c524e5 100644 (file)
@@ -44,6 +44,7 @@
 #define RTC_YMR                0x34    /* Year match register */
 #define RTC_YLR                0x38    /* Year data load register */
 
+#define RTC_CR_EN      (1 << 0)        /* counter enable bit */
 #define RTC_CR_CWEN    (1 << 26)       /* Clockwatch enable bit */
 
 #define RTC_TCR_EN     (1 << 1) /* Periodic timer enable bit */
@@ -320,7 +321,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
        struct pl031_local *ldata;
        struct pl031_vendor_data *vendor = id->data;
        struct rtc_class_ops *ops = &vendor->ops;
-       unsigned long time;
+       unsigned long time, data;
 
        ret = amba_request_regions(adev, NULL);
        if (ret)
@@ -345,10 +346,13 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
        dev_dbg(&adev->dev, "designer ID = 0x%02x\n", amba_manf(adev));
        dev_dbg(&adev->dev, "revision = 0x%01x\n", amba_rev(adev));
 
+       data = readl(ldata->base + RTC_CR);
        /* Enable the clockwatch on ST Variants */
        if (vendor->clockwatch)
-               writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN,
-                      ldata->base + RTC_CR);
+               data |= RTC_CR_CWEN;
+       else
+               data |= RTC_CR_EN;
+       writel(data, ldata->base + RTC_CR);
 
        /*
         * On ST PL031 variants, the RTC reset value does not provide correct
index 00c930f4b6f322fcfd42b37c8f2e6926f0bdbbfd..2730533e2d2df2da9417882fb6891b5a65a17a1f 100644 (file)
@@ -137,7 +137,7 @@ static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
                return -EINVAL;
        }
 
-       writel((bin2bcd(tm->tm_year - 100) << DATE_YEAR_S)
+       writel((bin2bcd(tm->tm_year % 100) << DATE_YEAR_S)
                | (bin2bcd(tm->tm_mon + 1) << DATE_MONTH_S)
                | (bin2bcd(tm->tm_mday))
                | ((tm->tm_year >= 200) << DATE_CENTURY_S),
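
A hedged worked example of the year fix above: bin2bcd() only encodes values 0-99, so feeding it tm_year - 100 breaks once tm_year reaches 200 (the year 2100); tm_year % 100 keeps the two BCD digits in range while the separate century bit carries the rest. The helper name below is illustrative only; DATE_YEAR_S and DATE_CENTURY_S are the register shifts visible in the hunk.

#include <linux/bcd.h>          /* bin2bcd() */

/* Illustrative only: encode the year field the way the fixed driver does. */
static u32 vt8500_year_field_example(int tm_year)
{
        u32 val = bin2bcd(tm_year % 100) << DATE_YEAR_S;

        val |= (u32)(tm_year >= 200) << DATE_CENTURY_S;
        return val;     /* tm_year = 200 (year 2100) -> BCD 00 plus century bit */
}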
diff --git a/drivers/rtc/systohc.c b/drivers/rtc/systohc.c
new file mode 100644 (file)
index 0000000..bf3e242
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ */
+#include <linux/rtc.h>
+#include <linux/time.h>
+
+/**
+ * rtc_set_ntp_time - Save NTP synchronized time to the RTC
+ * @now: Current time of day
+ *
+ * Replacement for the NTP platform function update_persistent_clock
+ * that stores time for later retrieval by rtc_hctosys.
+ *
+ * Returns 0 on a successful RTC update, -ENODEV if an RTC update is not
+ * possible at all, and various other -errno values for specific temporary
+ * failure cases.
+ *
+ * If a temporary failure is indicated, the caller should try again 'soon'.
+ */
+int rtc_set_ntp_time(struct timespec now)
+{
+       struct rtc_device *rtc;
+       struct rtc_time tm;
+       int err = -ENODEV;
+
+       if (now.tv_nsec < (NSEC_PER_SEC >> 1))
+               rtc_time_to_tm(now.tv_sec, &tm);
+       else
+               rtc_time_to_tm(now.tv_sec + 1, &tm);
+
+       rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
+       if (rtc) {
+               /* rtc_hctosys exclusively uses UTC, so we call set_time here,
+                * not set_mmss. */
+               if (rtc->ops && (rtc->ops->set_time || rtc->ops->set_mmss))
+                       err = rtc_set_time(rtc, &tm);
+               rtc_class_close(rtc);
+       }
+
+       return err;
+}
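
For context, a hedged sketch of how a periodic NTP sync path could use the new helper; the fallback ordering and the wrapper name are assumptions for illustration, not a quote of kernel/time/ntp.c.

#include <linux/rtc.h>
#include <linux/time.h>

/*
 * Illustration only: try the architecture's persistent clock first and
 * fall back to the RTC class helper added above; any temporary -errno
 * means "retry on the next sync interval".
 */
static int store_ntp_time_example(struct timespec now)
{
        int err = update_persistent_clock(now);    /* arch hook, may be -ENODEV */

        if (err == -ENODEV)
                err = rtc_set_ntp_time(now);       /* writes RTC_HCTOSYS_DEVICE */
        return err;
}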
index 9bd5da36f99eb2d3a36642b05508d5578f473846..704488d0f819e70a762e7bda9eef25e9067bc2bc 100644 (file)
@@ -248,7 +248,7 @@ static void dasd_ext_handler(struct ext_code ext_code,
        default:
                return;
        }
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++;
+       inc_irq_stat(IRQEXT_DSD);
        if (!ip) {              /* no intparm: unsolicited interrupt */
                DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited "
                              "interrupt");
index 806fe912d6e779ad7456b35f175269352f56e8d6..e37bc1620d143dd12d390d489f76fb127aec059f 100644 (file)
@@ -4274,7 +4274,7 @@ static struct ccw_driver dasd_eckd_driver = {
        .thaw        = dasd_generic_restore_device,
        .restore     = dasd_generic_restore_device,
        .uc_handler  = dasd_generic_uc_handler,
-       .int_class   = IOINT_DAS,
+       .int_class   = IRQIO_DAS,
 };
 
 /*
index eb748507c7fac2b81f7d3d2028a2f11c47aaa854..4146985843444d46da0db635edc357e9076dede4 100644 (file)
@@ -78,7 +78,7 @@ static struct ccw_driver dasd_fba_driver = {
        .freeze      = dasd_generic_pm_freeze,
        .thaw        = dasd_generic_restore_device,
        .restore     = dasd_generic_restore_device,
-       .int_class   = IOINT_DAS,
+       .int_class   = IRQIO_DAS,
 };
 
 static void
index 40084501c31b7a28d2a436353a16e906c5d3640f..33b7141a182f6ef369ef9475f1a9f5293561cd2c 100644 (file)
@@ -44,6 +44,7 @@
 #define RAW3215_NR_CCWS            3
 #define RAW3215_TIMEOUT            HZ/10     /* time for delayed output */
 
+#define RAW3215_FIXED      1         /* 3215 console device must not be freed */
 #define RAW3215_WORKING            4         /* set if a request is being worked on */
 #define RAW3215_THROTTLED   8        /* set if reading is disabled */
 #define RAW3215_STOPPED            16        /* set if writing is disabled */
@@ -630,7 +631,8 @@ static void raw3215_shutdown(struct raw3215_info *raw)
        DECLARE_WAITQUEUE(wait, current);
        unsigned long flags;
 
-       if (!(raw->port.flags & ASYNC_INITIALIZED))
+       if (!(raw->port.flags & ASYNC_INITIALIZED) ||
+           (raw->flags & RAW3215_FIXED))
                return;
        /* Wait for outstanding requests, then free irq */
        spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
@@ -805,7 +807,7 @@ static struct ccw_driver raw3215_ccw_driver = {
        .freeze         = &raw3215_pm_stop,
        .thaw           = &raw3215_pm_start,
        .restore        = &raw3215_pm_start,
-       .int_class      = IOINT_C15,
+       .int_class      = IRQIO_C15,
 };
 
 #ifdef CONFIG_TN3215_CONSOLE
@@ -927,6 +929,8 @@ static int __init con3215_init(void)
        dev_set_drvdata(&cdev->dev, raw);
        cdev->handler = raw3215_irq;
 
+       raw->flags |= RAW3215_FIXED;
+
        /* Request the console irq */
        if (raw3215_startup(raw) != 0) {
                raw3215_free_info(raw);
index f3b8bb84faf295013c03c1d4e539ee967015fa2a..9a6c140c5f072133c7f70281387236428f506d7d 100644 (file)
@@ -1396,7 +1396,7 @@ static struct ccw_driver raw3270_ccw_driver = {
        .freeze         = &raw3270_pm_stop,
        .thaw           = &raw3270_pm_start,
        .restore        = &raw3270_pm_start,
-       .int_class      = IOINT_C70,
+       .int_class      = IRQIO_C70,
 };
 
 static int
index 4fa21f7e23085aafbdef58ce7d4805e07418118c..12c16a65dd25ee778e693a36d31d54f782e2ea3a 100644 (file)
@@ -400,7 +400,7 @@ static void sclp_interrupt_handler(struct ext_code ext_code,
        u32 finished_sccb;
        u32 evbuf_pending;
 
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
+       inc_irq_stat(IRQEXT_SCP);
        spin_lock(&sclp_lock);
        finished_sccb = param32 & 0xfffffff8;
        evbuf_pending = param32 & 0x3;
@@ -813,7 +813,7 @@ static void sclp_check_handler(struct ext_code ext_code,
 {
        u32 finished_sccb;
 
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
+       inc_irq_stat(IRQEXT_SCP);
        finished_sccb = param32 & 0xfffffff8;
        /* Is this the interrupt we are waiting for? */
        if (finished_sccb == 0)
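
Many of the s390 hunks in this pull are the same mechanical conversion; a hedged before/after sketch follows (the helper and enum names are those visible in the hunks, the wrapper function is illustrative only).

#include <asm/irq.h>            /* inc_irq_stat(), IRQEXT_* and IRQIO_* classes */

/*
 * Before (removed): kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
 * After (added):    inc_irq_stat(IRQEXT_SCP);
 * The I/O interrupt classes are renamed the same way (IOINT_xxx -> IRQIO_xxx).
 */
static void account_sclp_irq_example(void)
{
        inc_irq_stat(IRQEXT_SCP);       /* per-cpu interrupt statistics */
}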
index 6ae929c024ae3bf811ecabe4986301ac6b9a807b..9aa79702b3707a26ef0b4d370c54bf86b51251de 100644 (file)
@@ -1193,7 +1193,7 @@ static struct ccw_driver tape_34xx_driver = {
        .set_online = tape_34xx_online,
        .set_offline = tape_generic_offline,
        .freeze = tape_generic_pm_suspend,
-       .int_class = IOINT_TAP,
+       .int_class = IRQIO_TAP,
 };
 
 static int
index 1b0eb49f739c72413c5cdd43835c54141c2464e6..327cb19ad0b0c4df56dfe253a0eb0dd989c5a91c 100644 (file)
@@ -1656,7 +1656,7 @@ static struct ccw_driver tape_3590_driver = {
        .set_offline = tape_generic_offline,
        .set_online = tape_3590_online,
        .freeze = tape_generic_pm_suspend,
-       .int_class = IOINT_TAP,
+       .int_class = IRQIO_TAP,
 };
 
 /*
index 73bef0bd394cab2df006f64bf5e3c25ecce7bb3f..483f72ba030da670acb261774364f2cf0b896400 100644 (file)
@@ -74,7 +74,7 @@ static struct ccw_driver ur_driver = {
        .set_online     = ur_set_online,
        .set_offline    = ur_set_offline,
        .freeze         = ur_pm_suspend,
-       .int_class      = IOINT_VMR,
+       .int_class      = IRQIO_VMR,
 };
 
 static DEFINE_MUTEX(vmur_mutex);
index 68e80e2734a4be89763ca466a48621911570bce4..10729bbceceda4f49432e5eb647e04ae9d1399c4 100644 (file)
@@ -283,7 +283,7 @@ struct chsc_sei_nt2_area {
        u8  ccdf[PAGE_SIZE - 24 - 56];  /* content-code dependent field */
 } __packed;
 
-#define CHSC_SEI_NT0   0ULL
+#define CHSC_SEI_NT0   (1ULL << 63)
 #define CHSC_SEI_NT2   (1ULL << 61)
 
 struct chsc_sei {
@@ -291,7 +291,8 @@ struct chsc_sei {
        u32 reserved1;
        u64 ntsm;                       /* notification type mask */
        struct chsc_header response;
-       u32 reserved2;
+       u32 :24;
+       u8 nt;
        union {
                struct chsc_sei_nt0_area nt0_area;
                struct chsc_sei_nt2_area nt2_area;
@@ -496,17 +497,17 @@ static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm)
                                css_schedule_eval_all();
                        }
 
-                       switch (sei->ntsm) {
-                       case CHSC_SEI_NT0:
+                       switch (sei->nt) {
+                       case 0:
                                chsc_process_sei_nt0(&sei->u.nt0_area);
-                               return 1;
-                       case CHSC_SEI_NT2:
+                               break;
+                       case 2:
                                chsc_process_sei_nt2(&sei->u.nt2_area);
-                               return 1;
+                               break;
                        default:
-                               CIO_CRW_EVENT(2, "chsc: unhandled nt (nt=%08Lx)\n",
-                                             sei->ntsm);
-                               return 0;
+                               CIO_CRW_EVENT(2, "chsc: unhandled nt=%d\n",
+                                             sei->nt);
+                               break;
                        }
                } else {
                        CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
@@ -537,15 +538,7 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
        sei = sei_page;
 
        CIO_TRACE_EVENT(2, "prcss");
-
-       /*
-        * The ntsm does not allow to select NT0 and NT2 together. We need to
-        * first check for NT2, than additionally for NT0...
-        */
-#ifdef CONFIG_PCI
-       if (!__chsc_process_crw(sei, CHSC_SEI_NT2))
-#endif
-               __chsc_process_crw(sei, CHSC_SEI_NT0);
+       __chsc_process_crw(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
 }
 
 void chsc_chp_online(struct chp_id chpid)
index 8f9a1a384496eb396ce2b0cbcec28327ddb48d3f..facdf809113f22a6fe9e070b3f8938942bbaf480 100644 (file)
@@ -58,7 +58,7 @@ static void chsc_subchannel_irq(struct subchannel *sch)
 
        CHSC_LOG(4, "irb");
        CHSC_LOG_HEX(4, irb, sizeof(*irb));
-       kstat_cpu(smp_processor_id()).irqs[IOINT_CSC]++;
+       inc_irq_stat(IRQIO_CSC);
 
        /* Copy irb to provided request and set done. */
        if (!request) {
index 8e927b9f285f25de6ba520bf04f07661a02e51aa..c8faf6230b0f332010678f5c4aced898cb20b66d 100644 (file)
@@ -611,7 +611,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
        tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
        irb = (struct irb *)&S390_lowcore.irb;
        do {
-               kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
+               kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL);
                if (tpi_info->adapter_IO) {
                        do_adapter_IO(tpi_info->isc);
                        continue;
@@ -619,7 +619,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
                sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
                if (!sch) {
                        /* Clear pending interrupt condition. */
-                       kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+                       inc_irq_stat(IRQIO_CIO);
                        tsch(tpi_info->schid, irb);
                        continue;
                }
@@ -633,9 +633,9 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
                        if (sch->driver && sch->driver->irq)
                                sch->driver->irq(sch);
                        else
-                               kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+                               inc_irq_stat(IRQIO_CIO);
                } else
-                       kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+                       inc_irq_stat(IRQIO_CIO);
                spin_unlock(sch->lock);
                /*
                 * Are more interrupts pending?
@@ -678,7 +678,7 @@ static void cio_tsch(struct subchannel *sch)
        if (sch->driver && sch->driver->irq)
                sch->driver->irq(sch);
        else
-               kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+               inc_irq_stat(IRQIO_CIO);
        if (!irq_context) {
                irq_exit();
                _local_bh_enable();
index 6995cff44636a23dfe857098c2faca86670311fe..7cd5c6812ac7ce4d50f2068782f4426f546009df 100644 (file)
@@ -758,7 +758,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
                                        struct ccw_device *cdev)
 {
        cdev->private->cdev = cdev;
-       cdev->private->int_class = IOINT_CIO;
+       cdev->private->int_class = IRQIO_CIO;
        atomic_set(&cdev->private->onoff, 0);
        cdev->dev.parent = &sch->dev;
        cdev->dev.release = ccw_device_release;
@@ -1023,7 +1023,7 @@ static void io_subchannel_irq(struct subchannel *sch)
        if (cdev)
                dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
        else
-               kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+               inc_irq_stat(IRQIO_CIO);
 }
 
 void io_subchannel_init_config(struct subchannel *sch)
@@ -1634,7 +1634,7 @@ ccw_device_probe_console(void)
        memset(&console_private, 0, sizeof(struct ccw_device_private));
        console_cdev.private = &console_private;
        console_private.cdev = &console_cdev;
-       console_private.int_class = IOINT_CIO;
+       console_private.int_class = IRQIO_CIO;
        ret = ccw_device_console_enable(&console_cdev, sch);
        if (ret) {
                cio_release_console();
@@ -1715,13 +1715,13 @@ ccw_device_probe (struct device *dev)
        if (cdrv->int_class != 0)
                cdev->private->int_class = cdrv->int_class;
        else
-               cdev->private->int_class = IOINT_CIO;
+               cdev->private->int_class = IRQIO_CIO;
 
        ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
 
        if (ret) {
                cdev->drv = NULL;
-               cdev->private->int_class = IOINT_CIO;
+               cdev->private->int_class = IRQIO_CIO;
                return ret;
        }
 
@@ -1755,7 +1755,7 @@ ccw_device_remove (struct device *dev)
        }
        ccw_device_set_timeout(cdev, 0);
        cdev->drv = NULL;
-       cdev->private->int_class = IOINT_CIO;
+       cdev->private->int_class = IRQIO_CIO;
        return 0;
 }
 
index 2e575cff984549bbc90478663d6fcf3a23b10410..7d4ecb65db00cfab037ff4e4d6684570fcdf54e7 100644 (file)
@@ -61,11 +61,10 @@ dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
 
        if (dev_event == DEV_EVENT_INTERRUPT) {
                if (state == DEV_STATE_ONLINE)
-                       kstat_cpu(smp_processor_id()).
-                               irqs[cdev->private->int_class]++;
+                       inc_irq_stat(cdev->private->int_class);
                else if (state != DEV_STATE_CMFCHANGE &&
                         state != DEV_STATE_CMFUPDATE)
-                       kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
+                       inc_irq_stat(IRQIO_CIO);
        }
        dev_jumptable[state][dev_event](cdev, dev_event);
 }
index 6c9673400464fdc89b3a462e21820961f1de049a..d9eddcba7e884d788a9d8c1f3dad14bc3d71cf77 100644 (file)
@@ -139,7 +139,7 @@ static void eadm_subchannel_irq(struct subchannel *sch)
        EADM_LOG(6, "irq");
        EADM_LOG_HEX(6, irb, sizeof(*irb));
 
-       kstat_cpu(smp_processor_id()).irqs[IOINT_ADM]++;
+       inc_irq_stat(IRQIO_ADM);
 
        if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
            && scsw->eswf == 1 && irb->esw.eadm.erw.r)
index bdb394b066fcff56aac5588119c5ecc64efddf1f..bde5255200dc3864687fa955e69440503bd46b4b 100644 (file)
@@ -182,7 +182,7 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
        struct qdio_q *q;
 
        last_ai_time = S390_lowcore.int_clock;
-       kstat_cpu(smp_processor_id()).irqs[IOINT_QAI]++;
+       inc_irq_stat(IRQIO_QAI);
 
        /* protect tiq_list entries, only changed in activate or shutdown */
        rcu_read_lock();
index 7b865a7300e6a7da366762bf7b3882986fc462b3..b8b340ac53321f1028366d1a0cd18446050ed034 100644 (file)
@@ -1272,7 +1272,7 @@ out:
 
 static void ap_interrupt_handler(void *unused1, void *unused2)
 {
-       kstat_cpu(smp_processor_id()).irqs[IOINT_APB]++;
+       inc_irq_stat(IRQIO_APB);
        tasklet_schedule(&ap_tasklet);
 }
 
index 7dabef624da394e6b90a2e04c787f74a4ffaa98d..8491111aec12d45b780243a907f71bb86adbfe17 100644 (file)
@@ -392,7 +392,7 @@ static void kvm_extint_handler(struct ext_code ext_code,
 
        if ((ext_code.subcode & 0xff00) != VIRTIO_SUBCODE_64)
                return;
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++;
+       inc_irq_stat(IRQEXT_VRT);
 
        /* The LSB might be overloaded, we have to mask it */
        vq = (struct virtqueue *)(param64 & ~1UL);
index 5c70a6599578d6940faff2e1791e89ef4d4b769b..83bc9c5fa0c190885656955458b342d2265362f9 100644 (file)
@@ -282,7 +282,7 @@ static struct ccw_driver claw_ccw_driver = {
        .ids    = claw_ids,
        .probe  = ccwgroup_probe_ccwdev,
        .remove = ccwgroup_remove_ccwdev,
-       .int_class = IOINT_CLW,
+       .int_class = IRQIO_CLW,
 };
 
 static ssize_t claw_driver_group_store(struct device_driver *ddrv,
index 817b68925dddbd5c097129e6da6be7f41c77bdc8..676f12049a3693056d58ba216b0f1c3eeacdf92c 100644 (file)
@@ -1755,7 +1755,7 @@ static struct ccw_driver ctcm_ccw_driver = {
        .ids    = ctcm_ids,
        .probe  = ccwgroup_probe_ccwdev,
        .remove = ccwgroup_remove_ccwdev,
-       .int_class = IOINT_CTC,
+       .int_class = IRQIO_CTC,
 };
 
 static struct ccwgroup_driver ctcm_group_driver = {
index 2ca0f1dd7a00b857211348cf28d2c9e4a9df161a..c645dc9e98af83359d13b4975221a4d609ace853 100644 (file)
@@ -2384,7 +2384,7 @@ static struct ccw_driver lcs_ccw_driver = {
        .ids    = lcs_ids,
        .probe  = ccwgroup_probe_ccwdev,
        .remove = ccwgroup_remove_ccwdev,
-       .int_class = IOINT_LCS,
+       .int_class = IRQIO_LCS,
 };
 
 /**
index d73fdcfeb45a19e1695dc188b045e29c72e57557..2839baa82a5a458d0035d59e8a5666ac07690779 100644 (file)
@@ -633,7 +633,7 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                return -ENOMEM;
        pci_set_drvdata(pdev, pci_info);
 
-       if (efi_enabled)
+       if (efi_enabled(EFI_RUNTIME_SERVICES))
                orom = isci_get_efi_var(pdev);
 
        if (!orom)
index 5aedcdf4ac5cd185a2ffac8ebcca2e3dcffd6c27..1ebe67cd18333c3b39f98d0e2badcd7b0a948090 100644 (file)
@@ -126,6 +126,12 @@ static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
 
 static int sh_clk_div_enable(struct clk *clk)
 {
+       if (clk->div_mask == SH_CLK_DIV6_MSK) {
+               int ret = sh_clk_div_set_rate(clk, clk->rate);
+               if (ret < 0)
+                       return ret;
+       }
+
        sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
        return 0;
 }
index 19ee901577da8d1649b975e9873b7a6c1e40029f..3a6083b386a134416c13b7f8e2c08cbe2e0fef11 100644 (file)
@@ -33,7 +33,7 @@
 #include <linux/of_gpio.h>
 #include <linux/pm_runtime.h>
 #include <linux/export.h>
-#include <linux/sched.h>
+#include <linux/sched/rt.h>
 #include <linux/delay.h>
 #include <linux/kthread.h>
 #include <linux/ioport.h>
index 97ac0a38e3d0c7277aaab702078d2878db20f1cd..eb2753008ef0550b6c3bfeda984ca9dfe47d26bd 100644 (file)
@@ -174,3 +174,15 @@ int ssb_gpio_init(struct ssb_bus *bus)
 
        return -1;
 }
+
+int ssb_gpio_unregister(struct ssb_bus *bus)
+{
+       if (ssb_chipco_available(&bus->chipco) ||
+           ssb_extif_available(&bus->extif)) {
+               return gpiochip_remove(&bus->gpio);
+       } else {
+               SSB_WARN_ON(1);
+       }
+
+       return -1;
+}
index 772ad9b5c304fc986b459c4f4c6c5513e70ecd16..24dc331b4701efd796381882e635c6ef791c45cf 100644 (file)
@@ -443,6 +443,15 @@ static void ssb_devices_unregister(struct ssb_bus *bus)
 
 void ssb_bus_unregister(struct ssb_bus *bus)
 {
+       int err;
+
+       err = ssb_gpio_unregister(bus);
+       if (err == -EBUSY)
+               ssb_dprintk(KERN_ERR PFX "Some GPIOs are still in use.\n");
+       else if (err)
+               ssb_dprintk(KERN_ERR PFX
+                           "Can not unregister GPIO driver: %i\n", err);
+
        ssb_buses_lock();
        ssb_devices_unregister(bus);
        list_del(&bus->list);
index 6c10b66c796cf7ee23ae3e9334086034198e911c..da38305a2d22a2c24de7b4563da7ed0daa7354a0 100644 (file)
@@ -252,11 +252,16 @@ static inline void ssb_extif_init(struct ssb_extif *extif)
 
 #ifdef CONFIG_SSB_DRIVER_GPIO
 extern int ssb_gpio_init(struct ssb_bus *bus);
+extern int ssb_gpio_unregister(struct ssb_bus *bus);
 #else /* CONFIG_SSB_DRIVER_GPIO */
 static inline int ssb_gpio_init(struct ssb_bus *bus)
 {
        return -ENOTSUPP;
 }
+static inline int ssb_gpio_unregister(struct ssb_bus *bus)
+{
+       return 0;
+}
 #endif /* CONFIG_SSB_DRIVER_GPIO */
 
 #endif /* LINUX_SSB_PRIVATE_H_ */
index 7de2a10213bd9d49294daa57430992e6934168ea..36eec320569c6bd14d077be6328b34ae4fede36f 100644 (file)
@@ -444,6 +444,7 @@ config COMEDI_ADQ12B
 
 config COMEDI_NI_AT_A2150
        tristate "NI AT-A2150 ISA card support"
+       select COMEDI_FC
        depends on VIRT_TO_BUS
        ---help---
          Enable support for National Instruments AT-A2150 cards
index b7bba1790a20005bbb955c50ba9a142a1480ac51..9b038e4a7e711eea1b5bd3873706847b701de2fe 100644 (file)
@@ -1549,6 +1549,9 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
        if (cmd == COMEDI_DEVCONFIG) {
                rc = do_devconfig_ioctl(dev,
                                        (struct comedi_devconfig __user *)arg);
+               if (rc == 0)
+                       /* Evade comedi_auto_unconfig(). */
+                       dev_file_info->hardware_device = NULL;
                goto done;
        }
 
index fb3d09323ba145c2460408971459ee29f39bd63b..01de996239f1e6142f07f9851cc840e8ddc14a75 100644 (file)
@@ -345,7 +345,7 @@ static int waveform_ai_cancel(struct comedi_device *dev,
        struct waveform_private *devpriv = dev->private;
 
        devpriv->timer_running = 0;
-       del_timer(&devpriv->timer);
+       del_timer_sync(&devpriv->timer);
        return 0;
 }
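
The one-line change above closes a teardown race. As a hedged illustration with generic timer code (not taken from this driver):

#include <linux/timer.h>

/*
 * Illustration only: del_timer() merely deactivates a pending timer, so
 * its callback may still be running on another CPU when it returns.
 * del_timer_sync() additionally waits for a running callback to finish,
 * which a cancel path needs before the timer's data can be reused.
 */
static void cancel_timer_safely(struct timer_list *t)
{
        del_timer_sync(t);
}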
 
index aaac0b2cc9eb7172ab963aa361e8d7e828ceb751..fd1662b4175db8755b8c605ed90f08437c12ba4c 100644 (file)
@@ -963,7 +963,7 @@ static const struct ni_board_struct ni_boards[] = {
         .ao_range_table = &range_ni_M_625x_ao,
         .reg_type = ni_reg_625x,
         .ao_unipolar = 0,
-        .ao_speed = 357,
+        .ao_speed = 350,
         .num_p0_dio_channels = 8,
         .caldac = {caldac_none},
         .has_8255 = 0,
@@ -982,7 +982,7 @@ static const struct ni_board_struct ni_boards[] = {
         .ao_range_table = &range_ni_M_625x_ao,
         .reg_type = ni_reg_625x,
         .ao_unipolar = 0,
-        .ao_speed = 357,
+        .ao_speed = 350,
         .num_p0_dio_channels = 8,
         .caldac = {caldac_none},
         .has_8255 = 0,
@@ -1001,7 +1001,7 @@ static const struct ni_board_struct ni_boards[] = {
         .ao_range_table = &range_ni_M_625x_ao,
         .reg_type = ni_reg_625x,
         .ao_unipolar = 0,
-        .ao_speed = 357,
+        .ao_speed = 350,
         .num_p0_dio_channels = 8,
         .caldac = {caldac_none},
         .has_8255 = 0,
@@ -1037,7 +1037,7 @@ static const struct ni_board_struct ni_boards[] = {
         .ao_range_table = &range_ni_M_625x_ao,
         .reg_type = ni_reg_625x,
         .ao_unipolar = 0,
-        .ao_speed = 357,
+        .ao_speed = 350,
         .num_p0_dio_channels = 32,
         .caldac = {caldac_none},
         .has_8255 = 0,
@@ -1056,7 +1056,7 @@ static const struct ni_board_struct ni_boards[] = {
         .ao_range_table = &range_ni_M_625x_ao,
         .reg_type = ni_reg_625x,
         .ao_unipolar = 0,
-        .ao_speed = 357,
+        .ao_speed = 350,
         .num_p0_dio_channels = 32,
         .caldac = {caldac_none},
         .has_8255 = 0,
@@ -1092,7 +1092,7 @@ static const struct ni_board_struct ni_boards[] = {
         .ao_range_table = &range_ni_M_628x_ao,
         .reg_type = ni_reg_628x,
         .ao_unipolar = 1,
-        .ao_speed = 357,
+        .ao_speed = 350,
         .num_p0_dio_channels = 8,
         .caldac = {caldac_none},
         .has_8255 = 0,
@@ -1111,7 +1111,7 @@ static const struct ni_board_struct ni_boards[] = {
         .ao_range_table = &range_ni_M_628x_ao,
         .reg_type = ni_reg_628x,
         .ao_unipolar = 1,
-        .ao_speed = 357,
+        .ao_speed = 350,
         .num_p0_dio_channels = 8,
         .caldac = {caldac_none},
         .has_8255 = 0,
@@ -1147,7 +1147,7 @@ static const struct ni_board_struct ni_boards[] = {
         .ao_range_table = &range_ni_M_628x_ao,
         .reg_type = ni_reg_628x,
         .ao_unipolar = 1,
-        .ao_speed = 357,
+        .ao_speed = 350,
         .num_p0_dio_channels = 32,
         .caldac = {caldac_none},
         .has_8255 = 0,
index 1a1f5c79822ae1dbea65497ba6aac2d71a7a9216..7b133597e923f792a942cffff1ff399546fed8ee 100644 (file)
@@ -15,7 +15,7 @@
  */
 #include "csr_wifi_hip_unifi.h"
 #include "unifi_priv.h"
-
+#include <linux/sched/rt.h>
 
 /*
  * ---------------------------------------------------------------------------
index 7c6c4138fc76dba99998e7be91420cc3a0d01035..49395da34b7f07cd286e307f7d4f832fae3a5070 100644 (file)
@@ -15,7 +15,7 @@
 #include "unifi_priv.h"
 #include "csr_wifi_hip_unifi.h"
 #include "csr_wifi_hip_conversions.h"
-
+#include <linux/sched/rt.h>
 
 
 
index 580406cb1808e55b201aafad0611f62ed536744f..b2f8331e4acf90005887a7f1e5947d0c7b560aea 100644 (file)
@@ -3,7 +3,9 @@ config FIREWIRE_SERIAL
        depends on FIREWIRE
        help
           This enables TTY over IEEE 1394, providing high-speed serial
-         connectivity to cabled peers.
+         connectivity to cabled peers. This driver implements an
+         ad-hoc transport protocol and is currently limited to
+         Linux-to-Linux communication.
 
          To compile this driver as a module, say M here:  the module will
          be called firewire-serial.
index 726900548eae5c8136861588d4bce475cd9349b4..8dae8fb252233905b380927218cb80b8bb96e4cc 100644 (file)
@@ -1,5 +1,5 @@
-TODOs
------
+TODOs prior to this driver moving out of staging
+------------------------------------------------
 1. Implement retries for RCODE_BUSY, RCODE_NO_ACK and RCODE_SEND_ERROR
    - I/O is handled asynchronously which presents some issues when error
      conditions occur.
@@ -11,17 +11,9 @@ TODOs
 -- Issues with firewire stack --
 1. This driver uses the same unregistered vendor id that the firewire core does
      (0xd00d1e). Perhaps this could be exposed as a define in
-     firewire-constants.h?
-2. MAX_ASYNC_PAYLOAD needs to be publicly exposed by core/ohci
-   - otherwise how will this driver know the max size of address window to
-     open for one packet write?
+     firewire.h?
 3. Maybe device_max_receive() and link_speed_to_max_payload() should be
      taken up by the firewire core?
-4. To avoid dropping rx data while still limiting the maximum buffering,
-     the size of the AR context must be known. How to expose this to drivers?
-5. Explore if bigger AR context will reduce RCODE_BUSY responses
-   (or auto-grow to certain max size -- but this would require major surgery
-    as the current AR is contiguously mapped)
 
 -- Issues with TTY core --
   1. Hack for alternate device name scheme
index 61ee29083b268ece51870b1c80c60035acd4fa55..d03a7f57e8d475ceddedb052d42b3077c2a7745d 100644 (file)
@@ -179,7 +179,7 @@ static void dump_profile(struct seq_file *m, struct stats *stats)
 /* Returns the max receive packet size for the given card */
 static inline int device_max_receive(struct fw_device *fw_device)
 {
-       return 1 <<  (clamp_t(int, fw_device->max_rec, 8U, 13U) + 1);
+       return 1 <<  (clamp_t(int, fw_device->max_rec, 8U, 11U) + 1);
 }
 
 static void fwtty_log_tx_error(struct fwtty_port *port, int rcode)
index 8b572edf95634264ce5897938b1bc279a618c243..caa1c1ea82d5f783782efd263f2f60500a68062b 100644 (file)
@@ -374,10 +374,10 @@ static inline void fwtty_bind_console(struct fwtty_port *port,
  */
 static inline int link_speed_to_max_payload(unsigned speed)
 {
-       static const int max_async[] = { 307, 614, 1229, 2458, 4916, 9832, };
-       BUILD_BUG_ON(ARRAY_SIZE(max_async) - 1 != SCODE_3200);
+       static const int max_async[] = { 307, 614, 1229, 2458, };
+       BUILD_BUG_ON(ARRAY_SIZE(max_async) - 1 != SCODE_800);
 
-       speed = clamp(speed, (unsigned) SCODE_100, (unsigned) SCODE_3200);
+       speed = clamp(speed, (unsigned) SCODE_100, (unsigned) SCODE_800);
        if (limit_bw)
                return max_async[speed];
        else
index fb31b457a56a9b2b50fe96982ce083d25d89a032..c5ceb9d90ea83a696a4ca4518dc379a2c6a82831 100644 (file)
@@ -239,7 +239,7 @@ static irqreturn_t mxs_lradc_trigger_handler(int irq, void *p)
        struct mxs_lradc *lradc = iio_priv(iio);
        const uint32_t chan_value = LRADC_CH_ACCUMULATE |
                ((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET);
-       int i, j = 0;
+       unsigned int i, j = 0;
 
        for_each_set_bit(i, iio->active_scan_mask, iio->masklength) {
                lradc->buffer[j] = readl(lradc->base + LRADC_CH(j));
index ea295b25308c470525e9213ad039c46eadc95ca5..87979a0d03a91358d1e892ed133292f855bfad3e 100644 (file)
@@ -27,8 +27,8 @@ config ADIS16130
 config ADIS16260
        tristate "Analog Devices ADIS16260 Digital Gyroscope Sensor SPI driver"
        depends on SPI
-       select IIO_TRIGGER if IIO_BUFFER
-       select IIO_SW_RING if IIO_BUFFER
+       select IIO_ADIS_LIB
+       select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
        help
          Say yes here to build support for Analog Devices ADIS16260 ADIS16265
          ADIS16250 ADIS16255 and ADIS16251 programmable digital gyroscope sensors.
index 3525a68d6a7529e01eb17c83bc00db5841faa33e..41d7350d030f3e9341175e8b4771068b2913a087 100644 (file)
@@ -69,7 +69,7 @@ static int adis16080_spi_read(struct iio_dev *indio_dev,
        ret = spi_read(st->us, st->buf, 2);
 
        if (ret == 0)
-               *val = ((st->buf[0] & 0xF) << 8) | st->buf[1];
+               *val = sign_extend32(((st->buf[0] & 0xF) << 8) | st->buf[1], 11);
        mutex_unlock(&st->buf_lock);
 
        return ret;
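
The ADIS16080 sample is a 12-bit two's-complement value, so bit 11 is the sign bit. A hedged worked example of what sign_extend32() does with such values (the demo function is illustrative only):

#include <linux/bitops.h>       /* sign_extend32() */

static void adis16080_sign_example(void)
{
        int neg = sign_extend32(0xFFF, 11);     /* 12-bit -1   -> -1   */
        int pos = sign_extend32(0x7FF, 11);     /* 12-bit 2047 -> 2047 */

        (void)neg;
        (void)pos;
}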
index 7d3207559265c35085652be0a23b3942f5031171..d44d3ad26fa517527f3d1369e28e221b40bc72f3 100644 (file)
@@ -21,7 +21,6 @@ config IIO_GPIO_TRIGGER
 config IIO_SYSFS_TRIGGER
        tristate "SYSFS trigger"
        depends on SYSFS
-       depends on HAVE_IRQ_WORK
        select IRQ_WORK
        help
          Provides support for using SYSFS entry as IIO triggers.
index ecf0f44bc70e1daf8dc4133d5d1a56e251560e8e..cec19f1cf56cb261ae5dc4782d266022813e6c15 100644 (file)
@@ -584,7 +584,6 @@ int imx_drm_add_encoder(struct drm_encoder *encoder,
 
        ret = imx_drm_encoder_register(imx_drm_encoder);
        if (ret) {
-               kfree(imx_drm_encoder);
                ret = -ENOMEM;
                goto err_register;
        }
index 677e665ca86d62dc764db90ca7205385cd2166c2..f7059cddd7fdb2da864ff19975d4ca99fbb0664f 100644 (file)
@@ -1104,7 +1104,9 @@ static int ipu_probe(struct platform_device *pdev)
        if (ret)
                goto out_failed_irq;
 
-       ipu_reset(ipu);
+       ret = ipu_reset(ipu);
+       if (ret)
+               goto out_failed_reset;
 
        /* Set MCU_T to divide MCU access window into 2 */
        ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
@@ -1129,6 +1131,7 @@ failed_add_clients:
        ipu_submodules_exit(ipu);
 failed_submodules_init:
        ipu_irq_exit(ipu);
+out_failed_reset:
 out_failed_irq:
        clk_disable_unprepare(ipu->clk);
 failed_clk_get:
index 1892006526b52fbf85eb19ca8e943db1b96615da..4b3a019409b5793632efa538cdcf65e0030f217f 100644 (file)
@@ -452,7 +452,7 @@ static int ipu_get_resources(struct ipu_crtc *ipu_crtc,
        int ret;
 
        ipu_crtc->ipu_ch = ipu_idmac_get(ipu, pdata->dma[0]);
-       if (IS_ERR_OR_NULL(ipu_crtc->ipu_ch)) {
+       if (IS_ERR(ipu_crtc->ipu_ch)) {
                ret = PTR_ERR(ipu_crtc->ipu_ch);
                goto err_out;
        }
@@ -472,7 +472,7 @@ static int ipu_get_resources(struct ipu_crtc *ipu_crtc,
        if (pdata->dp >= 0) {
                ipu_crtc->dp = ipu_dp_get(ipu, pdata->dp);
                if (IS_ERR(ipu_crtc->dp)) {
-                       ret = PTR_ERR(ipu_crtc->ipu_ch);
+                       ret = PTR_ERR(ipu_crtc->dp);
                        goto err_out;
                }
        }
@@ -548,6 +548,8 @@ static int ipu_drm_probe(struct platform_device *pdev)
        ipu_crtc->dev = &pdev->dev;
 
        ret = ipu_crtc_init(ipu_crtc, pdata);
+       if (ret)
+               return ret;
 
        platform_set_drvdata(pdev, ipu_crtc);
 
index b724a41314359053d598176e7ac871ba3a63704e..09f65dc3d2c85397e4147e51291c1b7510450d03 100644 (file)
@@ -3,8 +3,8 @@ config DRM_OMAP
        tristate "OMAP DRM"
        depends on DRM && !CONFIG_FB_OMAP2
        depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
+       depends on OMAP2_DSS
        select DRM_KMS_HELPER
-       select OMAP2_DSS
        select FB_SYS_FILLRECT
        select FB_SYS_COPYAREA
        select FB_SYS_IMAGEBLIT
index 1ca0e0016de424b249219b178dcb9ee71654fd06..d85e058f2845a014c0216cabeda52d70cc7bf0b6 100644 (file)
@@ -5,6 +5,7 @@
 
 ccflags-y := -Iinclude/drm -Werror
 omapdrm-y := omap_drv.o \
+       omap_irq.o \
        omap_debugfs.o \
        omap_crtc.o \
        omap_plane.o \
index 938c7888ca319ae1d82e566b086ab1e9abf133a2..abeeb00aaa12ca758c4c3c085b170ffe709fc5af 100644 (file)
@@ -17,9 +17,6 @@ TODO
 . Revisit GEM sync object infrastructure.. TTM has some framework for this
   already.  Possibly this could be refactored out and made more common?
   There should be some way to do this with less wheel-reinvention.
-. Review DSS vs KMS mismatches.  The omap_dss_device is sort of part encoder,
-  part connector.  Which results in a bit of duct tape to fwd calls from
-  encoder to connector.  Possibly this could be done a bit better.
 . Solve PM sequencing on resume.  DMM/TILER must be reloaded before any
   access is made from any component in the system.  Which means on suspend
   CRTC's should be disabled, and on resume the LUT should be reprogrammed
index 91edb3f9697292961883ca5876f4ab520b27ed6a..4cc9ee733c5fb4a80c656ad978ca6a349e07b8dc 100644 (file)
 struct omap_connector {
        struct drm_connector base;
        struct omap_dss_device *dssdev;
+       struct drm_encoder *encoder;
 };
 
-static inline void copy_timings_omap_to_drm(struct drm_display_mode *mode,
+void copy_timings_omap_to_drm(struct drm_display_mode *mode,
                struct omap_video_timings *timings)
 {
        mode->clock = timings->pixel_clock;
@@ -64,7 +65,7 @@ static inline void copy_timings_omap_to_drm(struct drm_display_mode *mode,
                mode->flags |= DRM_MODE_FLAG_NVSYNC;
 }
 
-static inline void copy_timings_drm_to_omap(struct omap_video_timings *timings,
+void copy_timings_drm_to_omap(struct omap_video_timings *timings,
                struct drm_display_mode *mode)
 {
        timings->pixel_clock = mode->clock;
@@ -96,48 +97,7 @@ static inline void copy_timings_drm_to_omap(struct omap_video_timings *timings,
        timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
 }
 
-static void omap_connector_dpms(struct drm_connector *connector, int mode)
-{
-       struct omap_connector *omap_connector = to_omap_connector(connector);
-       struct omap_dss_device *dssdev = omap_connector->dssdev;
-       int old_dpms;
-
-       DBG("%s: %d", dssdev->name, mode);
-
-       old_dpms = connector->dpms;
-
-       /* from off to on, do from crtc to connector */
-       if (mode < old_dpms)
-               drm_helper_connector_dpms(connector, mode);
-
-       if (mode == DRM_MODE_DPMS_ON) {
-               /* store resume info for suspended displays */
-               switch (dssdev->state) {
-               case OMAP_DSS_DISPLAY_SUSPENDED:
-                       dssdev->activate_after_resume = true;
-                       break;
-               case OMAP_DSS_DISPLAY_DISABLED: {
-                       int ret = dssdev->driver->enable(dssdev);
-                       if (ret) {
-                               DBG("%s: failed to enable: %d",
-                                               dssdev->name, ret);
-                               dssdev->driver->disable(dssdev);
-                       }
-                       break;
-               }
-               default:
-                       break;
-               }
-       } else {
-               /* TODO */
-       }
-
-       /* from on to off, do from connector to crtc */
-       if (mode > old_dpms)
-               drm_helper_connector_dpms(connector, mode);
-}
-
-enum drm_connector_status omap_connector_detect(
+static enum drm_connector_status omap_connector_detect(
                struct drm_connector *connector, bool force)
 {
        struct omap_connector *omap_connector = to_omap_connector(connector);
@@ -164,8 +124,6 @@ static void omap_connector_destroy(struct drm_connector *connector)
        struct omap_connector *omap_connector = to_omap_connector(connector);
        struct omap_dss_device *dssdev = omap_connector->dssdev;
 
-       dssdev->driver->disable(dssdev);
-
        DBG("%s", omap_connector->dssdev->name);
        drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
@@ -261,36 +219,12 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
 struct drm_encoder *omap_connector_attached_encoder(
                struct drm_connector *connector)
 {
-       int i;
        struct omap_connector *omap_connector = to_omap_connector(connector);
-
-       for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-               struct drm_mode_object *obj;
-
-               if (connector->encoder_ids[i] == 0)
-                       break;
-
-               obj = drm_mode_object_find(connector->dev,
-                               connector->encoder_ids[i],
-                               DRM_MODE_OBJECT_ENCODER);
-
-               if (obj) {
-                       struct drm_encoder *encoder = obj_to_encoder(obj);
-                       struct omap_overlay_manager *mgr =
-                                       omap_encoder_get_manager(encoder);
-                       DBG("%s: found %s", omap_connector->dssdev->name,
-                                       mgr->name);
-                       return encoder;
-               }
-       }
-
-       DBG("%s: no encoder", omap_connector->dssdev->name);
-
-       return NULL;
+       return omap_connector->encoder;
 }
 
 static const struct drm_connector_funcs omap_connector_funcs = {
-       .dpms = omap_connector_dpms,
+       .dpms = drm_helper_connector_dpms,
        .detect = omap_connector_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = omap_connector_destroy,
@@ -302,34 +236,6 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
        .best_encoder = omap_connector_attached_encoder,
 };
 
-/* called from encoder when mode is set, to propagate settings to the dssdev */
-void omap_connector_mode_set(struct drm_connector *connector,
-               struct drm_display_mode *mode)
-{
-       struct drm_device *dev = connector->dev;
-       struct omap_connector *omap_connector = to_omap_connector(connector);
-       struct omap_dss_device *dssdev = omap_connector->dssdev;
-       struct omap_dss_driver *dssdrv = dssdev->driver;
-       struct omap_video_timings timings = {0};
-
-       copy_timings_drm_to_omap(&timings, mode);
-
-       DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
-                       omap_connector->dssdev->name,
-                       mode->base.id, mode->name, mode->vrefresh, mode->clock,
-                       mode->hdisplay, mode->hsync_start,
-                       mode->hsync_end, mode->htotal,
-                       mode->vdisplay, mode->vsync_start,
-                       mode->vsync_end, mode->vtotal, mode->type, mode->flags);
-
-       if (dssdrv->check_timings(dssdev, &timings)) {
-               dev_err(dev->dev, "could not set timings\n");
-               return;
-       }
-
-       dssdrv->set_timings(dssdev, &timings);
-}
-
 /* flush an area of the framebuffer (in case of manual update display that
  * is not automatically flushed)
  */
@@ -344,7 +250,8 @@ void omap_connector_flush(struct drm_connector *connector,
 
 /* initialize connector */
 struct drm_connector *omap_connector_init(struct drm_device *dev,
-               int connector_type, struct omap_dss_device *dssdev)
+               int connector_type, struct omap_dss_device *dssdev,
+               struct drm_encoder *encoder)
 {
        struct drm_connector *connector = NULL;
        struct omap_connector *omap_connector;
@@ -360,6 +267,8 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
        }
 
        omap_connector->dssdev = dssdev;
+       omap_connector->encoder = encoder;
+
        connector = &omap_connector->base;
 
        drm_connector_init(dev, connector, &omap_connector_funcs,
index d87bd84257bd7542c6175964e3f5510822958a51..5c6ed6040eff572a384659561e1fdbaf427c883c 100644 (file)
 struct omap_crtc {
        struct drm_crtc base;
        struct drm_plane *plane;
+
        const char *name;
-       int id;
+       int pipe;
+       enum omap_channel channel;
+       struct omap_overlay_manager_info info;
+
+       /*
+        * Temporary: eventually this will go away, but it is needed
+        * for now to keep the outputs happy.  (They only need
+        * mgr->id.)  Eventually this will be replaced w/ something
+        * more common-panel-framework-y
+        */
+       struct omap_overlay_manager mgr;
+
+       struct omap_video_timings timings;
+       bool enabled;
+       bool full_update;
+
+       struct omap_drm_apply apply;
+
+       struct omap_drm_irq apply_irq;
+       struct omap_drm_irq error_irq;
+
+       /* list of in-progress apply's: */
+       struct list_head pending_applies;
+
+       /* list of queued apply's: */
+       struct list_head queued_applies;
+
+       /* for handling queued and in-progress applies: */
+       struct work_struct apply_work;
 
        /* if there is a pending flip, these will be non-null: */
        struct drm_pending_vblank_event *event;
        struct drm_framebuffer *old_fb;
+
+       /* for handling page flips without caring about what context
+        * the callback is called from.  Possibly we should just
+        * make omap_gem always call the cb from the worker so
+        * we don't have to care about this..
+        *
+        * XXX maybe fold into apply_work??
+        */
+       struct work_struct page_flip_work;
+};
+
+/*
+ * Manager-ops, callbacks from output when they need to configure
+ * the upstream part of the video pipe.
+ *
+ * Most of these we can ignore until we add support for command-mode
+ * panels.. for video-mode the crtc-helpers already do an adequate
+ * job of sequencing the setup of the video pipe in the proper order
+ */
+
+/* we can probably ignore these until we support command-mode panels: */
+static void omap_crtc_start_update(struct omap_overlay_manager *mgr)
+{
+}
+
+static int omap_crtc_enable(struct omap_overlay_manager *mgr)
+{
+       return 0;
+}
+
+static void omap_crtc_disable(struct omap_overlay_manager *mgr)
+{
+}
+
+static void omap_crtc_set_timings(struct omap_overlay_manager *mgr,
+               const struct omap_video_timings *timings)
+{
+       struct omap_crtc *omap_crtc = container_of(mgr, struct omap_crtc, mgr);
+       DBG("%s", omap_crtc->name);
+       omap_crtc->timings = *timings;
+       omap_crtc->full_update = true;
+}
+
+static void omap_crtc_set_lcd_config(struct omap_overlay_manager *mgr,
+               const struct dss_lcd_mgr_config *config)
+{
+       struct omap_crtc *omap_crtc = container_of(mgr, struct omap_crtc, mgr);
+       DBG("%s", omap_crtc->name);
+       dispc_mgr_set_lcd_config(omap_crtc->channel, config);
+}
+
+static int omap_crtc_register_framedone_handler(
+               struct omap_overlay_manager *mgr,
+               void (*handler)(void *), void *data)
+{
+       return 0;
+}
+
+static void omap_crtc_unregister_framedone_handler(
+               struct omap_overlay_manager *mgr,
+               void (*handler)(void *), void *data)
+{
+}
+
+static const struct dss_mgr_ops mgr_ops = {
+               .start_update = omap_crtc_start_update,
+               .enable = omap_crtc_enable,
+               .disable = omap_crtc_disable,
+               .set_timings = omap_crtc_set_timings,
+               .set_lcd_config = omap_crtc_set_lcd_config,
+               .register_framedone_handler = omap_crtc_register_framedone_handler,
+               .unregister_framedone_handler = omap_crtc_unregister_framedone_handler,
 };
 
+/*
+ * CRTC funcs:
+ */
+
 static void omap_crtc_destroy(struct drm_crtc *crtc)
 {
        struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+       DBG("%s", omap_crtc->name);
+
+       WARN_ON(omap_crtc->apply_irq.registered);
+       omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+
        omap_crtc->plane->funcs->destroy(omap_crtc->plane);
        drm_crtc_cleanup(crtc);
+
        kfree(omap_crtc);
 }
 
@@ -48,14 +160,25 @@ static void omap_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
        struct omap_drm_private *priv = crtc->dev->dev_private;
        struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       bool enabled = (mode == DRM_MODE_DPMS_ON);
        int i;
 
-       WARN_ON(omap_plane_dpms(omap_crtc->plane, mode));
+       DBG("%s: %d", omap_crtc->name, mode);
+
+       if (enabled != omap_crtc->enabled) {
+               omap_crtc->enabled = enabled;
+               omap_crtc->full_update = true;
+               omap_crtc_apply(crtc, &omap_crtc->apply);
 
-       for (i = 0; i < priv->num_planes; i++) {
-               struct drm_plane *plane = priv->planes[i];
-               if (plane->crtc == crtc)
-                       WARN_ON(omap_plane_dpms(plane, mode));
+               /* also enable our private plane: */
+               WARN_ON(omap_plane_dpms(omap_crtc->plane, mode));
+
+               /* and any attached overlay planes: */
+               for (i = 0; i < priv->num_planes; i++) {
+                       struct drm_plane *plane = priv->planes[i];
+                       if (plane->crtc == crtc)
+                               WARN_ON(omap_plane_dpms(plane, mode));
+               }
        }
 }
 
@@ -73,12 +196,26 @@ static int omap_crtc_mode_set(struct drm_crtc *crtc,
                struct drm_framebuffer *old_fb)
 {
        struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
-       struct drm_plane *plane = omap_crtc->plane;
 
-       return omap_plane_mode_set(plane, crtc, crtc->fb,
+       mode = adjusted_mode;
+
+       DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+                       omap_crtc->name, mode->base.id, mode->name,
+                       mode->vrefresh, mode->clock,
+                       mode->hdisplay, mode->hsync_start,
+                       mode->hsync_end, mode->htotal,
+                       mode->vdisplay, mode->vsync_start,
+                       mode->vsync_end, mode->vtotal,
+                       mode->type, mode->flags);
+
+       copy_timings_drm_to_omap(&omap_crtc->timings, mode);
+       omap_crtc->full_update = true;
+
+       return omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
                        0, 0, mode->hdisplay, mode->vdisplay,
                        x << 16, y << 16,
-                       mode->hdisplay << 16, mode->vdisplay << 16);
+                       mode->hdisplay << 16, mode->vdisplay << 16,
+                       NULL, NULL);
 }
 
 static void omap_crtc_prepare(struct drm_crtc *crtc)
@@ -102,10 +239,11 @@ static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
        struct drm_plane *plane = omap_crtc->plane;
        struct drm_display_mode *mode = &crtc->mode;
 
-       return plane->funcs->update_plane(plane, crtc, crtc->fb,
+       return omap_plane_mode_set(plane, crtc, crtc->fb,
                        0, 0, mode->hdisplay, mode->vdisplay,
                        x << 16, y << 16,
-                       mode->hdisplay << 16, mode->vdisplay << 16);
+                       mode->hdisplay << 16, mode->vdisplay << 16,
+                       NULL, NULL);
 }
 
 static void omap_crtc_load_lut(struct drm_crtc *crtc)
@@ -114,63 +252,54 @@ static void omap_crtc_load_lut(struct drm_crtc *crtc)
 
 static void vblank_cb(void *arg)
 {
-       static uint32_t sequence;
        struct drm_crtc *crtc = arg;
        struct drm_device *dev = crtc->dev;
        struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
-       struct drm_pending_vblank_event *event = omap_crtc->event;
        unsigned long flags;
-       struct timeval now;
 
-       WARN_ON(!event);
+       spin_lock_irqsave(&dev->event_lock, flags);
+
+       /* wakeup userspace */
+       if (omap_crtc->event)
+               drm_send_vblank_event(dev, omap_crtc->pipe, omap_crtc->event);
 
        omap_crtc->event = NULL;
+       omap_crtc->old_fb = NULL;
 
-       /* wakeup userspace */
-       if (event) {
-               do_gettimeofday(&now);
-
-               spin_lock_irqsave(&dev->event_lock, flags);
-               /* TODO: we can't yet use the vblank time accounting,
-                * because omapdss lower layer is the one that knows
-                * the irq # and registers the handler, which more or
-                * less defeats how drm_irq works.. for now just fake
-                * the sequence number and use gettimeofday..
-                *
-               event->event.sequence = drm_vblank_count_and_time(
-                               dev, omap_crtc->id, &now);
-                */
-               event->event.sequence = sequence++;
-               event->event.tv_sec = now.tv_sec;
-               event->event.tv_usec = now.tv_usec;
-               list_add_tail(&event->base.link,
-                               &event->base.file_priv->event_list);
-               wake_up_interruptible(&event->base.file_priv->event_wait);
-               spin_unlock_irqrestore(&dev->event_lock, flags);
-       }
+       spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
-static void page_flip_cb(void *arg)
+static void page_flip_worker(struct work_struct *work)
 {
-       struct drm_crtc *crtc = arg;
-       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
-       struct drm_framebuffer *old_fb = omap_crtc->old_fb;
+       struct omap_crtc *omap_crtc =
+                       container_of(work, struct omap_crtc, page_flip_work);
+       struct drm_crtc *crtc = &omap_crtc->base;
+       struct drm_device *dev = crtc->dev;
+       struct drm_display_mode *mode = &crtc->mode;
        struct drm_gem_object *bo;
 
-       omap_crtc->old_fb = NULL;
-
-       omap_crtc_mode_set_base(crtc, crtc->x, crtc->y, old_fb);
-
-       /* really we'd like to setup the callback atomically w/ setting the
-        * new scanout buffer to avoid getting stuck waiting an extra vblank
-        * cycle.. for now go for correctness and later figure out speed..
-        */
-       omap_plane_on_endwin(omap_crtc->plane, vblank_cb, crtc);
+       mutex_lock(&dev->mode_config.mutex);
+       omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
+                       0, 0, mode->hdisplay, mode->vdisplay,
+                       crtc->x << 16, crtc->y << 16,
+                       mode->hdisplay << 16, mode->vdisplay << 16,
+                       vblank_cb, crtc);
+       mutex_unlock(&dev->mode_config.mutex);
 
        bo = omap_framebuffer_bo(crtc->fb, 0);
        drm_gem_object_unreference_unlocked(bo);
 }
 
+static void page_flip_cb(void *arg)
+{
+       struct drm_crtc *crtc = arg;
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       struct omap_drm_private *priv = crtc->dev->dev_private;
+
+       /* avoid assumptions about what ctxt we are called from: */
+       queue_work(priv->wq, &omap_crtc->page_flip_work);
+}
+
 static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
                 struct drm_framebuffer *fb,
                 struct drm_pending_vblank_event *event)
@@ -179,14 +308,14 @@ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
        struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
        struct drm_gem_object *bo;
 
-       DBG("%d -> %d", crtc->fb ? crtc->fb->base.id : -1, fb->base.id);
+       DBG("%d -> %d (event=%p)", crtc->fb ? crtc->fb->base.id : -1,
+                       fb->base.id, event);
 
-       if (omap_crtc->event) {
+       if (omap_crtc->old_fb) {
                dev_err(dev->dev, "already a pending flip\n");
                return -EINVAL;
        }
 
-       omap_crtc->old_fb = crtc->fb;
        omap_crtc->event = event;
        crtc->fb = fb;
 
@@ -234,14 +363,244 @@ static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
        .load_lut = omap_crtc_load_lut,
 };
 
+const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc)
+{
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       return &omap_crtc->timings;
+}
+
+enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
+{
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       return omap_crtc->channel;
+}
+
+static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+       struct omap_crtc *omap_crtc =
+                       container_of(irq, struct omap_crtc, error_irq);
+       struct drm_crtc *crtc = &omap_crtc->base;
+       DRM_ERROR("%s: errors: %08x\n", omap_crtc->name, irqstatus);
+       /* avoid getting in a flood, unregister the irq until next vblank */
+       omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+}
+
+static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+       struct omap_crtc *omap_crtc =
+                       container_of(irq, struct omap_crtc, apply_irq);
+       struct drm_crtc *crtc = &omap_crtc->base;
+
+       if (!omap_crtc->error_irq.registered)
+               omap_irq_register(crtc->dev, &omap_crtc->error_irq);
+
+       if (!dispc_mgr_go_busy(omap_crtc->channel)) {
+               struct omap_drm_private *priv =
+                               crtc->dev->dev_private;
+               DBG("%s: apply done", omap_crtc->name);
+               omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
+               queue_work(priv->wq, &omap_crtc->apply_work);
+       }
+}
+
+static void apply_worker(struct work_struct *work)
+{
+       struct omap_crtc *omap_crtc =
+                       container_of(work, struct omap_crtc, apply_work);
+       struct drm_crtc *crtc = &omap_crtc->base;
+       struct drm_device *dev = crtc->dev;
+       struct omap_drm_apply *apply, *n;
+       bool need_apply;
+
+       /*
+        * Synchronize everything on mode_config.mutex, to keep
+        * the callbacks and list modification all serialized
+        * with respect to modesetting ioctls from userspace.
+        */
+       mutex_lock(&dev->mode_config.mutex);
+       dispc_runtime_get();
+
+       /*
+        * If we are still pending a previous update, wait.. when the
+        * pending update completes, we get kicked again.
+        */
+       if (omap_crtc->apply_irq.registered)
+               goto out;
+
+       /* finish up previous apply's: */
+       list_for_each_entry_safe(apply, n,
+                       &omap_crtc->pending_applies, pending_node) {
+               apply->post_apply(apply);
+               list_del(&apply->pending_node);
+       }
+
+       need_apply = !list_empty(&omap_crtc->queued_applies);
+
+       /* then handle the next round of queued applies: */
+       list_for_each_entry_safe(apply, n,
+                       &omap_crtc->queued_applies, queued_node) {
+               apply->pre_apply(apply);
+               list_del(&apply->queued_node);
+               apply->queued = false;
+               list_add_tail(&apply->pending_node,
+                               &omap_crtc->pending_applies);
+       }
+
+       if (need_apply) {
+               enum omap_channel channel = omap_crtc->channel;
+
+               DBG("%s: GO", omap_crtc->name);
+
+               if (dispc_mgr_is_enabled(channel)) {
+                       omap_irq_register(dev, &omap_crtc->apply_irq);
+                       dispc_mgr_go(channel);
+               } else {
+                       struct omap_drm_private *priv = dev->dev_private;
+                       queue_work(priv->wq, &omap_crtc->apply_work);
+               }
+       }
+
+out:
+       dispc_runtime_put();
+       mutex_unlock(&dev->mode_config.mutex);
+}
+
+int omap_crtc_apply(struct drm_crtc *crtc,
+               struct omap_drm_apply *apply)
+{
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+
+       WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+       /* no need to queue it again if it is already queued: */
+       if (apply->queued)
+               return 0;
+
+       apply->queued = true;
+       list_add_tail(&apply->queued_node, &omap_crtc->queued_applies);
+
+       /*
+        * If there are no currently pending updates, then go ahead and
+        * kick the worker immediately, otherwise it will run again when
+        * the current update finishes.
+        */
+       if (list_empty(&omap_crtc->pending_applies)) {
+               struct omap_drm_private *priv = crtc->dev->dev_private;
+               queue_work(priv->wq, &omap_crtc->apply_work);
+       }
+
+       return 0;
+}
+
+/* called only from apply */
+static void set_enabled(struct drm_crtc *crtc, bool enable)
+{
+       struct drm_device *dev = crtc->dev;
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       enum omap_channel channel = omap_crtc->channel;
+       struct omap_irq_wait *wait = NULL;
+
+       if (dispc_mgr_is_enabled(channel) == enable)
+               return;
+
+       /* ignore sync-lost irqs during enable/disable */
+       omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
+
+       if (dispc_mgr_get_framedone_irq(channel)) {
+               if (!enable) {
+                       wait = omap_irq_wait_init(dev,
+                                       dispc_mgr_get_framedone_irq(channel), 1);
+               }
+       } else {
+               /*
+                * When we disable digit output, we need to wait until fields
+                * are done.  Otherwise the DSS is still working, and turning
+                * off the clocks prevents DSS from going to OFF mode. And when
+                * enabling, we need to wait for the extra sync lost irqs.
+                */
+               wait = omap_irq_wait_init(dev,
+                               dispc_mgr_get_vsync_irq(channel), 2);
+       }
+
+       dispc_mgr_enable(channel, enable);
+
+       if (wait) {
+               int ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
+               if (ret) {
+                       dev_err(dev->dev, "%s: timeout waiting for %s\n",
+                                       omap_crtc->name, enable ? "enable" : "disable");
+               }
+       }
+
+       omap_irq_register(crtc->dev, &omap_crtc->error_irq);
+}
+
+static void omap_crtc_pre_apply(struct omap_drm_apply *apply)
+{
+       struct omap_crtc *omap_crtc =
+                       container_of(apply, struct omap_crtc, apply);
+       struct drm_crtc *crtc = &omap_crtc->base;
+       struct drm_encoder *encoder = NULL;
+
+       DBG("%s: enabled=%d, full=%d", omap_crtc->name,
+                       omap_crtc->enabled, omap_crtc->full_update);
+
+       if (omap_crtc->full_update) {
+               struct omap_drm_private *priv = crtc->dev->dev_private;
+               int i;
+               for (i = 0; i < priv->num_encoders; i++) {
+                       if (priv->encoders[i]->crtc == crtc) {
+                               encoder = priv->encoders[i];
+                               break;
+                       }
+               }
+       }
+
+       if (!omap_crtc->enabled) {
+               set_enabled(&omap_crtc->base, false);
+               if (encoder)
+                       omap_encoder_set_enabled(encoder, false);
+       } else {
+               if (encoder) {
+                       omap_encoder_set_enabled(encoder, false);
+                       omap_encoder_update(encoder, &omap_crtc->mgr,
+                                       &omap_crtc->timings);
+                       omap_encoder_set_enabled(encoder, true);
+                       omap_crtc->full_update = false;
+               }
+
+               dispc_mgr_setup(omap_crtc->channel, &omap_crtc->info);
+               dispc_mgr_set_timings(omap_crtc->channel,
+                               &omap_crtc->timings);
+               set_enabled(&omap_crtc->base, true);
+       }
+
+       omap_crtc->full_update = false;
+}
+
+static void omap_crtc_post_apply(struct omap_drm_apply *apply)
+{
+       /* nothing needed for post-apply */
+}
+
+static const char *channel_names[] = {
+               [OMAP_DSS_CHANNEL_LCD] = "lcd",
+               [OMAP_DSS_CHANNEL_DIGIT] = "tv",
+               [OMAP_DSS_CHANNEL_LCD2] = "lcd2",
+};
+
 /* initialize crtc */
 struct drm_crtc *omap_crtc_init(struct drm_device *dev,
-               struct omap_overlay *ovl, int id)
+               struct drm_plane *plane, enum omap_channel channel, int id)
 {
        struct drm_crtc *crtc = NULL;
-       struct omap_crtc *omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
+       struct omap_crtc *omap_crtc;
+       struct omap_overlay_manager_info *info;
+
+       DBG("%s", channel_names[channel]);
 
-       DBG("%s", ovl->name);
+       omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
 
        if (!omap_crtc) {
                dev_err(dev->dev, "could not allocate CRTC\n");
@@ -250,10 +609,40 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
 
        crtc = &omap_crtc->base;
 
-       omap_crtc->plane = omap_plane_init(dev, ovl, (1 << id), true);
+       INIT_WORK(&omap_crtc->page_flip_work, page_flip_worker);
+       INIT_WORK(&omap_crtc->apply_work, apply_worker);
+
+       INIT_LIST_HEAD(&omap_crtc->pending_applies);
+       INIT_LIST_HEAD(&omap_crtc->queued_applies);
+
+       omap_crtc->apply.pre_apply  = omap_crtc_pre_apply;
+       omap_crtc->apply.post_apply = omap_crtc_post_apply;
+
+       omap_crtc->apply_irq.irqmask = pipe2vbl(id);
+       omap_crtc->apply_irq.irq = omap_crtc_apply_irq;
+
+       omap_crtc->error_irq.irqmask =
+                       dispc_mgr_get_sync_lost_irq(channel);
+       omap_crtc->error_irq.irq = omap_crtc_error_irq;
+       omap_irq_register(dev, &omap_crtc->error_irq);
+
+       omap_crtc->channel = channel;
+       omap_crtc->plane = plane;
        omap_crtc->plane->crtc = crtc;
-       omap_crtc->name = ovl->name;
-       omap_crtc->id = id;
+       omap_crtc->name = channel_names[channel];
+       omap_crtc->pipe = id;
+
+       /* temporary: */
+       omap_crtc->mgr.id = channel;
+
+       dss_install_mgr_ops(&mgr_ops);
+
+       /* TODO: fix hard-coded setup.. add properties! */
+       info = &omap_crtc->info;
+       info->default_color = 0x00000000;
+       info->trans_key = 0x00000000;
+       info->trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+       info->trans_enabled = false;
 
        drm_crtc_init(dev, crtc, &omap_crtc_funcs);
        drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
index 84943e5ba1d6563f49877ada93e022a1d67b5385..ae5ecc2efbc758268d692d732482d1f49f13c352 100644 (file)
@@ -74,320 +74,99 @@ static int get_connector_type(struct omap_dss_device *dssdev)
        }
 }
 
-#if 0 /* enable when dss2 supports hotplug */
-static int omap_drm_notifier(struct notifier_block *nb,
-               unsigned long evt, void *arg)
-{
-       switch (evt) {
-       case OMAP_DSS_SIZE_CHANGE:
-       case OMAP_DSS_HOTPLUG_CONNECT:
-       case OMAP_DSS_HOTPLUG_DISCONNECT: {
-               struct drm_device *dev = drm_device;
-               DBG("hotplug event: evt=%d, dev=%p", evt, dev);
-               if (dev)
-                       drm_sysfs_hotplug_event(dev);
-
-               return NOTIFY_OK;
-       }
-       default:  /* don't care about other events for now */
-               return NOTIFY_DONE;
-       }
-}
-#endif
-
-static void dump_video_chains(void)
-{
-       int i;
-
-       DBG("dumping video chains: ");
-       for (i = 0; i < omap_dss_get_num_overlays(); i++) {
-               struct omap_overlay *ovl = omap_dss_get_overlay(i);
-               struct omap_overlay_manager *mgr = ovl->manager;
-               struct omap_dss_device *dssdev = mgr ?
-                                       mgr->get_device(mgr) : NULL;
-               if (dssdev) {
-                       DBG("%d: %s -> %s -> %s", i, ovl->name, mgr->name,
-                                               dssdev->name);
-               } else if (mgr) {
-                       DBG("%d: %s -> %s", i, ovl->name, mgr->name);
-               } else {
-                       DBG("%d: %s", i, ovl->name);
-               }
-       }
-}
-
-/* create encoders for each manager */
-static int create_encoder(struct drm_device *dev,
-               struct omap_overlay_manager *mgr)
-{
-       struct omap_drm_private *priv = dev->dev_private;
-       struct drm_encoder *encoder = omap_encoder_init(dev, mgr);
-
-       if (!encoder) {
-               dev_err(dev->dev, "could not create encoder: %s\n",
-                               mgr->name);
-               return -ENOMEM;
-       }
-
-       BUG_ON(priv->num_encoders >= ARRAY_SIZE(priv->encoders));
-
-       priv->encoders[priv->num_encoders++] = encoder;
-
-       return 0;
-}
-
-/* create connectors for each display device */
-static int create_connector(struct drm_device *dev,
-               struct omap_dss_device *dssdev)
+static int omap_modeset_init(struct drm_device *dev)
 {
        struct omap_drm_private *priv = dev->dev_private;
-       static struct notifier_block *notifier;
-       struct drm_connector *connector;
-       int j;
-
-       if (!dssdev->driver) {
-               dev_warn(dev->dev, "%s has no driver.. skipping it\n",
-                               dssdev->name);
-               return 0;
-       }
+       struct omap_dss_device *dssdev = NULL;
+       int num_ovls = dss_feat_get_num_ovls();
+       int id;
 
-       if (!(dssdev->driver->get_timings ||
-                               dssdev->driver->read_edid)) {
-               dev_warn(dev->dev, "%s driver does not support "
-                       "get_timings or read_edid.. skipping it!\n",
-                       dssdev->name);
-               return 0;
-       }
+       drm_mode_config_init(dev);
 
-       connector = omap_connector_init(dev,
-                       get_connector_type(dssdev), dssdev);
+       omap_drm_irq_install(dev);
 
-       if (!connector) {
-               dev_err(dev->dev, "could not create connector: %s\n",
-                               dssdev->name);
-               return -ENOMEM;
-       }
-
-       BUG_ON(priv->num_connectors >= ARRAY_SIZE(priv->connectors));
+       /*
+        * Create private planes and CRTCs: one plane from each of the first
+        * NUM_CRTCs overlays, each attached to one of the last NUM_CRTCs
+        * overlay managers:
+        */
+       for (id = 0; id < min(num_crtc, num_ovls); id++) {
+               struct drm_plane *plane;
+               struct drm_crtc *crtc;
 
-       priv->connectors[priv->num_connectors++] = connector;
+               plane = omap_plane_init(dev, id, true);
+               crtc = omap_crtc_init(dev, plane, pipe2chan(id), id);
 
-#if 0 /* enable when dss2 supports hotplug */
-       notifier = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
-       notifier->notifier_call = omap_drm_notifier;
-       omap_dss_add_notify(dssdev, notifier);
-#else
-       notifier = NULL;
-#endif
+               BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
+               priv->crtcs[id] = crtc;
+               priv->num_crtcs++;
 
-       for (j = 0; j < priv->num_encoders; j++) {
-               struct omap_overlay_manager *mgr =
-                       omap_encoder_get_manager(priv->encoders[j]);
-               if (mgr->get_device(mgr) == dssdev) {
-                       drm_mode_connector_attach_encoder(connector,
-                                       priv->encoders[j]);
-               }
+               priv->planes[id] = plane;
+               priv->num_planes++;
        }
 
-       return 0;
-}
-
-/* create up to max_overlays CRTCs mapping to overlays.. by default,
- * connect the overlays to different managers/encoders, giving priority
- * to encoders connected to connectors with a detected connection
- */
-static int create_crtc(struct drm_device *dev, struct omap_overlay *ovl,
-               int *j, unsigned int connected_connectors)
-{
-       struct omap_drm_private *priv = dev->dev_private;
-       struct omap_overlay_manager *mgr = NULL;
-       struct drm_crtc *crtc;
-
-       /* find next best connector, ones with detected connection first
+       /*
+        * Create normal planes for the remaining overlays:
         */
-       while (*j < priv->num_connectors && !mgr) {
-               if (connected_connectors & (1 << *j)) {
-                       struct drm_encoder *encoder =
-                               omap_connector_attached_encoder(
-                                               priv->connectors[*j]);
-                       if (encoder)
-                               mgr = omap_encoder_get_manager(encoder);
+       for (; id < num_ovls; id++) {
+               struct drm_plane *plane = omap_plane_init(dev, id, false);
 
-               }
-               (*j)++;
+               BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
+               priv->planes[priv->num_planes++] = plane;
        }
 
-       /* if we couldn't find another connected connector, lets start
-        * looking at the unconnected connectors:
-        *
-        * note: it might not be immediately apparent, but thanks to
-        * the !mgr check in both this loop and the one above, the only
-        * way to enter this loop is with *j == priv->num_connectors,
-        * so idx can never go negative.
-        */
-       while (*j < 2 * priv->num_connectors && !mgr) {
-               int idx = *j - priv->num_connectors;
-               if (!(connected_connectors & (1 << idx))) {
-                       struct drm_encoder *encoder =
-                               omap_connector_attached_encoder(
-                                               priv->connectors[idx]);
-                       if (encoder)
-                               mgr = omap_encoder_get_manager(encoder);
+       for_each_dss_dev(dssdev) {
+               struct drm_connector *connector;
+               struct drm_encoder *encoder;
 
+               if (!dssdev->driver) {
+                       dev_warn(dev->dev, "%s has no driver.. skipping it\n",
+                                       dssdev->name);
+                       return 0;
                }
-               (*j)++;
-       }
-
-       crtc = omap_crtc_init(dev, ovl, priv->num_crtcs);
-
-       if (!crtc) {
-               dev_err(dev->dev, "could not create CRTC: %s\n",
-                               ovl->name);
-               return -ENOMEM;
-       }
 
-       BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
-
-       priv->crtcs[priv->num_crtcs++] = crtc;
-
-       return 0;
-}
-
-static int create_plane(struct drm_device *dev, struct omap_overlay *ovl,
-               unsigned int possible_crtcs)
-{
-       struct omap_drm_private *priv = dev->dev_private;
-       struct drm_plane *plane =
-                       omap_plane_init(dev, ovl, possible_crtcs, false);
-
-       if (!plane) {
-               dev_err(dev->dev, "could not create plane: %s\n",
-                               ovl->name);
-               return -ENOMEM;
-       }
-
-       BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
-
-       priv->planes[priv->num_planes++] = plane;
-
-       return 0;
-}
-
-static int match_dev_name(struct omap_dss_device *dssdev, void *data)
-{
-       return !strcmp(dssdev->name, data);
-}
-
-static unsigned int detect_connectors(struct drm_device *dev)
-{
-       struct omap_drm_private *priv = dev->dev_private;
-       unsigned int connected_connectors = 0;
-       int i;
-
-       for (i = 0; i < priv->num_connectors; i++) {
-               struct drm_connector *connector = priv->connectors[i];
-               if (omap_connector_detect(connector, true) ==
-                               connector_status_connected) {
-                       connected_connectors |= (1 << i);
+               if (!(dssdev->driver->get_timings ||
+                                       dssdev->driver->read_edid)) {
+                       dev_warn(dev->dev, "%s driver does not support "
+                               "get_timings or read_edid.. skipping it!\n",
+                               dssdev->name);
+                       return 0;
                }
-       }
-
-       return connected_connectors;
-}
 
-static int omap_modeset_init(struct drm_device *dev)
-{
-       const struct omap_drm_platform_data *pdata = dev->dev->platform_data;
-       struct omap_kms_platform_data *kms_pdata = NULL;
-       struct omap_drm_private *priv = dev->dev_private;
-       struct omap_dss_device *dssdev = NULL;
-       int i, j;
-       unsigned int connected_connectors = 0;
+               encoder = omap_encoder_init(dev, dssdev);
 
-       drm_mode_config_init(dev);
-
-       if (pdata && pdata->kms_pdata) {
-               kms_pdata = pdata->kms_pdata;
-
-               /* if platform data is provided by the board file, use it to
-                * control which overlays, managers, and devices we own.
-                */
-               for (i = 0; i < kms_pdata->mgr_cnt; i++) {
-                       struct omap_overlay_manager *mgr =
-                               omap_dss_get_overlay_manager(
-                                               kms_pdata->mgr_ids[i]);
-                       create_encoder(dev, mgr);
-               }
-
-               for (i = 0; i < kms_pdata->dev_cnt; i++) {
-                       struct omap_dss_device *dssdev =
-                               omap_dss_find_device(
-                                       (void *)kms_pdata->dev_names[i],
-                                       match_dev_name);
-                       if (!dssdev) {
-                               dev_warn(dev->dev, "no such dssdev: %s\n",
-                                               kms_pdata->dev_names[i]);
-                               continue;
-                       }
-                       create_connector(dev, dssdev);
+               if (!encoder) {
+                       dev_err(dev->dev, "could not create encoder: %s\n",
+                                       dssdev->name);
+                       return -ENOMEM;
                }
 
-               connected_connectors = detect_connectors(dev);
+               connector = omap_connector_init(dev,
+                               get_connector_type(dssdev), dssdev, encoder);
 
-               j = 0;
-               for (i = 0; i < kms_pdata->ovl_cnt; i++) {
-                       struct omap_overlay *ovl =
-                               omap_dss_get_overlay(kms_pdata->ovl_ids[i]);
-                       create_crtc(dev, ovl, &j, connected_connectors);
+               if (!connector) {
+                       dev_err(dev->dev, "could not create connector: %s\n",
+                                       dssdev->name);
+                       return -ENOMEM;
                }
 
-               for (i = 0; i < kms_pdata->pln_cnt; i++) {
-                       struct omap_overlay *ovl =
-                               omap_dss_get_overlay(kms_pdata->pln_ids[i]);
-                       create_plane(dev, ovl, (1 << priv->num_crtcs) - 1);
-               }
-       } else {
-               /* otherwise just grab up to CONFIG_DRM_OMAP_NUM_CRTCS and try
-                * to make educated guesses about everything else
-                */
-               int max_overlays = min(omap_dss_get_num_overlays(), num_crtc);
+               BUG_ON(priv->num_encoders >= ARRAY_SIZE(priv->encoders));
+               BUG_ON(priv->num_connectors >= ARRAY_SIZE(priv->connectors));
 
-               for (i = 0; i < omap_dss_get_num_overlay_managers(); i++)
-                       create_encoder(dev, omap_dss_get_overlay_manager(i));
-
-               for_each_dss_dev(dssdev) {
-                       create_connector(dev, dssdev);
-               }
+               priv->encoders[priv->num_encoders++] = encoder;
+               priv->connectors[priv->num_connectors++] = connector;
 
-               connected_connectors = detect_connectors(dev);
+               drm_mode_connector_attach_encoder(connector, encoder);
 
-               j = 0;
-               for (i = 0; i < max_overlays; i++) {
-                       create_crtc(dev, omap_dss_get_overlay(i),
-                                       &j, connected_connectors);
-               }
-
-               /* use any remaining overlays as drm planes */
-               for (; i < omap_dss_get_num_overlays(); i++) {
-                       struct omap_overlay *ovl = omap_dss_get_overlay(i);
-                       create_plane(dev, ovl, (1 << priv->num_crtcs) - 1);
+               /* figure out which CRTCs we can connect the encoder to: */
+               encoder->possible_crtcs = 0;
+               for (id = 0; id < priv->num_crtcs; id++) {
+                       enum omap_dss_output_id supported_outputs =
+                                       dss_feat_get_supported_outputs(pipe2chan(id));
+                       if (supported_outputs & dssdev->output->id)
+                               encoder->possible_crtcs |= (1 << id);
                }
        }
 
-       /* for now keep the mapping of CRTCs and encoders static.. */
-       for (i = 0; i < priv->num_encoders; i++) {
-               struct drm_encoder *encoder = priv->encoders[i];
-               struct omap_overlay_manager *mgr =
-                               omap_encoder_get_manager(encoder);
-
-               encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
-
-               DBG("%s: possible_crtcs=%08x", mgr->name,
-                                       encoder->possible_crtcs);
-       }
-
-       dump_video_chains();
-
        dev->mode_config.min_width = 32;
        dev->mode_config.min_height = 32;
 
@@ -450,7 +229,7 @@ static int ioctl_gem_new(struct drm_device *dev, void *data,
                struct drm_file *file_priv)
 {
        struct drm_omap_gem_new *args = data;
-       DBG("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
+       VERB("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
                        args->size.bytes, args->flags);
        return omap_gem_new_handle(dev, file_priv, args->size,
                        args->flags, &args->handle);
@@ -510,7 +289,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
        struct drm_gem_object *obj;
        int ret = 0;
 
-       DBG("%p:%p: handle=%d", dev, file_priv, args->handle);
+       VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
 
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj)
@@ -565,14 +344,6 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
 
        dev->dev_private = priv;
 
-       ret = omapdss_compat_init();
-       if (ret) {
-               dev_err(dev->dev, "coult not init omapdss\n");
-               dev->dev_private = NULL;
-               kfree(priv);
-               return ret;
-       }
-
        priv->wq = alloc_ordered_workqueue("omapdrm", 0);
 
        INIT_LIST_HEAD(&priv->obj_list);
@@ -584,10 +355,13 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
                dev_err(dev->dev, "omap_modeset_init failed: ret=%d\n", ret);
                dev->dev_private = NULL;
                kfree(priv);
-               omapdss_compat_uninit();
                return ret;
        }
 
+       ret = drm_vblank_init(dev, priv->num_crtcs);
+       if (ret)
+               dev_warn(dev->dev, "could not init vblank\n");
+
        priv->fbdev = omap_fbdev_init(dev);
        if (!priv->fbdev) {
                dev_warn(dev->dev, "omap_fbdev_init failed\n");
@@ -596,10 +370,6 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
 
        drm_kms_helper_poll_init(dev);
 
-       ret = drm_vblank_init(dev, priv->num_crtcs);
-       if (ret)
-               dev_warn(dev->dev, "could not init vblank\n");
-
        return 0;
 }
 
@@ -609,8 +379,9 @@ static int dev_unload(struct drm_device *dev)
 
        DBG("unload: dev=%p", dev);
 
-       drm_vblank_cleanup(dev);
        drm_kms_helper_poll_fini(dev);
+       drm_vblank_cleanup(dev);
+       omap_drm_irq_uninstall(dev);
 
        omap_fbdev_free(dev);
        omap_modeset_free(dev);
@@ -619,8 +390,6 @@ static int dev_unload(struct drm_device *dev)
        flush_workqueue(priv->wq);
        destroy_workqueue(priv->wq);
 
-       omapdss_compat_uninit();
-
        kfree(dev->dev_private);
        dev->dev_private = NULL;
 
@@ -680,7 +449,9 @@ static void dev_lastclose(struct drm_device *dev)
                }
        }
 
+       mutex_lock(&dev->mode_config.mutex);
        ret = drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+       mutex_unlock(&dev->mode_config.mutex);
        if (ret)
                DBG("failed to restore crtc mode");
 }
@@ -695,60 +466,6 @@ static void dev_postclose(struct drm_device *dev, struct drm_file *file)
        DBG("postclose: dev=%p, file=%p", dev, file);
 }
 
-/**
- * enable_vblank - enable vblank interrupt events
- * @dev: DRM device
- * @crtc: which irq to enable
- *
- * Enable vblank interrupts for @crtc.  If the device doesn't have
- * a hardware vblank counter, this routine should be a no-op, since
- * interrupts will have to stay on to keep the count accurate.
- *
- * RETURNS
- * Zero on success, appropriate errno if the given @crtc's vblank
- * interrupt cannot be enabled.
- */
-static int dev_enable_vblank(struct drm_device *dev, int crtc)
-{
-       DBG("enable_vblank: dev=%p, crtc=%d", dev, crtc);
-       return 0;
-}
-
-/**
- * disable_vblank - disable vblank interrupt events
- * @dev: DRM device
- * @crtc: which irq to enable
- *
- * Disable vblank interrupts for @crtc.  If the device doesn't have
- * a hardware vblank counter, this routine should be a no-op, since
- * interrupts will have to stay on to keep the count accurate.
- */
-static void dev_disable_vblank(struct drm_device *dev, int crtc)
-{
-       DBG("disable_vblank: dev=%p, crtc=%d", dev, crtc);
-}
-
-static irqreturn_t dev_irq_handler(DRM_IRQ_ARGS)
-{
-       return IRQ_HANDLED;
-}
-
-static void dev_irq_preinstall(struct drm_device *dev)
-{
-       DBG("irq_preinstall: dev=%p", dev);
-}
-
-static int dev_irq_postinstall(struct drm_device *dev)
-{
-       DBG("irq_postinstall: dev=%p", dev);
-       return 0;
-}
-
-static void dev_irq_uninstall(struct drm_device *dev)
-{
-       DBG("irq_uninstall: dev=%p", dev);
-}
-
 static const struct vm_operations_struct omap_gem_vm_ops = {
        .fault = omap_gem_fault,
        .open = drm_gem_vm_open,
@@ -778,12 +495,12 @@ static struct drm_driver omap_drm_driver = {
                .preclose = dev_preclose,
                .postclose = dev_postclose,
                .get_vblank_counter = drm_vblank_count,
-               .enable_vblank = dev_enable_vblank,
-               .disable_vblank = dev_disable_vblank,
-               .irq_preinstall = dev_irq_preinstall,
-               .irq_postinstall = dev_irq_postinstall,
-               .irq_uninstall = dev_irq_uninstall,
-               .irq_handler = dev_irq_handler,
+               .enable_vblank = omap_irq_enable_vblank,
+               .disable_vblank = omap_irq_disable_vblank,
+               .irq_preinstall = omap_irq_preinstall,
+               .irq_postinstall = omap_irq_postinstall,
+               .irq_uninstall = omap_irq_uninstall,
+               .irq_handler = omap_irq_handler,
 #ifdef CONFIG_DEBUG_FS
                .debugfs_init = omap_debugfs_init,
                .debugfs_cleanup = omap_debugfs_cleanup,
index 1d4aea53b75da20e44be90b4a32e589a4a067785..cd1f22b0b124ddbf9eb1eff0efe5b77ddb7fd061 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/platform_data/omap_drm.h>
 #include "omap_drm.h"
 
+
 #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 #define VERB(fmt, ...) if (0) DRM_DEBUG(fmt, ##__VA_ARGS__) /* verbose debug */
 
  */
 #define MAX_MAPPERS 2
 
+/* parameters which describe (unrotated) coordinates of scanout within a fb: */
+struct omap_drm_window {
+       uint32_t rotation;
+       int32_t  crtc_x, crtc_y;                /* signed because can be offscreen */
+       uint32_t crtc_w, crtc_h;
+       uint32_t src_x, src_y;
+       uint32_t src_w, src_h;
+};
+
+/* Once GO bit is set, we can't make further updates to shadowed registers
+ * until the GO bit is cleared.  So various parts in the kms code that need
+ * to update shadowed registers queue up a pair of callbacks, pre_apply
+ * which is called before setting GO bit, and post_apply that is called
+ * after GO bit is cleared.  The crtc manages the queuing, and everyone
+ * else goes through omap_crtc_apply() using these callbacks so that the
+ * code which has to deal with the GO bit state is centralized.
+ */
+struct omap_drm_apply {
+       struct list_head pending_node, queued_node;
+       bool queued;
+       void (*pre_apply)(struct omap_drm_apply *apply);
+       void (*post_apply)(struct omap_drm_apply *apply);
+};
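
(Editorial sketch, not part of the patch itself.)  A consumer of the apply
mechanism described above embeds a struct omap_drm_apply, fills in the two
callbacks, and queues it with omap_crtc_apply() while holding
dev->mode_config.mutex; the names my_state, my_pre_apply, my_post_apply and
my_update below are hypothetical:

	struct my_state {
		struct omap_drm_apply apply;
		/* ...shadowed register values to program in pre_apply... */
	};

	static void my_pre_apply(struct omap_drm_apply *apply)
	{
		/* runs before the GO bit is set: write shadowed registers here */
	}

	static void my_post_apply(struct omap_drm_apply *apply)
	{
		/* runs after the GO bit clears: unpin old buffers, etc. */
	}

	static int my_update(struct drm_crtc *crtc, struct my_state *state)
	{
		state->apply.pre_apply  = my_pre_apply;
		state->apply.post_apply = my_post_apply;
		/* omap_crtc_apply() is a no-op if this apply is already queued */
		return omap_crtc_apply(crtc, &state->apply);
	}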
+
+/* For transiently registering for different DSS irqs that various parts
+ * of the KMS code need during setup/configuration.  These are not
+ * necessarily the same as what drm_vblank_get/put() are requesting, and
+ * the hysteresis in drm_vblank_put() is not necessarily desirable for
+ * internal housekeeping related irq usage.
+ */
+struct omap_drm_irq {
+       struct list_head node;
+       uint32_t irqmask;
+       bool registered;
+       void (*irq)(struct omap_drm_irq *irq, uint32_t irqstatus);
+};
+
+/* For KMS code that needs to wait for a certain number of IRQs:
+ */
+struct omap_irq_wait;
+struct omap_irq_wait * omap_irq_wait_init(struct drm_device *dev,
+               uint32_t irqmask, int count);
+int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
+               unsigned long timeout);
+
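
(Editorial sketch, not part of the patch itself.)  The wait helpers above are
used by registering for an irq mask with a count and then blocking with a
timeout, mirroring how set_enabled() in omap_crtc.c uses them; the name
wait_for_framedone below is hypothetical:

	static int wait_for_framedone(struct drm_device *dev,
			enum omap_channel channel)
	{
		struct omap_irq_wait *wait;

		/* register for one FRAMEDONE interrupt on this channel */
		wait = omap_irq_wait_init(dev,
				dispc_mgr_get_framedone_irq(channel), 1);

		/* 0 on success, non-zero if the 100ms timeout expires */
		return omap_irq_wait(dev, wait, msecs_to_jiffies(100));
	}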
 struct omap_drm_private {
        uint32_t omaprev;
 
@@ -58,6 +104,7 @@ struct omap_drm_private {
 
        struct workqueue_struct *wq;
 
+       /* list of GEM objects: */
        struct list_head obj_list;
 
        bool has_dmm;
@@ -65,6 +112,11 @@ struct omap_drm_private {
        /* properties: */
        struct drm_property *rotation_prop;
        struct drm_property *zorder_prop;
+
+       /* irq handling: */
+       struct list_head irq_list;    /* list of omap_drm_irq */
+       uint32_t vblank_mask;         /* irq bits set for userspace vblank */
+       struct omap_drm_irq error_handler;
 };
 
 /* this should probably be in drm-core to standardize amongst drivers */
@@ -75,15 +127,6 @@ struct omap_drm_private {
 #define DRM_REFLECT_X  4
 #define DRM_REFLECT_Y  5
 
-/* parameters which describe (unrotated) coordinates of scanout within a fb: */
-struct omap_drm_window {
-       uint32_t rotation;
-       int32_t  crtc_x, crtc_y;                /* signed because can be offscreen */
-       uint32_t crtc_w, crtc_h;
-       uint32_t src_x, src_y;
-       uint32_t src_w, src_h;
-};
-
 #ifdef CONFIG_DEBUG_FS
 int omap_debugfs_init(struct drm_minor *minor);
 void omap_debugfs_cleanup(struct drm_minor *minor);
@@ -92,23 +135,36 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
 void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
 #endif
 
+int omap_irq_enable_vblank(struct drm_device *dev, int crtc);
+void omap_irq_disable_vblank(struct drm_device *dev, int crtc);
+irqreturn_t omap_irq_handler(DRM_IRQ_ARGS);
+void omap_irq_preinstall(struct drm_device *dev);
+int omap_irq_postinstall(struct drm_device *dev);
+void omap_irq_uninstall(struct drm_device *dev);
+void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
+void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
+int omap_drm_irq_uninstall(struct drm_device *dev);
+int omap_drm_irq_install(struct drm_device *dev);
+
 struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
 void omap_fbdev_free(struct drm_device *dev);
 
+const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc);
+enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
+int omap_crtc_apply(struct drm_crtc *crtc,
+               struct omap_drm_apply *apply);
 struct drm_crtc *omap_crtc_init(struct drm_device *dev,
-               struct omap_overlay *ovl, int id);
+               struct drm_plane *plane, enum omap_channel channel, int id);
 
 struct drm_plane *omap_plane_init(struct drm_device *dev,
-               struct omap_overlay *ovl, unsigned int possible_crtcs,
-               bool priv);
+               int plane_id, bool private_plane);
 int omap_plane_dpms(struct drm_plane *plane, int mode);
 int omap_plane_mode_set(struct drm_plane *plane,
                struct drm_crtc *crtc, struct drm_framebuffer *fb,
                int crtc_x, int crtc_y,
                unsigned int crtc_w, unsigned int crtc_h,
                uint32_t src_x, uint32_t src_y,
-               uint32_t src_w, uint32_t src_h);
-void omap_plane_on_endwin(struct drm_plane *plane,
+               uint32_t src_w, uint32_t src_h,
                void (*fxn)(void *), void *arg);
 void omap_plane_install_properties(struct drm_plane *plane,
                struct drm_mode_object *obj);
@@ -116,21 +172,25 @@ int omap_plane_set_property(struct drm_plane *plane,
                struct drm_property *property, uint64_t val);
 
 struct drm_encoder *omap_encoder_init(struct drm_device *dev,
-               struct omap_overlay_manager *mgr);
-struct omap_overlay_manager *omap_encoder_get_manager(
+               struct omap_dss_device *dssdev);
+int omap_encoder_set_enabled(struct drm_encoder *encoder, bool enabled);
+int omap_encoder_update(struct drm_encoder *encoder,
+               struct omap_overlay_manager *mgr,
+               struct omap_video_timings *timings);
+
+struct drm_connector *omap_connector_init(struct drm_device *dev,
+               int connector_type, struct omap_dss_device *dssdev,
                struct drm_encoder *encoder);
 struct drm_encoder *omap_connector_attached_encoder(
                struct drm_connector *connector);
-enum drm_connector_status omap_connector_detect(
-               struct drm_connector *connector, bool force);
-
-struct drm_connector *omap_connector_init(struct drm_device *dev,
-               int connector_type, struct omap_dss_device *dssdev);
-void omap_connector_mode_set(struct drm_connector *connector,
-               struct drm_display_mode *mode);
 void omap_connector_flush(struct drm_connector *connector,
                int x, int y, int w, int h);
 
+void copy_timings_omap_to_drm(struct drm_display_mode *mode,
+               struct omap_video_timings *timings);
+void copy_timings_drm_to_omap(struct omap_video_timings *timings,
+               struct drm_display_mode *mode);
+
 uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
                uint32_t max_formats, enum omap_color_mode supported_modes);
 struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
@@ -207,6 +267,40 @@ static inline int align_pitch(int pitch, int width, int bpp)
        return ALIGN(pitch, 8 * bytespp);
 }
 
+static inline enum omap_channel pipe2chan(int pipe)
+{
+       int num_mgrs = dss_feat_get_num_mgrs();
+
+       /*
+        * We usually don't want to create a CRTC for each manager,
+        * at least not until we have a way to expose private planes
+        * to userspace.  Otherwise there would not be enough video
+        * pipes left for drm planes.  The higher-numbered managers tend
+        * to have more features, so start in reverse order.
+        */
+       return num_mgrs - pipe - 1;
+}
+
+/* map crtc to vblank mask */
+static inline uint32_t pipe2vbl(int crtc)
+{
+       enum omap_channel channel = pipe2chan(crtc);
+       return dispc_mgr_get_vsync_irq(channel);
+}
+
+static inline int crtc2pipe(struct drm_device *dev, struct drm_crtc *crtc)
+{
+       struct omap_drm_private *priv = dev->dev_private;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(priv->crtcs); i++)
+               if (priv->crtcs[i] == crtc)
+                       return i;
+
+       BUG();  /* bogus CRTC ptr */
+       return -1;
+}
+
 /* should these be made into common util helpers?
  */
 
index 5341d5e3e3179e30b2bbe8540b12cc935843a2b7..e053160d2db38c9fd6d77bea4971e22635ef18ad 100644 (file)
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
 
+#include <linux/list.h>
+
+
 /*
  * encoder funcs
  */
 
 #define to_omap_encoder(x) container_of(x, struct omap_encoder, base)
 
+/* The encoder and connector both map to the same dssdev; the encoder
+ * handles the 'active' parts, i.e. anything that modifies the state
+ * of the hw, and the connector handles the 'read-only' parts, like
+ * detecting connection and reading edid.
+ */
 struct omap_encoder {
        struct drm_encoder base;
-       struct omap_overlay_manager *mgr;
+       struct omap_dss_device *dssdev;
 };
 
 static void omap_encoder_destroy(struct drm_encoder *encoder)
 {
        struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-       DBG("%s", omap_encoder->mgr->name);
        drm_encoder_cleanup(encoder);
        kfree(omap_encoder);
 }
 
+static const struct drm_encoder_funcs omap_encoder_funcs = {
+       .destroy = omap_encoder_destroy,
+};
+
+/*
+ * The CRTC drm_crtc_helper_set_mode() doesn't really give us the right
+ * order, so the easiest way to work around this for now is to make all
+ * the encoder helpers no-ops and have the omap_crtc code take care
+ * of the sequencing and call us at the right points.
+ *
+ * Eventually to handle connecting CRTCs to different encoders properly,
+ * either the CRTC helpers need to change or we need to replace
+ * drm_crtc_helper_set_mode(), but let's wait until atomic-modeset for
+ * that.
+ */
+
 static void omap_encoder_dpms(struct drm_encoder *encoder, int mode)
 {
-       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-       DBG("%s: %d", omap_encoder->mgr->name, mode);
 }
 
 static bool omap_encoder_mode_fixup(struct drm_encoder *encoder,
                                  const struct drm_display_mode *mode,
                                  struct drm_display_mode *adjusted_mode)
 {
-       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-       DBG("%s", omap_encoder->mgr->name);
        return true;
 }
 
@@ -60,47 +79,16 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
                                struct drm_display_mode *mode,
                                struct drm_display_mode *adjusted_mode)
 {
-       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-       struct drm_device *dev = encoder->dev;
-       struct omap_drm_private *priv = dev->dev_private;
-       int i;
-
-       mode = adjusted_mode;
-
-       DBG("%s: set mode: %dx%d", omap_encoder->mgr->name,
-                       mode->hdisplay, mode->vdisplay);
-
-       for (i = 0; i < priv->num_connectors; i++) {
-               struct drm_connector *connector = priv->connectors[i];
-               if (connector->encoder == encoder)
-                       omap_connector_mode_set(connector, mode);
-
-       }
 }
 
 static void omap_encoder_prepare(struct drm_encoder *encoder)
 {
-       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-       struct drm_encoder_helper_funcs *encoder_funcs =
-                               encoder->helper_private;
-       DBG("%s", omap_encoder->mgr->name);
-       encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
 }
 
 static void omap_encoder_commit(struct drm_encoder *encoder)
 {
-       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-       struct drm_encoder_helper_funcs *encoder_funcs =
-                               encoder->helper_private;
-       DBG("%s", omap_encoder->mgr->name);
-       omap_encoder->mgr->apply(omap_encoder->mgr);
-       encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
 }
 
-static const struct drm_encoder_funcs omap_encoder_funcs = {
-       .destroy = omap_encoder_destroy,
-};
-
 static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
        .dpms = omap_encoder_dpms,
        .mode_fixup = omap_encoder_mode_fixup,
@@ -109,23 +97,54 @@ static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
        .commit = omap_encoder_commit,
 };
 
-struct omap_overlay_manager *omap_encoder_get_manager(
-               struct drm_encoder *encoder)
+/*
+ * Instead of relying on the helpers for modeset, the omap_crtc code
+ * calls these functions in the proper sequence.
+ */
+
+int omap_encoder_set_enabled(struct drm_encoder *encoder, bool enabled)
 {
        struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-       return omap_encoder->mgr;
+       struct omap_dss_device *dssdev = omap_encoder->dssdev;
+       struct omap_dss_driver *dssdrv = dssdev->driver;
+
+       if (enabled) {
+               return dssdrv->enable(dssdev);
+       } else {
+               dssdrv->disable(dssdev);
+               return 0;
+       }
+}
+
+int omap_encoder_update(struct drm_encoder *encoder,
+               struct omap_overlay_manager *mgr,
+               struct omap_video_timings *timings)
+{
+       struct drm_device *dev = encoder->dev;
+       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+       struct omap_dss_device *dssdev = omap_encoder->dssdev;
+       struct omap_dss_driver *dssdrv = dssdev->driver;
+       int ret;
+
+       dssdev->output->manager = mgr;
+
+       ret = dssdrv->check_timings(dssdev, timings);
+       if (ret) {
+               dev_err(dev->dev, "could not set timings: %d\n", ret);
+               return ret;
+       }
+
+       dssdrv->set_timings(dssdev, timings);
+
+       return 0;
 }
 
 /* initialize encoder */
 struct drm_encoder *omap_encoder_init(struct drm_device *dev,
-               struct omap_overlay_manager *mgr)
+               struct omap_dss_device *dssdev)
 {
        struct drm_encoder *encoder = NULL;
        struct omap_encoder *omap_encoder;
-       struct omap_overlay_manager_info info;
-       int ret;
-
-       DBG("%s", mgr->name);
 
        omap_encoder = kzalloc(sizeof(*omap_encoder), GFP_KERNEL);
        if (!omap_encoder) {
@@ -133,33 +152,14 @@ struct drm_encoder *omap_encoder_init(struct drm_device *dev,
                goto fail;
        }
 
-       omap_encoder->mgr = mgr;
+       omap_encoder->dssdev = dssdev;
+
        encoder = &omap_encoder->base;
 
        drm_encoder_init(dev, encoder, &omap_encoder_funcs,
                         DRM_MODE_ENCODER_TMDS);
        drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs);
 
-       mgr->get_manager_info(mgr, &info);
-
-       /* TODO: fix hard-coded setup.. */
-       info.default_color = 0x00000000;
-       info.trans_key = 0x00000000;
-       info.trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
-       info.trans_enabled = false;
-
-       ret = mgr->set_manager_info(mgr, &info);
-       if (ret) {
-               dev_err(dev->dev, "could not set manager info\n");
-               goto fail;
-       }
-
-       ret = mgr->apply(mgr);
-       if (ret) {
-               dev_err(dev->dev, "could not apply\n");
-               goto fail;
-       }
-
        return encoder;
 
 fail:
index ea3840038250391bb3e3e0c897fc07a246b67732..b6c5b5c6c8c53dbd39e84baa91cd43e1b81ebfb7 100644 (file)
@@ -194,7 +194,7 @@ struct dma_buf_ops omap_dmabuf_ops = {
 struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
                struct drm_gem_object *obj, int flags)
 {
-       return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, 0600);
+       return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags);
 }
 
 struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
diff --git a/drivers/staging/omapdrm/omap_irq.c b/drivers/staging/omapdrm/omap_irq.c
new file mode 100644 (file)
index 0000000..2629ba7
--- /dev/null
@@ -0,0 +1,322 @@
+/*
+ * drivers/staging/omapdrm/omap_irq.c
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+static DEFINE_SPINLOCK(list_lock);
+
+static void omap_irq_error_handler(struct omap_drm_irq *irq,
+               uint32_t irqstatus)
+{
+       DRM_ERROR("errors: %08x\n", irqstatus);
+}
+
+/* call with list_lock and dispc runtime held */
+static void omap_irq_update(struct drm_device *dev)
+{
+       struct omap_drm_private *priv = dev->dev_private;
+       struct omap_drm_irq *irq;
+       uint32_t irqmask = priv->vblank_mask;
+
+       BUG_ON(!spin_is_locked(&list_lock));
+
+       list_for_each_entry(irq, &priv->irq_list, node)
+               irqmask |= irq->irqmask;
+
+       DBG("irqmask=%08x", irqmask);
+
+       dispc_write_irqenable(irqmask);
+       dispc_read_irqenable();        /* flush posted write */
+}
+
+void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
+{
+       struct omap_drm_private *priv = dev->dev_private;
+       unsigned long flags;
+
+       dispc_runtime_get();
+       spin_lock_irqsave(&list_lock, flags);
+
+       if (!WARN_ON(irq->registered)) {
+               irq->registered = true;
+               list_add(&irq->node, &priv->irq_list);
+               omap_irq_update(dev);
+       }
+
+       spin_unlock_irqrestore(&list_lock, flags);
+       dispc_runtime_put();
+}
+
+void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
+{
+       unsigned long flags;
+
+       dispc_runtime_get();
+       spin_lock_irqsave(&list_lock, flags);
+
+       if (!WARN_ON(!irq->registered)) {
+               irq->registered = false;
+               list_del(&irq->node);
+               omap_irq_update(dev);
+       }
+
+       spin_unlock_irqrestore(&list_lock, flags);
+       dispc_runtime_put();
+}
+
+struct omap_irq_wait {
+       struct omap_drm_irq irq;
+       int count;
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_event);
+
+static void wait_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+       struct omap_irq_wait *wait =
+                       container_of(irq, struct omap_irq_wait, irq);
+       wait->count--;
+       wake_up_all(&wait_event);
+}
+
+struct omap_irq_wait * omap_irq_wait_init(struct drm_device *dev,
+               uint32_t irqmask, int count)
+{
+       struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);
+       wait->irq.irq = wait_irq;
+       wait->irq.irqmask = irqmask;
+       wait->count = count;
+       omap_irq_register(dev, &wait->irq);
+       return wait;
+}
+
+int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
+               unsigned long timeout)
+{
+       int ret = wait_event_timeout(wait_event, (wait->count <= 0), timeout);
+       omap_irq_unregister(dev, &wait->irq);
+       kfree(wait);
+       if (ret == 0)
+               return -1;
+       return 0;
+}
+
+/**
+ * enable_vblank - enable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to enable
+ *
+ * Enable vblank interrupts for @crtc.  If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ *
+ * RETURNS
+ * Zero on success, appropriate errno if the given @crtc's vblank
+ * interrupt cannot be enabled.
+ */
+int omap_irq_enable_vblank(struct drm_device *dev, int crtc)
+{
+       struct omap_drm_private *priv = dev->dev_private;
+       unsigned long flags;
+
+       DBG("dev=%p, crtc=%d", dev, crtc);
+
+       dispc_runtime_get();
+       spin_lock_irqsave(&list_lock, flags);
+       priv->vblank_mask |= pipe2vbl(crtc);
+       omap_irq_update(dev);
+       spin_unlock_irqrestore(&list_lock, flags);
+       dispc_runtime_put();
+
+       return 0;
+}
+
+/**
+ * disable_vblank - disable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to enable
+ *
+ * Disable vblank interrupts for @crtc.  If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ */
+void omap_irq_disable_vblank(struct drm_device *dev, int crtc)
+{
+       struct omap_drm_private *priv = dev->dev_private;
+       unsigned long flags;
+
+       DBG("dev=%p, crtc=%d", dev, crtc);
+
+       dispc_runtime_get();
+       spin_lock_irqsave(&list_lock, flags);
+       priv->vblank_mask &= ~pipe2vbl(crtc);
+       omap_irq_update(dev);
+       spin_unlock_irqrestore(&list_lock, flags);
+       dispc_runtime_put();
+}
+
+irqreturn_t omap_irq_handler(DRM_IRQ_ARGS)
+{
+       struct drm_device *dev = (struct drm_device *) arg;
+       struct omap_drm_private *priv = dev->dev_private;
+       struct omap_drm_irq *handler, *n;
+       unsigned long flags;
+       unsigned int id;
+       u32 irqstatus;
+
+       irqstatus = dispc_read_irqstatus();
+       dispc_clear_irqstatus(irqstatus);
+       dispc_read_irqstatus();        /* flush posted write */
+
+       VERB("irqs: %08x", irqstatus);
+
+       for (id = 0; id < priv->num_crtcs; id++)
+               if (irqstatus & pipe2vbl(id))
+                       drm_handle_vblank(dev, id);
+
+       spin_lock_irqsave(&list_lock, flags);
+       list_for_each_entry_safe(handler, n, &priv->irq_list, node) {
+               if (handler->irqmask & irqstatus) {
+                       spin_unlock_irqrestore(&list_lock, flags);
+                       handler->irq(handler, handler->irqmask & irqstatus);
+                       spin_lock_irqsave(&list_lock, flags);
+               }
+       }
+       spin_unlock_irqrestore(&list_lock, flags);
+
+       return IRQ_HANDLED;
+}
+
+void omap_irq_preinstall(struct drm_device *dev)
+{
+       DBG("dev=%p", dev);
+       dispc_runtime_get();
+       dispc_clear_irqstatus(0xffffffff);
+       dispc_runtime_put();
+}
+
+int omap_irq_postinstall(struct drm_device *dev)
+{
+       struct omap_drm_private *priv = dev->dev_private;
+       struct omap_drm_irq *error_handler = &priv->error_handler;
+
+       DBG("dev=%p", dev);
+
+       INIT_LIST_HEAD(&priv->irq_list);
+
+       error_handler->irq = omap_irq_error_handler;
+       error_handler->irqmask = DISPC_IRQ_OCP_ERR;
+
+       /* for now ignore DISPC_IRQ_SYNC_LOST_DIGIT.. really I think
+        * we just need to ignore it while enabling tv-out
+        */
+       error_handler->irqmask &= ~DISPC_IRQ_SYNC_LOST_DIGIT;
+
+       omap_irq_register(dev, error_handler);
+
+       return 0;
+}
+
+void omap_irq_uninstall(struct drm_device *dev)
+{
+       DBG("dev=%p", dev);
+       /* TODO: we probably need to call drm_irq_uninstall() somewhere too */
+}
+
+/*
+ * We need a special version, instead of just using drm_irq_install(),
+ * because we need to register the irq via omapdss.  Once omapdss and
+ * omapdrm are merged together we can assign the dispc hwmod data to
+ * ourselves and drop these and just use drm_irq_{install,uninstall}()
+ */
+
+int omap_drm_irq_install(struct drm_device *dev)
+{
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+
+       if (dev->irq_enabled) {
+               mutex_unlock(&dev->struct_mutex);
+               return -EBUSY;
+       }
+       dev->irq_enabled = 1;
+       mutex_unlock(&dev->struct_mutex);
+
+       /* Before installing handler */
+       if (dev->driver->irq_preinstall)
+               dev->driver->irq_preinstall(dev);
+
+       ret = dispc_request_irq(dev->driver->irq_handler, dev);
+
+       if (ret < 0) {
+               mutex_lock(&dev->struct_mutex);
+               dev->irq_enabled = 0;
+               mutex_unlock(&dev->struct_mutex);
+               return ret;
+       }
+
+       /* After installing handler */
+       if (dev->driver->irq_postinstall)
+               ret = dev->driver->irq_postinstall(dev);
+
+       if (ret < 0) {
+               mutex_lock(&dev->struct_mutex);
+               dev->irq_enabled = 0;
+               mutex_unlock(&dev->struct_mutex);
+               dispc_free_irq(dev);
+       }
+
+       return ret;
+}
+
+int omap_drm_irq_uninstall(struct drm_device *dev)
+{
+       unsigned long irqflags;
+       int irq_enabled, i;
+
+       mutex_lock(&dev->struct_mutex);
+       irq_enabled = dev->irq_enabled;
+       dev->irq_enabled = 0;
+       mutex_unlock(&dev->struct_mutex);
+
+       /*
+        * Wake up any waiters so they don't hang.
+        */
+       if (dev->num_crtcs) {
+               spin_lock_irqsave(&dev->vbl_lock, irqflags);
+               for (i = 0; i < dev->num_crtcs; i++) {
+                       DRM_WAKEUP(&dev->vbl_queue[i]);
+                       dev->vblank_enabled[i] = 0;
+                       dev->last_vblank[i] =
+                               dev->driver->get_vblank_counter(dev, i);
+               }
+               spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+       }
+
+       if (!irq_enabled)
+               return -EINVAL;
+
+       if (dev->driver->irq_uninstall)
+               dev->driver->irq_uninstall(dev);
+
+       dispc_free_irq(dev);
+
+       return 0;
+}
index 2a8e5bab49c908aa2ef3edd9da4ded8f6074dd67..bb989d7f026dcafd62f1aba1f5d1877ea34d3d9b 100644 (file)
@@ -41,12 +41,14 @@ struct callback {
 
 struct omap_plane {
        struct drm_plane base;
-       struct omap_overlay *ovl;
+       int id;  /* TODO rename omap_plane -> omap_plane_id in omapdss so I can use the enum */
+       const char *name;
        struct omap_overlay_info info;
+       struct omap_drm_apply apply;
 
        /* position/orientation of scanout within the fb: */
        struct omap_drm_window win;
-
+       bool enabled;
 
        /* last fb that we pinned: */
        struct drm_framebuffer *pinned_fb;
@@ -54,189 +56,15 @@ struct omap_plane {
        uint32_t nformats;
        uint32_t formats[32];
 
-       /* for synchronizing access to unpins fifo */
-       struct mutex unpin_mutex;
+       struct omap_drm_irq error_irq;
 
-       /* set of bo's pending unpin until next END_WIN irq */
+       /* set of bo's pending unpin until next post_apply() */
        DECLARE_KFIFO_PTR(unpin_fifo, struct drm_gem_object *);
-       int num_unpins, pending_num_unpins;
-
-       /* for deferred unpin when we need to wait for scanout complete irq */
-       struct work_struct work;
-
-       /* callback on next endwin irq */
-       struct callback endwin;
-};
 
-/* map from ovl->id to the irq we are interested in for scanout-done */
-static const uint32_t id2irq[] = {
-               [OMAP_DSS_GFX]    = DISPC_IRQ_GFX_END_WIN,
-               [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_END_WIN,
-               [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_END_WIN,
-               [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_END_WIN,
+       // XXX maybe get rid of this and handle vblank in crtc too?
+       struct callback apply_done_cb;
 };
 
-static void dispc_isr(void *arg, uint32_t mask)
-{
-       struct drm_plane *plane = arg;
-       struct omap_plane *omap_plane = to_omap_plane(plane);
-       struct omap_drm_private *priv = plane->dev->dev_private;
-
-       omap_dispc_unregister_isr(dispc_isr, plane,
-                       id2irq[omap_plane->ovl->id]);
-
-       queue_work(priv->wq, &omap_plane->work);
-}
-
-static void unpin_worker(struct work_struct *work)
-{
-       struct omap_plane *omap_plane =
-                       container_of(work, struct omap_plane, work);
-       struct callback endwin;
-
-       mutex_lock(&omap_plane->unpin_mutex);
-       DBG("unpinning %d of %d", omap_plane->num_unpins,
-                       omap_plane->num_unpins + omap_plane->pending_num_unpins);
-       while (omap_plane->num_unpins > 0) {
-               struct drm_gem_object *bo = NULL;
-               int ret = kfifo_get(&omap_plane->unpin_fifo, &bo);
-               WARN_ON(!ret);
-               omap_gem_put_paddr(bo);
-               drm_gem_object_unreference_unlocked(bo);
-               omap_plane->num_unpins--;
-       }
-       endwin = omap_plane->endwin;
-       omap_plane->endwin.fxn = NULL;
-       mutex_unlock(&omap_plane->unpin_mutex);
-
-       if (endwin.fxn)
-               endwin.fxn(endwin.arg);
-}
-
-static void install_irq(struct drm_plane *plane)
-{
-       struct omap_plane *omap_plane = to_omap_plane(plane);
-       struct omap_overlay *ovl = omap_plane->ovl;
-       int ret;
-
-       ret = omap_dispc_register_isr(dispc_isr, plane, id2irq[ovl->id]);
-
-       /*
-        * omapdss has upper limit on # of registered irq handlers,
-        * which we shouldn't hit.. but if we do the limit should
-        * be raised or bad things happen:
-        */
-       WARN_ON(ret == -EBUSY);
-}
-
-/* push changes down to dss2 */
-static int commit(struct drm_plane *plane)
-{
-       struct drm_device *dev = plane->dev;
-       struct omap_plane *omap_plane = to_omap_plane(plane);
-       struct omap_overlay *ovl = omap_plane->ovl;
-       struct omap_overlay_info *info = &omap_plane->info;
-       int ret;
-
-       DBG("%s", ovl->name);
-       DBG("%dx%d -> %dx%d (%d)", info->width, info->height, info->out_width,
-                       info->out_height, info->screen_width);
-       DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
-                       info->paddr, info->p_uv_addr);
-
-       /* NOTE: do we want to do this at all here, or just wait
-        * for dpms(ON) since other CRTC's may not have their mode
-        * set yet, so fb dimensions may still change..
-        */
-       ret = ovl->set_overlay_info(ovl, info);
-       if (ret) {
-               dev_err(dev->dev, "could not set overlay info\n");
-               return ret;
-       }
-
-       mutex_lock(&omap_plane->unpin_mutex);
-       omap_plane->num_unpins += omap_plane->pending_num_unpins;
-       omap_plane->pending_num_unpins = 0;
-       mutex_unlock(&omap_plane->unpin_mutex);
-
-       /* our encoder doesn't necessarily get a commit() after this, in
-        * particular in the dpms() and mode_set_base() cases, so force the
-        * manager to update:
-        *
-        * could this be in the encoder somehow?
-        */
-       if (ovl->manager) {
-               ret = ovl->manager->apply(ovl->manager);
-               if (ret) {
-                       dev_err(dev->dev, "could not apply settings\n");
-                       return ret;
-               }
-
-               /*
-                * NOTE: really this should be atomic w/ mgr->apply() but
-                * omapdss does not expose such an API
-                */
-               if (omap_plane->num_unpins > 0)
-                       install_irq(plane);
-
-       } else {
-               struct omap_drm_private *priv = dev->dev_private;
-               queue_work(priv->wq, &omap_plane->work);
-       }
-
-
-       if (ovl->is_enabled(ovl)) {
-               omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
-                               info->out_width, info->out_height);
-       }
-
-       return 0;
-}
-
-/* when CRTC that we are attached to has potentially changed, this checks
- * if we are attached to proper manager, and if necessary updates.
- */
-static void update_manager(struct drm_plane *plane)
-{
-       struct omap_drm_private *priv = plane->dev->dev_private;
-       struct omap_plane *omap_plane = to_omap_plane(plane);
-       struct omap_overlay *ovl = omap_plane->ovl;
-       struct omap_overlay_manager *mgr = NULL;
-       int i;
-
-       if (plane->crtc) {
-               for (i = 0; i < priv->num_encoders; i++) {
-                       struct drm_encoder *encoder = priv->encoders[i];
-                       if (encoder->crtc == plane->crtc) {
-                               mgr = omap_encoder_get_manager(encoder);
-                               break;
-                       }
-               }
-       }
-
-       if (ovl->manager != mgr) {
-               bool enabled = ovl->is_enabled(ovl);
-
-               /* don't switch things around with enabled overlays: */
-               if (enabled)
-                       omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
-
-               if (ovl->manager) {
-                       DBG("disconnecting %s from %s", ovl->name,
-                                       ovl->manager->name);
-                       ovl->unset_manager(ovl);
-               }
-
-               if (mgr) {
-                       DBG("connecting %s to %s", ovl->name, mgr->name);
-                       ovl->set_manager(ovl, mgr);
-               }
-
-               if (enabled && mgr)
-                       omap_plane_dpms(plane, DRM_MODE_DPMS_ON);
-       }
-}
-
 static void unpin(void *arg, struct drm_gem_object *bo)
 {
        struct drm_plane *plane = arg;
@@ -244,7 +72,6 @@ static void unpin(void *arg, struct drm_gem_object *bo)
 
        if (kfifo_put(&omap_plane->unpin_fifo,
                        (const struct drm_gem_object **)&bo)) {
-               omap_plane->pending_num_unpins++;
                /* also hold a ref so it isn't free'd while pinned */
                drm_gem_object_reference(bo);
        } else {
@@ -264,13 +91,19 @@ static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
 
                DBG("%p -> %p", pinned_fb, fb);
 
-               mutex_lock(&omap_plane->unpin_mutex);
+               if (fb)
+                       drm_framebuffer_reference(fb);
+
                ret = omap_framebuffer_replace(pinned_fb, fb, plane, unpin);
-               mutex_unlock(&omap_plane->unpin_mutex);
+
+               if (pinned_fb)
+                       drm_framebuffer_unreference(pinned_fb);
 
                if (ret) {
                        dev_err(plane->dev->dev, "could not swap %p -> %p\n",
                                        omap_plane->pinned_fb, fb);
+                       if (fb)
+                               drm_framebuffer_unreference(fb);
                        omap_plane->pinned_fb = NULL;
                        return ret;
                }
@@ -281,31 +114,90 @@ static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
        return 0;
 }
 
-/* update parameters that are dependent on the framebuffer dimensions and
- * position within the fb that this plane scans out from. This is called
- * when framebuffer or x,y base may have changed.
- */
-static void update_scanout(struct drm_plane *plane)
+static void omap_plane_pre_apply(struct omap_drm_apply *apply)
 {
-       struct omap_plane *omap_plane = to_omap_plane(plane);
-       struct omap_overlay_info *info = &omap_plane->info;
+       struct omap_plane *omap_plane =
+                       container_of(apply, struct omap_plane, apply);
        struct omap_drm_window *win = &omap_plane->win;
+       struct drm_plane *plane = &omap_plane->base;
+       struct drm_device *dev = plane->dev;
+       struct omap_overlay_info *info = &omap_plane->info;
+       struct drm_crtc *crtc = plane->crtc;
+       enum omap_channel channel;
+       bool enabled = omap_plane->enabled && crtc;
+       bool ilace, replication;
        int ret;
 
-       ret = update_pin(plane, plane->fb);
-       if (ret) {
-               dev_err(plane->dev->dev,
-                       "could not pin fb: %d\n", ret);
-               omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+       DBG("%s, enabled=%d", omap_plane->name, enabled);
+
+       /* if fb has changed, pin new fb: */
+       update_pin(plane, enabled ? plane->fb : NULL);
+
+       if (!enabled) {
+               dispc_ovl_enable(omap_plane->id, false);
                return;
        }
 
+       channel = omap_crtc_channel(crtc);
+
+       /* update scanout: */
        omap_framebuffer_update_scanout(plane->fb, win, info);
 
-       DBG("%s: %d,%d: %08x %08x (%d)", omap_plane->ovl->name,
-                       win->src_x, win->src_y,
-                       (u32)info->paddr, (u32)info->p_uv_addr,
+       DBG("%dx%d -> %dx%d (%d)", info->width, info->height,
+                       info->out_width, info->out_height,
                        info->screen_width);
+       DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
+                       info->paddr, info->p_uv_addr);
+
+       /* TODO: */
+       ilace = false;
+       replication = false;
+
+       /* and finally, update omapdss: */
+       ret = dispc_ovl_setup(omap_plane->id, info,
+                       replication, omap_crtc_timings(crtc), false);
+       if (ret) {
+               dev_err(dev->dev, "dispc_ovl_setup failed: %d\n", ret);
+               return;
+       }
+
+       dispc_ovl_enable(omap_plane->id, true);
+       dispc_ovl_set_channel_out(omap_plane->id, channel);
+}
+
+static void omap_plane_post_apply(struct omap_drm_apply *apply)
+{
+       struct omap_plane *omap_plane =
+                       container_of(apply, struct omap_plane, apply);
+       struct drm_plane *plane = &omap_plane->base;
+       struct omap_overlay_info *info = &omap_plane->info;
+       struct drm_gem_object *bo = NULL;
+       struct callback cb;
+
+       cb = omap_plane->apply_done_cb;
+       omap_plane->apply_done_cb.fxn = NULL;
+
+       while (kfifo_get(&omap_plane->unpin_fifo, &bo)) {
+               omap_gem_put_paddr(bo);
+               drm_gem_object_unreference_unlocked(bo);
+       }
+
+       if (cb.fxn)
+               cb.fxn(cb.arg);
+
+       if (omap_plane->enabled) {
+               omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
+                               info->out_width, info->out_height);
+       }
+}
+
+static int apply(struct drm_plane *plane)
+{
+       if (plane->crtc) {
+               struct omap_plane *omap_plane = to_omap_plane(plane);
+               return omap_crtc_apply(plane->crtc, &omap_plane->apply);
+       }
+       return 0;
 }
 
 int omap_plane_mode_set(struct drm_plane *plane,
@@ -313,7 +205,8 @@ int omap_plane_mode_set(struct drm_plane *plane,
                int crtc_x, int crtc_y,
                unsigned int crtc_w, unsigned int crtc_h,
                uint32_t src_x, uint32_t src_y,
-               uint32_t src_w, uint32_t src_h)
+               uint32_t src_w, uint32_t src_h,
+               void (*fxn)(void *), void *arg)
 {
        struct omap_plane *omap_plane = to_omap_plane(plane);
        struct omap_drm_window *win = &omap_plane->win;
@@ -329,17 +222,20 @@ int omap_plane_mode_set(struct drm_plane *plane,
        win->src_w = src_w >> 16;
        win->src_h = src_h >> 16;
 
-       /* note: this is done after this fxn returns.. but if we need
-        * to do a commit/update_scanout, etc before this returns we
-        * need the current value.
-        */
+       if (fxn) {
+               /* omap_crtc should ensure that a new page flip
+                * isn't permitted while there is one pending:
+                */
+               BUG_ON(omap_plane->apply_done_cb.fxn);
+
+               omap_plane->apply_done_cb.fxn = fxn;
+               omap_plane->apply_done_cb.arg = arg;
+       }
+
        plane->fb = fb;
        plane->crtc = crtc;
 
-       update_scanout(plane);
-       update_manager(plane);
-
-       return 0;
+       return apply(plane);
 }
 
 static int omap_plane_update(struct drm_plane *plane,
@@ -349,9 +245,12 @@ static int omap_plane_update(struct drm_plane *plane,
                uint32_t src_x, uint32_t src_y,
                uint32_t src_w, uint32_t src_h)
 {
-       omap_plane_mode_set(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h,
-                       src_x, src_y, src_w, src_h);
-       return omap_plane_dpms(plane, DRM_MODE_DPMS_ON);
+       struct omap_plane *omap_plane = to_omap_plane(plane);
+       omap_plane->enabled = true;
+       return omap_plane_mode_set(plane, crtc, fb,
+                       crtc_x, crtc_y, crtc_w, crtc_h,
+                       src_x, src_y, src_w, src_h,
+                       NULL, NULL);
 }
 
 static int omap_plane_disable(struct drm_plane *plane)
@@ -364,48 +263,32 @@ static int omap_plane_disable(struct drm_plane *plane)
 static void omap_plane_destroy(struct drm_plane *plane)
 {
        struct omap_plane *omap_plane = to_omap_plane(plane);
-       DBG("%s", omap_plane->ovl->name);
+
+       DBG("%s", omap_plane->name);
+
+       omap_irq_unregister(plane->dev, &omap_plane->error_irq);
+
        omap_plane_disable(plane);
        drm_plane_cleanup(plane);
-       WARN_ON(omap_plane->pending_num_unpins + omap_plane->num_unpins > 0);
+
+       WARN_ON(!kfifo_is_empty(&omap_plane->unpin_fifo));
        kfifo_free(&omap_plane->unpin_fifo);
+
        kfree(omap_plane);
 }
 
 int omap_plane_dpms(struct drm_plane *plane, int mode)
 {
        struct omap_plane *omap_plane = to_omap_plane(plane);
-       struct omap_overlay *ovl = omap_plane->ovl;
-       int r;
+       bool enabled = (mode == DRM_MODE_DPMS_ON);
+       int ret = 0;
 
-       DBG("%s: %d", omap_plane->ovl->name, mode);
-
-       if (mode == DRM_MODE_DPMS_ON) {
-               update_scanout(plane);
-               r = commit(plane);
-               if (!r)
-                       r = ovl->enable(ovl);
-       } else {
-               struct omap_drm_private *priv = plane->dev->dev_private;
-               r = ovl->disable(ovl);
-               update_pin(plane, NULL);
-               queue_work(priv->wq, &omap_plane->work);
+       if (enabled != omap_plane->enabled) {
+               omap_plane->enabled = enabled;
+               ret = apply(plane);
        }
 
-       return r;
-}
-
-void omap_plane_on_endwin(struct drm_plane *plane,
-               void (*fxn)(void *), void *arg)
-{
-       struct omap_plane *omap_plane = to_omap_plane(plane);
-
-       mutex_lock(&omap_plane->unpin_mutex);
-       omap_plane->endwin.fxn = fxn;
-       omap_plane->endwin.arg = arg;
-       mutex_unlock(&omap_plane->unpin_mutex);
-
-       install_irq(plane);
+       return ret;
 }
 
 /* helper to install properties which are common to planes and crtcs */
@@ -454,25 +337,13 @@ int omap_plane_set_property(struct drm_plane *plane,
        int ret = -EINVAL;
 
        if (property == priv->rotation_prop) {
-               struct omap_overlay *ovl = omap_plane->ovl;
-
-               DBG("%s: rotation: %02x", ovl->name, (uint32_t)val);
+               DBG("%s: rotation: %02x", omap_plane->name, (uint32_t)val);
                omap_plane->win.rotation = val;
-
-               if (ovl->is_enabled(ovl))
-                       ret = omap_plane_dpms(plane, DRM_MODE_DPMS_ON);
-               else
-                       ret = 0;
+               ret = apply(plane);
        } else if (property == priv->zorder_prop) {
-               struct omap_overlay *ovl = omap_plane->ovl;
-
-               DBG("%s: zorder: %d", ovl->name, (uint32_t)val);
+               DBG("%s: zorder: %02x", omap_plane->name, (uint32_t)val);
                omap_plane->info.zorder = val;
-
-               if (ovl->is_enabled(ovl))
-                       ret = omap_plane_dpms(plane, DRM_MODE_DPMS_ON);
-               else
-                       ret = 0;
+               ret = apply(plane);
        }
 
        return ret;
@@ -485,20 +356,38 @@ static const struct drm_plane_funcs omap_plane_funcs = {
                .set_property = omap_plane_set_property,
 };
 
+static void omap_plane_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+{
+       struct omap_plane *omap_plane =
+                       container_of(irq, struct omap_plane, error_irq);
+       DRM_ERROR("%s: errors: %08x\n", omap_plane->name, irqstatus);
+}
+
+static const char *plane_names[] = {
+               [OMAP_DSS_GFX] = "gfx",
+               [OMAP_DSS_VIDEO1] = "vid1",
+               [OMAP_DSS_VIDEO2] = "vid2",
+               [OMAP_DSS_VIDEO3] = "vid3",
+};
+
+static const uint32_t error_irqs[] = {
+               [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
+               [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
+               [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
+               [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
+};
+
 /* initialize plane */
 struct drm_plane *omap_plane_init(struct drm_device *dev,
-               struct omap_overlay *ovl, unsigned int possible_crtcs,
-               bool priv)
+               int id, bool private_plane)
 {
+       struct omap_drm_private *priv = dev->dev_private;
        struct drm_plane *plane = NULL;
        struct omap_plane *omap_plane;
+       struct omap_overlay_info *info;
        int ret;
 
-       DBG("%s: possible_crtcs=%08x, priv=%d", ovl->name,
-                       possible_crtcs, priv);
-
-       /* friendly reminder to update table for future hw: */
-       WARN_ON(ovl->id >= ARRAY_SIZE(id2irq));
+       DBG("%s: priv=%d", plane_names[id], private_plane);
 
        omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
        if (!omap_plane) {
@@ -506,47 +395,50 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
                goto fail;
        }
 
-       mutex_init(&omap_plane->unpin_mutex);
-
        ret = kfifo_alloc(&omap_plane->unpin_fifo, 16, GFP_KERNEL);
        if (ret) {
                dev_err(dev->dev, "could not allocate unpin FIFO\n");
                goto fail;
        }
 
-       INIT_WORK(&omap_plane->work, unpin_worker);
-
        omap_plane->nformats = omap_framebuffer_get_formats(
                        omap_plane->formats, ARRAY_SIZE(omap_plane->formats),
-                       ovl->supported_modes);
-       omap_plane->ovl = ovl;
+                       dss_feat_get_supported_color_modes(id));
+       omap_plane->id = id;
+       omap_plane->name = plane_names[id];
+
        plane = &omap_plane->base;
 
-       drm_plane_init(dev, plane, possible_crtcs, &omap_plane_funcs,
-                       omap_plane->formats, omap_plane->nformats, priv);
+       omap_plane->apply.pre_apply  = omap_plane_pre_apply;
+       omap_plane->apply.post_apply = omap_plane_post_apply;
+
+       omap_plane->error_irq.irqmask = error_irqs[id];
+       omap_plane->error_irq.irq = omap_plane_error_irq;
+       omap_irq_register(dev, &omap_plane->error_irq);
+
+       drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &omap_plane_funcs,
+                       omap_plane->formats, omap_plane->nformats, private_plane);
 
        omap_plane_install_properties(plane, &plane->base);
 
        /* get our starting configuration, set defaults for parameters
         * we don't currently use, etc:
         */
-       ovl->get_overlay_info(ovl, &omap_plane->info);
-       omap_plane->info.rotation_type = OMAP_DSS_ROT_DMA;
-       omap_plane->info.rotation = OMAP_DSS_ROT_0;
-       omap_plane->info.global_alpha = 0xff;
-       omap_plane->info.mirror = 0;
+       info = &omap_plane->info;
+       info->rotation_type = OMAP_DSS_ROT_DMA;
+       info->rotation = OMAP_DSS_ROT_0;
+       info->global_alpha = 0xff;
+       info->mirror = 0;
 
        /* Set defaults depending on whether we are a CRTC or overlay
         * layer.
         * TODO add ioctl to give userspace an API to change this.. this
         * will come in a subsequent patch.
         */
-       if (priv)
+       if (private_plane)
                omap_plane->info.zorder = 0;
        else
-               omap_plane->info.zorder = ovl->id;
-
-       update_manager(plane);
+               omap_plane->info.zorder = id;
 
        return plane;
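
The rewrite above replaces the old commit()/update_manager() path with a pair of pre_apply/post_apply callbacks embedded in the plane and dispatched through omap_crtc_apply(). A minimal sketch of that shape, using illustrative names rather than the real omapdrm symbols:

#include <linux/kernel.h>

/* small vtable embedded in the owning object */
struct apply_ops {
        void (*pre_apply)(struct apply_ops *ops);   /* program new hw state */
        void (*post_apply)(struct apply_ops *ops);  /* release old buffers */
};

struct my_plane {
        int id;
        struct apply_ops apply;
};

static void my_pre_apply(struct apply_ops *ops)
{
        /* recover the owning plane from the embedded member */
        struct my_plane *p = container_of(ops, struct my_plane, apply);

        /* configure overlay p->id here */
        (void)p;
}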
 
index ae38475854b50249f6787b90762cd025c26fef5b..d10d75e8a33fc6775b8901297a6c9de397b064ce 100644 (file)
@@ -937,7 +937,8 @@ short alloc_rx_desc_ring(struct net_device *dev, u16 bufsize, int count)
 
                dma_tmp = pci_map_single(pdev, buf, bufsize * sizeof(u8),
                                         PCI_DMA_FROMDEVICE);
-
+               if (pci_dma_mapping_error(pdev, dma_tmp))
+                       return -1;
                if (-1 == buffer_add(&(priv->rxbuffer), buf, dma_tmp,
                           &(priv->rxbufferhead))) {
                        DMESGE("Unable to allocate mem RX buf");
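
This hunk, and the similar ones in the files that follow, apply the same rule: a streaming mapping returned by pci_map_single() must be validated with pci_dma_mapping_error() before the descriptor is handed to the hardware. A minimal sketch of the pattern, using a hypothetical rx_map() helper:

#include <linux/errno.h>
#include <linux/pci.h>

static int rx_map(struct pci_dev *pdev, void *buf, size_t len,
                  dma_addr_t *dma)
{
        *dma = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(pdev, *dma))
                return -ENOMEM;   /* caller frees buf and aborts ring setup */
        return 0;
}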
index 808aab6fa5ef70b23baf41cbe4bbad20ca6d9f9c..a9d78e9651c6006a981c7e26c920f71cd57692d2 100644 (file)
@@ -1183,6 +1183,8 @@ void  rtl8192_tx_fill_desc(struct net_device *dev, struct tx_desc *pdesc,
                                                pTxFwInfo->TxRate,
                                                cb_desc);
 
+       if (pci_dma_mapping_error(priv->pdev, mapping))
+               RT_TRACE(COMP_ERR, "DMA Mapping error\n");
        if (cb_desc->bAMPDUEnable) {
                pTxFwInfo->AllowAggregation = 1;
                pTxFwInfo->RxMF = cb_desc->ampdu_factor;
@@ -1280,6 +1282,8 @@ void  rtl8192_tx_fill_cmd_desc(struct net_device *dev,
        dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
                         PCI_DMA_TODEVICE);
 
+       if (pci_dma_mapping_error(priv->pdev, mapping))
+               RT_TRACE(COMP_ERR, "DMA Mapping error\n");
        memset(entry, 0, 12);
        entry->LINIP = cb_desc->bLastIniPkt;
        entry->FirstSeg = 1;
index 1a70f324552f14becf86c43d347b4977b84cd94e..4ebf99b3097543b07cad50a264e700d70b926e27 100644 (file)
@@ -2104,7 +2104,10 @@ static short rtl8192_alloc_rx_desc_ring(struct net_device *dev)
                                                  skb_tail_pointer_rsl(skb),
                                                  priv->rxbuffersize,
                                                  PCI_DMA_FROMDEVICE);
-
+                       if (pci_dma_mapping_error(priv->pdev, *mapping)) {
+                               dev_kfree_skb_any(skb);
+                               return -1;
+                       }
                        entry->BufferAddress = cpu_to_le32(*mapping);
 
                        entry->Length = priv->rxbuffersize;
@@ -2397,7 +2400,11 @@ static void rtl8192_rx_normal(struct net_device *dev)
                                                    skb_tail_pointer_rsl(skb),
                                                    priv->rxbuffersize,
                                                    PCI_DMA_FROMDEVICE);
-
+                       if (pci_dma_mapping_error(priv->pdev,
+                                                 *((dma_addr_t *)skb->cb))) {
+                               dev_kfree_skb_any(skb);
+                               return;
+                       }
                }
 done:
                pdesc->BufferAddress = cpu_to_le32(*((dma_addr_t *)skb->cb));
index 6b73843e580af46e373c7885898b45e97baa9928..a96cd06d69dd7fc9ed78d71ec920763b8c33c022 100644 (file)
@@ -63,6 +63,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
        {USB_DEVICE(0x0B05, 0x1791)}, /* 11n mode disable */
        /* Belkin */
        {USB_DEVICE(0x050D, 0x945A)},
+       /* ISY IWL - Belkin clone */
+       {USB_DEVICE(0x050D, 0x11F1)},
        /* Corega */
        {USB_DEVICE(0x07AA, 0x0047)},
        /* D-Link */
index ac87c5e38dee05ec7f8e072a3d27c5c346594a13..1facad625554b0b88baf67c70e4a33b46d77560b 100644 (file)
@@ -2,6 +2,7 @@ config SB105X
        tristate "SystemBase PCI Multiport UART"
        select SERIAL_CORE
        depends on PCI
+       depends on X86
        help
          A driver for the SystemBase Multi-2/PCI serial card
 
index edb2a85b9d52512c41b161db44b5f74e93a44cd8..9464f3874346eb141f115c65752b9bbe41fc4c7d 100644 (file)
@@ -3054,6 +3054,7 @@ static int init_mp_dev(struct pci_dev *pcidev, mppcibrd_t brd)
                                sbdev->nr_ports = ((portnum_hex/16)*10) + (portnum_hex % 16);
                        }
                        break;
+#ifdef CONFIG_PARPORT_PC
                case PCI_DEVICE_ID_MP2S1P :
                        sbdev->nr_ports = 2;
 
@@ -3073,6 +3074,7 @@ static int init_mp_dev(struct pci_dev *pcidev, mppcibrd_t brd)
                        /* add PC compatible parallel port */
                        parport_pc_probe_port(pcidev->resource[2].start, pcidev->resource[3].start, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, &pcidev->dev, 0);
                        break;
+#endif
        }
 
        ret = request_region(sbdev->uart_access_addr, (8*sbdev->nr_ports), sbdev->name);
index df9533798095cd7b2e2b08a1799f206c5091e07c..7616f058a00b43ab4c1dd91d0384e6a5400b9a65 100644 (file)
@@ -342,7 +342,7 @@ int synth_init(char *synth_name)
 
        mutex_lock(&spk_mutex);
        /* First, check if we already have it loaded. */
-       for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++)
+       for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
                if (strcmp(synths[i]->name, synth_name) == 0)
                        synth = synths[i];
 
@@ -423,7 +423,7 @@ int synth_add(struct spk_synth *in_synth)
        int i;
        int status = 0;
        mutex_lock(&spk_mutex);
-       for (i = 0; synths[i] != NULL && i < MAXSYNTHS; i++)
+       for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
                /* synth_remove() is responsible for rotating the array down */
                if (in_synth == synths[i]) {
                        mutex_unlock(&spk_mutex);
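
Both loops above had the && operands in the wrong order, so synths[i] could be read at i == MAXSYNTHS before the bound was tested. The fix leans on short-circuit evaluation: check the index first, then the element it guards. A minimal illustrative sketch, not the real speakup code:

#include <linux/string.h>

#define MAXSYNTHS 16

static const char *synths[MAXSYNTHS];

static int find_synth(const char *name)
{
        int i;

        /* bound first, then the array access it protects */
        for (i = 0; i < MAXSYNTHS && synths[i] != NULL; i++)
                if (strcmp(synths[i], name) == 0)
                        return i;
        return -1;
}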
index 543a127c7d4d63e5b43de81151ec54a382cf03b8..b783bfa59b1cea42f943596f8ddce5c620a08835 100644 (file)
@@ -31,7 +31,7 @@
  * driver should read or write to PRM/CM registers directly; they
  * should rely on OMAP core code to do this.
  */
-#include <mach-omap2/cm2xxx_3xxx.h>
+#include <mach-omap2/cm3xxx.h>
 #include <mach-omap2/prm-regbits-34xx.h>
 #include <mach-omap2/cm-regbits-34xx.h>
 #include <dspbridge/devdefs.h>
index b647207928b14aca75a5e445cce94e5efc56b940..2f084e181d39de2131360677c65c9deb4ed3143f 100644 (file)
@@ -121,9 +121,13 @@ void dsp_clk_exit(void)
        for (i = 0; i < DM_TIMER_CLOCKS; i++)
                omap_dm_timer_free(timer[i]);
 
+       clk_unprepare(iva2_clk);
        clk_put(iva2_clk);
+       clk_unprepare(ssi.sst_fck);
        clk_put(ssi.sst_fck);
+       clk_unprepare(ssi.ssr_fck);
        clk_put(ssi.ssr_fck);
+       clk_unprepare(ssi.ick);
        clk_put(ssi.ick);
 }
 
@@ -145,14 +149,21 @@ void dsp_clk_init(void)
        iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
        if (IS_ERR(iva2_clk))
                dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
+       else
+               clk_prepare(iva2_clk);
 
        ssi.sst_fck = clk_get(&dspbridge_device.dev, "ssi_sst_fck");
        ssi.ssr_fck = clk_get(&dspbridge_device.dev, "ssi_ssr_fck");
        ssi.ick = clk_get(&dspbridge_device.dev, "ssi_ick");
 
-       if (IS_ERR(ssi.sst_fck) || IS_ERR(ssi.ssr_fck) || IS_ERR(ssi.ick))
+       if (IS_ERR(ssi.sst_fck) || IS_ERR(ssi.ssr_fck) || IS_ERR(ssi.ick)) {
                dev_err(bridge, "failed to get ssi: sst %p, ssr %p, ick %p\n",
                                        ssi.sst_fck, ssi.ssr_fck, ssi.ick);
+       } else {
+               clk_prepare(ssi.sst_fck);
+               clk_prepare(ssi.ssr_fck);
+               clk_prepare(ssi.ick);
+       }
 }
 
 /**
index 1dce36fb828f1d24ebcd8ea4d19c5c86d9587c88..7ff0e6c980395c80c9c83fe4355a6eef854a6a79 100644 (file)
@@ -63,11 +63,15 @@ int dsp_wdt_init(void)
        dsp_wdt.fclk = clk_get(NULL, "wdt3_fck");
 
        if (!IS_ERR(dsp_wdt.fclk)) {
+               clk_prepare(dsp_wdt.fclk);
+
                dsp_wdt.iclk = clk_get(NULL, "wdt3_ick");
                if (IS_ERR(dsp_wdt.iclk)) {
                        clk_put(dsp_wdt.fclk);
                        dsp_wdt.fclk = NULL;
                        ret = -EFAULT;
+               } else {
+                       clk_prepare(dsp_wdt.iclk);
                }
        } else
                ret = -EFAULT;
@@ -95,10 +99,14 @@ void dsp_wdt_exit(void)
        free_irq(INT_34XX_WDT3_IRQ, &dsp_wdt);
        tasklet_kill(&dsp_wdt.wdt3_tasklet);
 
-       if (dsp_wdt.fclk)
+       if (dsp_wdt.fclk) {
+               clk_unprepare(dsp_wdt.fclk);
                clk_put(dsp_wdt.fclk);
-       if (dsp_wdt.iclk)
+       }
+       if (dsp_wdt.iclk) {
+               clk_unprepare(dsp_wdt.iclk);
                clk_put(dsp_wdt.iclk);
+       }
 
        dsp_wdt.fclk = NULL;
        dsp_wdt.iclk = NULL;
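
The two dspbridge hunks above follow the same common-clock rule: once clk_get() succeeds, the clock must be prepared before it can be enabled, and every clk_prepare() needs a matching clk_unprepare() before clk_put(). A minimal sketch of the pairing; the device pointer and the "foo_fck" clock id are placeholders:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static struct clk *foo_clk;

static int foo_clk_init(struct device *dev)
{
        foo_clk = clk_get(dev, "foo_fck");      /* hypothetical clock id */
        if (IS_ERR(foo_clk))
                return PTR_ERR(foo_clk);

        return clk_prepare(foo_clk);            /* pairs with clk_unprepare() */
}

static void foo_clk_exit(void)
{
        clk_unprepare(foo_clk);
        clk_put(foo_clk);
}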
index 0331178ca3b3c65d54da5a9ea0920118008e87f4..bf73ba26e88aee484b21dc5aeff7b661c07f0c90 100644 (file)
@@ -162,11 +162,9 @@ static struct vme_driver pio2_driver = {
 
 static int __init pio2_init(void)
 {
-       int retval = 0;
-
        if (bus_num == 0) {
                pr_err("No cards, skipping registration\n");
-               goto err_nocard;
+               return -ENODEV;
        }
 
        if (bus_num > PIO2_CARDS_MAX) {
@@ -176,15 +174,7 @@ static int __init pio2_init(void)
        }
 
        /* Register the PIO2 driver */
-       retval = vme_register_driver(&pio2_driver, bus_num);
-       if (retval != 0)
-               goto err_reg;
-
-       return retval;
-
-err_reg:
-err_nocard:
-       return retval;
+       return vme_register_driver(&pio2_driver, bus_num);
 }
 
 static int pio2_match(struct vme_dev *vdev)
index 6b2ec390e77503e177989ef89ec205011d02b031..806cbf72fb59b6f58eb1419d98af755475fed311 100644 (file)
@@ -90,7 +90,6 @@ typedef struct tagSRSNCapObject {
 } SRSNCapObject, *PSRSNCapObject;
 
 // BSS info(AP)
-#pragma pack(1)
 typedef struct tagKnownBSS {
     // BSS info
     BOOL            bActive;
index 5d8faf9f96ec4aac7f4e4c452b7d83dafb79bd35..e0d2b07ba608d14415f19daee1ae8d0924b6cf65 100644 (file)
@@ -34,7 +34,6 @@
 #include "device.h"
 
 /*---------------------  Export Definitions -------------------------*/
-#pragma pack(1)
 typedef struct tagSINTData {
        BYTE byTSR0;
        BYTE byPkt0;
index 22710cef751d77fe51dc9d760fc98574c6c94b44..ae6e2d237b207250a9ced56d059e590dccb61964 100644 (file)
@@ -95,13 +95,12 @@ typedef enum tagWZONETYPE {
 // Ioctl interface structure
 // Command structure
 //
-#pragma pack(1)
 typedef struct tagSCmdRequest {
        u8 name[16];
        void    *data;
        u16         wResult;
        u16     wCmdCode;
-} SCmdRequest, *PSCmdRequest;
+} __packed SCmdRequest, *PSCmdRequest;
 
 //
 // Scan
@@ -111,7 +110,7 @@ typedef struct tagSCmdScan {
 
     u8     ssid[SSID_MAXLEN + 2];
 
-} SCmdScan, *PSCmdScan;
+} __packed SCmdScan, *PSCmdScan;
 
 //
 // BSS Join
@@ -126,7 +125,7 @@ typedef struct tagSCmdBSSJoin {
     BOOL    bPSEnable;
     BOOL    bShareKeyAuth;
 
-} SCmdBSSJoin, *PSCmdBSSJoin;
+} __packed SCmdBSSJoin, *PSCmdBSSJoin;
 
 //
 // Zonetype Setting
@@ -137,7 +136,7 @@ typedef struct tagSCmdZoneTypeSet {
  BOOL       bWrite;
  WZONETYPE  ZoneType;
 
-} SCmdZoneTypeSet, *PSCmdZoneTypeSet;
+} __packed SCmdZoneTypeSet, *PSCmdZoneTypeSet;
 
 typedef struct tagSWPAResult {
          char  ifname[100];
@@ -145,7 +144,7 @@ typedef struct tagSWPAResult {
        u8 key_mgmt;
        u8 eap_type;
          BOOL authenticated;
-} SWPAResult, *PSWPAResult;
+} __packed SWPAResult, *PSWPAResult;
 
 typedef struct tagSCmdStartAP {
 
@@ -157,7 +156,7 @@ typedef struct tagSCmdStartAP {
     BOOL    bShareKeyAuth;
     u8      byBasicRate;
 
-} SCmdStartAP, *PSCmdStartAP;
+} __packed SCmdStartAP, *PSCmdStartAP;
 
 typedef struct tagSCmdSetWEP {
 
@@ -167,7 +166,7 @@ typedef struct tagSCmdSetWEP {
     BOOL    bWepKeyAvailable[WEP_NKEYS];
     u32     auWepKeyLength[WEP_NKEYS];
 
-} SCmdSetWEP, *PSCmdSetWEP;
+} __packed SCmdSetWEP, *PSCmdSetWEP;
 
 typedef struct tagSBSSIDItem {
 
@@ -180,14 +179,14 @@ typedef struct tagSBSSIDItem {
     BOOL    bWEPOn;
     u32     uRSSI;
 
-} SBSSIDItem;
+} __packed SBSSIDItem;
 
 
 typedef struct tagSBSSIDList {
 
        u32                 uItem;
        SBSSIDItem      sBSSIDList[0];
-} SBSSIDList, *PSBSSIDList;
+} __packed SBSSIDList, *PSBSSIDList;
 
 
 typedef struct tagSNodeItem {
@@ -208,7 +207,7 @@ typedef struct tagSNodeItem {
     u32            uTxAttempts;
     u16            wFailureRatio;
 
-} SNodeItem;
+} __packed SNodeItem;
 
 
 typedef struct tagSNodeList {
@@ -216,7 +215,7 @@ typedef struct tagSNodeList {
        u32                 uItem;
        SNodeItem       sNodeList[0];
 
-} SNodeList, *PSNodeList;
+} __packed SNodeList, *PSNodeList;
 
 
 typedef struct tagSCmdLinkStatus {
@@ -229,7 +228,7 @@ typedef struct tagSCmdLinkStatus {
     u32     uChannel;
     u32     uLinkRate;
 
-} SCmdLinkStatus, *PSCmdLinkStatus;
+} __packed SCmdLinkStatus, *PSCmdLinkStatus;
 
 //
 // 802.11 counter
@@ -247,7 +246,7 @@ typedef struct tagSDot11MIBCount {
     u32 ReceivedFragmentCount;
     u32 MulticastReceivedFrameCount;
     u32 FCSErrorCount;
-} SDot11MIBCount, *PSDot11MIBCount;
+} __packed SDot11MIBCount, *PSDot11MIBCount;
 
 
 
@@ -355,13 +354,13 @@ typedef struct tagSStatMIBCount {
     u32   ullTxBroadcastBytes[2];
     u32   ullTxMulticastBytes[2];
     u32   ullTxDirectedBytes[2];
-} SStatMIBCount, *PSStatMIBCount;
+} __packed SStatMIBCount, *PSStatMIBCount;
 
 typedef struct tagSCmdValue {
 
     u32     dwValue;
 
-} SCmdValue,  *PSCmdValue;
+} __packed SCmdValue,  *PSCmdValue;
 
 //
 // hostapd & viawget ioctl related
@@ -431,7 +430,7 @@ struct viawget_hostapd_param {
                        u8 ssid[32];
                } scan_req;
        } u;
-};
+} __packed;
 
 /*---------------------  Export Classes  ----------------------------*/
 
index 959c8868f6e2add1f86531ae68e96e5b58eb83fa..2522ddec718d3dd3f930097d80841ec2b8bdda88 100644 (file)
@@ -67,12 +67,11 @@ enum {
 
 
 
-#pragma pack(1)
 typedef struct viawget_wpa_header {
        u8 type;
        u16 req_ie_len;
        u16 resp_ie_len;
-} viawget_wpa_header;
+} __packed viawget_wpa_header;
 
 struct viawget_wpa_param {
        u32 cmd;
@@ -113,9 +112,8 @@ struct viawget_wpa_param {
                        u8 *buf;
                } scan_results;
        } u;
-};
+} __packed;
 
-#pragma pack(1)
 struct viawget_scan_result {
        u8 bssid[6];
        u8 ssid[32];
@@ -130,7 +128,7 @@ struct viawget_scan_result {
        int noise;
        int level;
        int maxrate;
-};
+} __packed;
 
 /*---------------------  Export Classes  ----------------------------*/
 
index 18c06a59c091b7bb1928cbac9038f5f0809a2288..1d31eab19d16b2d1ef5c2714162046da08c68500 100644 (file)
@@ -638,8 +638,8 @@ int prism2_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
 }
 
 
-int prism2_set_tx_power(struct wiphy *wiphy, enum nl80211_tx_power_setting type,
-                       int mbm)
+int prism2_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+                       enum nl80211_tx_power_setting type, int mbm)
 {
        struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
        wlandevice_t *wlandev = priv->wlandev;
@@ -665,7 +665,8 @@ exit:
        return err;
 }
 
-int prism2_get_tx_power(struct wiphy *wiphy, int *dbm)
+int prism2_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+                       int *dbm)
 {
        struct prism2_wiphy_private *priv = wiphy_priv(wiphy);
        wlandevice_t *wlandev = priv->wlandev;
index 4efa9bc0fcf0d1750152de5342d565144a2427ce..89bfd858bb280949f499c463a6ec26034bddcdad 100644 (file)
@@ -406,7 +406,7 @@ int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp)
        /* SSID */
        req->ssid.status = P80211ENUM_msgitem_status_data_ok;
        req->ssid.data.len = le16_to_cpu(item->ssid.len);
-       req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_BSSID_LEN);
+       req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_SSID_MAXLEN);
        memcpy(req->ssid.data.data, item->ssid.data, req->ssid.data.len);
 
        /* supported rates */
index fb4a7c94aed36f087cb41cc771f947b1d6776678..f2a73bd739fb94b750e66e1ee2c931e3c971a52a 100644 (file)
@@ -265,7 +265,7 @@ out_cleanup:
 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                           int offset)
 {
-       int ret;
+       int ret = 0;
        size_t clen;
        unsigned long handle;
        struct page *page;
@@ -286,10 +286,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                        goto out;
                }
                ret = zram_decompress_page(zram, uncmem, index);
-               if (ret) {
-                       kfree(uncmem);
+               if (ret)
                        goto out;
-               }
        }
 
        /*
@@ -302,16 +300,18 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 
        user_mem = kmap_atomic(page);
 
-       if (is_partial_io(bvec))
+       if (is_partial_io(bvec)) {
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                       bvec->bv_len);
-       else
+               kunmap_atomic(user_mem);
+               user_mem = NULL;
+       } else {
                uncmem = user_mem;
+       }
 
        if (page_zero_filled(uncmem)) {
-               kunmap_atomic(user_mem);
-               if (is_partial_io(bvec))
-                       kfree(uncmem);
+               if (!is_partial_io(bvec))
+                       kunmap_atomic(user_mem);
                zram_stat_inc(&zram->stats.pages_zero);
                zram_set_flag(zram, index, ZRAM_ZERO);
                ret = 0;
@@ -321,9 +321,11 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
        ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
                               zram->compress_workmem);
 
-       kunmap_atomic(user_mem);
-       if (is_partial_io(bvec))
-                       kfree(uncmem);
+       if (!is_partial_io(bvec)) {
+               kunmap_atomic(user_mem);
+               user_mem = NULL;
+               uncmem = NULL;
+       }
 
        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Compression failed! err=%d\n", ret);
@@ -332,8 +334,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 
        if (unlikely(clen > max_zpage_size)) {
                zram_stat_inc(&zram->stats.bad_compress);
-               src = uncmem;
                clen = PAGE_SIZE;
+               src = NULL;
+               if (is_partial_io(bvec))
+                       src = uncmem;
        }
 
        handle = zs_malloc(zram->mem_pool, clen);
@@ -345,7 +349,11 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
        }
        cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
 
+       if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+               src = kmap_atomic(page);
        memcpy(cmem, src, clen);
+       if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
+               kunmap_atomic(src);
 
        zs_unmap_object(zram->mem_pool, handle);
 
@@ -358,9 +366,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
        if (clen <= PAGE_SIZE / 2)
                zram_stat_inc(&zram->stats.good_compress);
 
-       return 0;
-
 out:
+       if (is_partial_io(bvec))
+               kfree(uncmem);
+
        if (ret)
                zram_stat64_inc(zram, &zram->stats.failed_writes);
        return ret;
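
The zram rewrite funnels every failure path through the single out: label so the bounce buffer used for partial writes is freed exactly once instead of in each error branch. A stripped-down sketch of that structure; the names and the trivial failure check are illustrative, not the real zram logic:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

static int write_block(void *dst, const void *src, size_t len, bool partial)
{
        void *bounce = NULL;
        int ret = 0;

        if (partial) {
                bounce = kzalloc(len, GFP_KERNEL);
                if (!bounce)
                        return -ENOMEM;
                memcpy(bounce, src, len);       /* stand-in for read-modify */
        }

        if (len == 0) {                         /* stand-in for a real error */
                ret = -EINVAL;
                goto out;
        }

        memcpy(dst, partial ? bounce : src, len); /* stand-in for compress+store */

out:
        if (partial)
                kfree(bounce);                  /* single cleanup point */
        return ret;
}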
index 9ac4c151eae43af4efdaac0b0f36c1ef301f068f..ba6091bf93fcde3c1463d6ba54a8ae07f812ddf0 100644 (file)
@@ -372,7 +372,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
                 * made generic here.
                 */
                if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
-                    iscsi_sna_gte(cmd->stat_sn, conn->sess->exp_cmd_sn)) {
+                    iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
                        list_del(&cmd->i_conn_node);
                        spin_unlock_bh(&conn->cmd_lock);
                        iscsit_free_cmd(cmd);
index 85140f7dde1eec83305d4e41cc0eab665fb87df0..7d4ec02e29a9773d3eb1ddbcd1fc404e5ee334e8 100644 (file)
@@ -212,7 +212,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
        unsigned char *buf;
        unsigned char *ptr;
-       sense_reason_t rc;
+       sense_reason_t rc = TCM_NO_SENSE;
        u32 len = 4; /* Skip over RESERVED area in header */
        int alua_access_state, primary = 0;
        u16 tg_pt_id, rtpi;
index e2695101bb9911f1d02ba03217a3b0f059fbf0ac..f2aa7543d20aca556855c4fbde2ba3aa00b8ffcc 100644 (file)
@@ -941,6 +941,8 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
 
 int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
 {
+       int block_size = dev->dev_attrib.block_size;
+
        if (dev->export_count) {
                pr_err("dev[%p]: Unable to change SE Device"
                        " fabric_max_sectors while export_count is %d\n",
@@ -978,8 +980,12 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
        /*
         * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
         */
+       if (!block_size) {
+               block_size = 512;
+               pr_warn("Defaulting to 512 for zero block_size\n");
+       }
        fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
-                                                     dev->dev_attrib.block_size);
+                                                     block_size);
 
        dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
        pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
index 810263dfa4a1d9d272e16a492c5fb44365967a6c..c57bbbc7a7d10c428d750f577a381cd2689291b3 100644 (file)
@@ -754,6 +754,11 @@ static int target_fabric_port_link(
                return -EFAULT;
        }
 
+       if (!(dev->dev_flags & DF_CONFIGURED)) {
+               pr_err("se_device not configured yet, cannot port link\n");
+               return -ENODEV;
+       }
+
        tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
        se_tpg = container_of(to_config_group(tpg_ci),
                                struct se_portal_group, tpg_group);
index e35dbf85841fa6067094a336ef23d9edbb3427e6..8e0290b38e431ed070f4a84b8db078cf446fd8e3 100644 (file)
@@ -2053,7 +2053,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
        /* Used for APTPL metadata w/ UNREGISTER */
        unsigned char *pr_aptpl_buf = NULL;
        unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
-       sense_reason_t ret;
+       sense_reason_t ret = TCM_NO_SENSE;
        int pr_holder = 0, type;
 
        if (!se_sess || !se_lun) {
index 26a6d183ccb1c3f1f5e9eb6ec213d150a7ebfa6b..a664c664a31ac6accf24d557b66c7ab12e2d9596 100644 (file)
@@ -58,11 +58,10 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)
        buf[7] = dev->dev_attrib.block_size & 0xff;
 
        rbuf = transport_kmap_data_sg(cmd);
-       if (!rbuf)
-               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
-       memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
-       transport_kunmap_data_sg(cmd);
+       if (rbuf) {
+               memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+               transport_kunmap_data_sg(cmd);
+       }
 
        target_complete_cmd(cmd, GOOD);
        return 0;
@@ -97,11 +96,10 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
                buf[14] = 0x80;
 
        rbuf = transport_kmap_data_sg(cmd);
-       if (!rbuf)
-               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
-       memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
-       transport_kunmap_data_sg(cmd);
+       if (rbuf) {
+               memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+               transport_kunmap_data_sg(cmd);
+       }
 
        target_complete_cmd(cmd, GOOD);
        return 0;
index 84f9e96e8ace79d41ea71723f8ca92a28c00623a..2d88f087d9616a53ca7dad67cb44c5cf2a3d33e3 100644 (file)
@@ -641,11 +641,10 @@ spc_emulate_inquiry(struct se_cmd *cmd)
 
 out:
        rbuf = transport_kmap_data_sg(cmd);
-       if (!rbuf)
-               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
-       memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
-       transport_kunmap_data_sg(cmd);
+       if (rbuf) {
+               memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+               transport_kunmap_data_sg(cmd);
+       }
 
        if (!ret)
                target_complete_cmd(cmd, GOOD);
@@ -851,7 +850,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
        char *cdb = cmd->t_task_cdb;
-       unsigned char *buf, *map_buf;
+       unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;
        int type = dev->transport->get_device_type(dev);
        int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
        bool dbd = !!(cdb[1] & 0x08);
@@ -863,26 +862,8 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
        int ret;
        int i;
 
-       map_buf = transport_kmap_data_sg(cmd);
-       if (!map_buf)
-               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-       /*
-        * If SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is not set, then we
-        * know we actually allocated a full page.  Otherwise, if the
-        * data buffer is too small, allocate a temporary buffer so we
-        * don't have to worry about overruns in all our INQUIRY
-        * emulation handling.
-        */
-       if (cmd->data_length < SE_MODE_PAGE_BUF &&
-           (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
-               buf = kzalloc(SE_MODE_PAGE_BUF, GFP_KERNEL);
-               if (!buf) {
-                       transport_kunmap_data_sg(cmd);
-                       return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               }
-       } else {
-               buf = map_buf;
-       }
+       memset(buf, 0, SE_MODE_PAGE_BUF);
+
        /*
         * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
         * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
@@ -934,8 +915,6 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
        if (page == 0x3f) {
                if (subpage != 0x00 && subpage != 0xff) {
                        pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
-                       kfree(buf);
-                       transport_kunmap_data_sg(cmd);
                        return TCM_INVALID_CDB_FIELD;
                }
 
@@ -972,7 +951,6 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
                pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
                       page, subpage);
 
-       transport_kunmap_data_sg(cmd);
        return TCM_UNKNOWN_MODE_PAGE;
 
 set_length:
@@ -981,12 +959,12 @@ set_length:
        else
                buf[0] = length - 1;
 
-       if (buf != map_buf) {
-               memcpy(map_buf, buf, cmd->data_length);
-               kfree(buf);
+       rbuf = transport_kmap_data_sg(cmd);
+       if (rbuf) {
+               memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length));
+               transport_kunmap_data_sg(cmd);
        }
 
-       transport_kunmap_data_sg(cmd);
        target_complete_cmd(cmd, GOOD);
        return 0;
 }
index c23c76ccef65aa1da973fd2869389a72774daacc..bd587b70661a0ce11f6aaab596f4e46c6ee14c7e 100644 (file)
@@ -541,9 +541,6 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
 
 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 {
-       if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
-               transport_lun_remove_cmd(cmd);
-
        if (transport_cmd_check_stop_to_fabric(cmd))
                return;
        if (remove)
@@ -1396,6 +1393,8 @@ static void target_complete_tmr_failure(struct work_struct *work)
 
        se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
        se_cmd->se_tfo->queue_tm_rsp(se_cmd);
+
+       transport_cmd_check_stop_to_fabric(se_cmd);
 }
 
 /**
@@ -1688,6 +1687,7 @@ void target_execute_cmd(struct se_cmd *cmd)
        }
 
        cmd->t_state = TRANSPORT_PROCESSING;
+       cmd->transport_state |= CMD_T_ACTIVE;
        spin_unlock_irq(&cmd->t_state_lock);
 
        if (!target_handle_task_attr(cmd))
@@ -2597,6 +2597,16 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
         * SENSE KEY values from include/scsi/scsi.h
         */
        switch (reason) {
+       case TCM_NO_SENSE:
+               /* CURRENT ERROR */
+               buffer[0] = 0x70;
+               buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
+               /* Not Ready */
+               buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
+               /* NO ADDITIONAL SENSE INFORMATION */
+               buffer[SPC_ASC_KEY_OFFSET] = 0;
+               buffer[SPC_ASCQ_KEY_OFFSET] = 0;
+               break;
        case TCM_NON_EXISTENT_LUN:
                /* CURRENT ERROR */
                buffer[0] = 0x70;
@@ -2743,7 +2753,7 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
                /* ILLEGAL REQUEST */
                buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* LOGICAL UNIT COMMUNICATION FAILURE */
-               buffer[SPC_ASC_KEY_OFFSET] = 0x80;
+               buffer[SPC_ASC_KEY_OFFSET] = 0x08;
                break;
        }
        /*
@@ -2804,6 +2814,8 @@ void transport_send_task_abort(struct se_cmd *cmd)
        }
        cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 
+       transport_lun_remove_cmd(cmd);
+
        pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
                " ITT: 0x%08x\n", cmd->t_task_cdb[0],
                cmd->se_tfo->get_task_tag(cmd));
index 12d6fa21e5e19b68631dbf5b2d70f98f7cfbf864..6659dd36e806519af35acb47d3a85cf665bd4a38 100644 (file)
@@ -355,11 +355,11 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
 
        tport = ft_tport_create(rdata->local_port);
        if (!tport)
-               return 0;       /* not a target for this local port */
+               goto not_target;        /* not a target for this local port */
 
        acl = ft_acl_get(tport->tpg, rdata);
        if (!acl)
-               return 0;
+               goto not_target;        /* no target for this remote */
 
        if (!rspp)
                goto fill;
@@ -396,12 +396,18 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
 
        /*
         * OR in our service parameters with other provider (initiator), if any.
-        * TBD XXX - indicate RETRY capability?
         */
 fill:
        fcp_parm = ntohl(spp->spp_params);
+       fcp_parm &= ~FCP_SPPF_RETRY;
        spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN);
        return FC_SPP_RESP_ACK;
+
+not_target:
+       fcp_parm = ntohl(spp->spp_params);
+       fcp_parm &= ~FCP_SPPF_TARG_FCN;
+       spp->spp_params = htonl(fcp_parm);
+       return 0;
 }
 
 /**
index be6a373601b783677b876a512854edcfb28f3732..79ff3a5e925d6fdc252c6aa6d2f066e959e4637f 100644 (file)
@@ -441,6 +441,8 @@ static int pty_bsd_ioctl(struct tty_struct *tty,
                return pty_get_pktmode(tty, (int __user *)arg);
        case TIOCSIG:    /* Send signal to other side of pty */
                return pty_signal(tty, (int) arg);
+       case TIOCGPTN: /* TTY returns ENOTTY, but glibc expects EINVAL here */
+               return -EINVAL;
        }
        return -ENOIOCTLCMD;
 }
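
Returning -EINVAL for TIOCGPTN here answers user space directly, whereas the fall-through -ENOIOCTLCMD lets the tty core keep searching and eventually surfaces as ENOTTY, which is not the errno glibc expects according to the comment in the patch. A minimal sketch of that dispatch shape, with an illustrative handler name:

#include <linux/errno.h>
#include <asm/ioctls.h>

static int example_pty_ioctl(unsigned int cmd)
{
        switch (cmd) {
        case TIOCGPTN:                  /* not meaningful on this side */
                return -EINVAL;         /* answer directly with EINVAL */
        }
        return -ENOIOCTLCMD;            /* let the core try generic handlers */
}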
index d085e3a8ec0644bc9760caa0aed026533905c2a4..f9320437a64971c605b3e153d2a8aa4b160155bf 100644 (file)
@@ -300,6 +300,12 @@ static const struct serial8250_config uart_config[] = {
                                  UART_FCR_R_TRIG_00 | UART_FCR_T_TRIG_00,
                .flags          = UART_CAP_FIFO,
        },
+       [PORT_BRCM_TRUMANAGE] = {
+               .name           = "TruManage",
+               .fifo_size      = 1,
+               .tx_loadsz      = 1024,
+               .flags          = UART_CAP_HFIFO,
+       },
        [PORT_8250_CIR] = {
                .name           = "CIR port"
        }
@@ -1490,6 +1496,11 @@ void serial8250_tx_chars(struct uart_8250_port *up)
                port->icount.tx++;
                if (uart_circ_empty(xmit))
                        break;
+               if (up->capabilities & UART_CAP_HFIFO) {
+                       if ((serial_port_in(port, UART_LSR) & BOTH_EMPTY) !=
+                           BOTH_EMPTY)
+                               break;
+               }
        } while (--count > 0);
 
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
index 3b4ea84898c2e719dc233ecddde63dc3c421bd0d..12caa1292b75c20b3d8221d4576a6f97c69db3d4 100644 (file)
@@ -40,6 +40,7 @@ struct serial8250_config {
 #define UART_CAP_AFE   (1 << 11)       /* MCR-based hw flow control */
 #define UART_CAP_UUE   (1 << 12)       /* UART needs IER bit 6 set (Xscale) */
 #define UART_CAP_RTOIE (1 << 13)       /* UART needs IER bit 4 set (Xscale, Tegra) */
+#define UART_CAP_HFIFO (1 << 14)       /* UART has a "hidden" FIFO */
 
 #define UART_BUG_QUOT  (1 << 0)        /* UART has buggy quot LSB */
 #define UART_BUG_TXEN  (1 << 1)        /* UART has buggy TX IIR status */
index 1d0dba2d562d27bb72a3e91f2273176c4cd67f41..096d2ef48b32f6b62abfe5c220a44440545fb0b4 100644 (file)
@@ -79,7 +79,7 @@ static int dw8250_handle_irq(struct uart_port *p)
        } else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
                /* Clear the USR and write the LCR again. */
                (void)p->serial_in(p, UART_USR);
-               p->serial_out(p, d->last_lcr, UART_LCR);
+               p->serial_out(p, UART_LCR, d->last_lcr);
 
                return 1;
        }
index 26b9dc012ed0b64ec1f1b3807b573af0f5f82b91..a27a98e1b0667fc8bfd7e379eaaaa1cfa6c9229a 100644 (file)
@@ -1085,6 +1085,18 @@ pci_omegapci_setup(struct serial_private *priv,
        return setup_port(priv, port, 2, idx * 8, 0);
 }
 
+static int
+pci_brcm_trumanage_setup(struct serial_private *priv,
+                        const struct pciserial_board *board,
+                        struct uart_8250_port *port, int idx)
+{
+       int ret = pci_default_setup(priv, board, port, idx);
+
+       port->port.type = PORT_BRCM_TRUMANAGE;
+       port->port.flags = (port->port.flags | UPF_FIXED_PORT | UPF_FIXED_TYPE);
+       return ret;
+}
+
 static int skip_tx_en_setup(struct serial_private *priv,
                        const struct pciserial_board *board,
                        struct uart_8250_port *port, int idx)
@@ -1301,9 +1313,10 @@ pci_wch_ch353_setup(struct serial_private *priv,
 #define PCI_VENDOR_ID_AGESTAR          0x5372
 #define PCI_DEVICE_ID_AGESTAR_9375     0x6872
 #define PCI_VENDOR_ID_ASIX             0x9710
-#define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0019
 #define PCI_DEVICE_ID_COMMTECH_4224PCIE        0x0020
 #define PCI_DEVICE_ID_COMMTECH_4228PCIE        0x0021
+#define PCI_DEVICE_ID_COMMTECH_4222PCIE        0x0022
+#define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
 
 
 /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
@@ -1953,6 +1966,17 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
                .subdevice      = PCI_ANY_ID,
                .setup          = pci_xr17v35x_setup,
        },
+       /*
+        * Broadcom TruManage (NetXtreme)
+        */
+       {
+               .vendor         = PCI_VENDOR_ID_BROADCOM,
+               .device         = PCI_DEVICE_ID_BROADCOM_TRUMANAGE,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .setup          = pci_brcm_trumanage_setup,
+       },
+
        /*
         * Default "match everything" terminator entry
         */
@@ -2148,6 +2172,7 @@ enum pci_board_num_t {
        pbn_ce4100_1_115200,
        pbn_omegapci,
        pbn_NETMOS9900_2s_115200,
+       pbn_brcm_trumanage,
 };
 
 /*
@@ -2246,7 +2271,7 @@ static struct pciserial_board pci_boards[] = {
 
        [pbn_b0_8_1152000_200] = {
                .flags          = FL_BASE0,
-               .num_ports      = 2,
+               .num_ports      = 8,
                .base_baud      = 1152000,
                .uart_offset    = 0x200,
        },
@@ -2892,6 +2917,12 @@ static struct pciserial_board pci_boards[] = {
                .num_ports      = 2,
                .base_baud      = 115200,
        },
+       [pbn_brcm_trumanage] = {
+               .flags          = FL_BASE0,
+               .num_ports      = 1,
+               .reg_shift      = 2,
+               .base_baud      = 115200,
+       },
 };
 
 static const struct pci_device_id blacklist[] = {
@@ -4470,6 +4501,13 @@ static struct pci_device_id serial_pci_tbl[] = {
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
                pbn_omegapci },
 
+       /*
+        * Broadcom TruManage
+        */
+       {       PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BROADCOM_TRUMANAGE,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               pbn_brcm_trumanage },
+
        /*
         * AgeStar as-prs2-009
         */
index 675d94ab0aff63f97de2f52e7da565b8e5309013..8cb6d8d66a1362b1b2f5566a875c1f56248a3b29 100644 (file)
@@ -637,6 +637,7 @@ static void ifx_port_shutdown(struct tty_port *port)
 
        clear_bit(IFX_SPI_STATE_IO_AVAILABLE, &ifx_dev->flags);
        mrdy_set_low(ifx_dev);
+       del_timer(&ifx_dev->spi_timer);
        clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags);
        tasklet_kill(&ifx_dev->io_work_tasklet);
 }
@@ -810,7 +811,8 @@ static void ifx_spi_io(unsigned long data)
                ifx_dev->spi_xfer.cs_change = 0;
                ifx_dev->spi_xfer.speed_hz = ifx_dev->spi_dev->max_speed_hz;
                /* ifx_dev->spi_xfer.speed_hz = 390625; */
-               ifx_dev->spi_xfer.bits_per_word = spi_bpw;
+               ifx_dev->spi_xfer.bits_per_word =
+                       ifx_dev->spi_dev->bits_per_word;
 
                ifx_dev->spi_xfer.tx_buf = ifx_dev->tx_buffer;
                ifx_dev->spi_xfer.rx_buf = ifx_dev->rx_buffer;
index 7ce3197087bbab460e1b498cb2e864304b78ea44..dd6277eb5a3820408e946c63abe26cf072b88fdf 100644 (file)
@@ -179,8 +179,7 @@ static void max3100_work(struct work_struct *w);
 
 static void max3100_dowork(struct max3100_port *s)
 {
-       if (!s->force_end_work && !work_pending(&s->work) &&
-           !freezing(current) && !s->suspending)
+       if (!s->force_end_work && !freezing(current) && !s->suspending)
                queue_work(s->workqueue, &s->work);
 }
 
index 6db23b035efe0766dcc99e8bddb0a301dd458217..e55615eb34ad32037cf78dc18aef63a00373e1ee 100644 (file)
@@ -253,7 +253,7 @@ static void mxs_auart_tx_chars(struct mxs_auart_port *s)
        struct circ_buf *xmit = &s->port.state->xmit;
 
        if (auart_dma_enabled(s)) {
-               int i = 0;
+               u32 i = 0;
                int size;
                void *buffer = s->tx_dma_buf;
 
@@ -412,10 +412,12 @@ static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl)
 
        u32 ctrl = readl(u->membase + AUART_CTRL2);
 
-       ctrl &= ~AUART_CTRL2_RTSEN;
+       ctrl &= ~(AUART_CTRL2_RTSEN | AUART_CTRL2_RTS);
        if (mctrl & TIOCM_RTS) {
                if (tty_port_cts_enabled(&u->state->port))
                        ctrl |= AUART_CTRL2_RTSEN;
+               else
+                       ctrl |= AUART_CTRL2_RTS;
        }
 
        s->ctrl = mctrl;
index 12e5249d053e79b8d15991daf27614755c03c3bf..e514b3a4dc572069da4df72cba488f6be9cd1c98 100644 (file)
@@ -1006,7 +1006,6 @@ static void s3c24xx_serial_resetport(struct uart_port *port,
 
        ucon &= ucon_mask;
        wr_regl(port, S3C2410_UCON,  ucon | cfg->ucon);
-       wr_regl(port, S3C2410_ULCON, cfg->ulcon);
 
        /* reset both fifos */
        wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
index 8fd181436a6ba246e2247c5be871ac7f3c71c760..d5ed9f61300562c8febbb8a8095304a1c9955899 100644 (file)
@@ -604,7 +604,7 @@ static int vt8500_serial_probe(struct platform_device *pdev)
        vt8500_port->uart.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
 
        vt8500_port->clk = of_clk_get(pdev->dev.of_node, 0);
-       if (vt8500_port->clk) {
+       if (!IS_ERR(vt8500_port->clk)) {
                vt8500_port->uart.uartclk = clk_get_rate(vt8500_port->clk);
        } else {
                /* use the default of 24Mhz if not specified and warn */
index b3c4a250ff86c6ad71b0e91de5cf02892a9d62a2..814655ee2d6126bace613a3464d81475f2be9362 100644 (file)
@@ -15,6 +15,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/sched.h>
+#include <linux/sched/rt.h>
 #include <linux/interrupt.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
@@ -41,6 +42,7 @@
 #include <linux/slab.h>
 #include <linux/input.h>
 #include <linux/uaccess.h>
+#include <linux/moduleparam.h>
 
 #include <asm/ptrace.h>
 #include <asm/irq_regs.h>
@@ -577,8 +579,71 @@ struct sysrq_state {
        bool active;
        bool need_reinject;
        bool reinjecting;
+
+       /* reset sequence handling */
+       bool reset_canceled;
+       unsigned long reset_keybit[BITS_TO_LONGS(KEY_CNT)];
+       int reset_seq_len;
+       int reset_seq_cnt;
+       int reset_seq_version;
 };
 
+#define SYSRQ_KEY_RESET_MAX    20 /* Should be plenty */
+static unsigned short sysrq_reset_seq[SYSRQ_KEY_RESET_MAX];
+static unsigned int sysrq_reset_seq_len;
+static unsigned int sysrq_reset_seq_version = 1;
+
+static void sysrq_parse_reset_sequence(struct sysrq_state *state)
+{
+       int i;
+       unsigned short key;
+
+       state->reset_seq_cnt = 0;
+
+       for (i = 0; i < sysrq_reset_seq_len; i++) {
+               key = sysrq_reset_seq[i];
+
+               if (key == KEY_RESERVED || key > KEY_MAX)
+                       break;
+
+               __set_bit(key, state->reset_keybit);
+               state->reset_seq_len++;
+
+               if (test_bit(key, state->key_down))
+                       state->reset_seq_cnt++;
+       }
+
+       /* Disable reset until the previously pressed keys are released */
+       state->reset_canceled = state->reset_seq_cnt != 0;
+
+       state->reset_seq_version = sysrq_reset_seq_version;
+}
+
+static bool sysrq_detect_reset_sequence(struct sysrq_state *state,
+                                       unsigned int code, int value)
+{
+       if (!test_bit(code, state->reset_keybit)) {
+               /*
+                * Pressing any key _not_ in reset sequence cancels
+                * the reset sequence.
+                */
+               if (value && state->reset_seq_cnt)
+                       state->reset_canceled = true;
+       } else if (value == 0) {
+               /* key release */
+               if (--state->reset_seq_cnt == 0)
+                       state->reset_canceled = false;
+       } else if (value == 1) {
+               /* key press, not autorepeat */
+               if (++state->reset_seq_cnt == state->reset_seq_len &&
+                   !state->reset_canceled) {
+                       return true;
+               }
+       }
+
+       return false;
+}
+
 static void sysrq_reinject_alt_sysrq(struct work_struct *work)
 {
        struct sysrq_state *sysrq =
@@ -605,100 +670,121 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work)
        }
 }
 
-static bool sysrq_filter(struct input_handle *handle,
-                        unsigned int type, unsigned int code, int value)
+static bool sysrq_handle_keypress(struct sysrq_state *sysrq,
+                                 unsigned int code, int value)
 {
-       struct sysrq_state *sysrq = handle->private;
        bool was_active = sysrq->active;
        bool suppress;
 
-       /*
-        * Do not filter anything if we are in the process of re-injecting
-        * Alt+SysRq combination.
-        */
-       if (sysrq->reinjecting)
-               return false;
+       switch (code) {
 
-       switch (type) {
+       case KEY_LEFTALT:
+       case KEY_RIGHTALT:
+               if (!value) {
+                       /* One of ALTs is being released */
+                       if (sysrq->active && code == sysrq->alt_use)
+                               sysrq->active = false;
 
-       case EV_SYN:
-               suppress = false;
+                       sysrq->alt = KEY_RESERVED;
+
+               } else if (value != 2) {
+                       sysrq->alt = code;
+                       sysrq->need_reinject = false;
+               }
                break;
 
-       case EV_KEY:
-               switch (code) {
+       case KEY_SYSRQ:
+               if (value == 1 && sysrq->alt != KEY_RESERVED) {
+                       sysrq->active = true;
+                       sysrq->alt_use = sysrq->alt;
+                       /*
+                        * If nothing else will be pressed we'll need
+                        * to re-inject the Alt-SysRq keystroke.
+                        */
+                       sysrq->need_reinject = true;
+               }
 
-               case KEY_LEFTALT:
-               case KEY_RIGHTALT:
-                       if (!value) {
-                               /* One of ALTs is being released */
-                               if (sysrq->active && code == sysrq->alt_use)
-                                       sysrq->active = false;
+               /*
+                * Pretend that sysrq was never pressed at all. This
+                * is needed to properly handle KGDB which will try
+                * to release all keys after exiting the debugger. If we
+                * do not clear the key bit, KGDB will end up sending
+                * release events for Alt and SysRq, potentially
+                * triggering print screen function.
+                */
+               if (sysrq->active)
+                       clear_bit(KEY_SYSRQ, sysrq->handle.dev->key);
 
-                               sysrq->alt = KEY_RESERVED;
+               break;
 
-                       } else if (value != 2) {
-                               sysrq->alt = code;
-                               sysrq->need_reinject = false;
-                       }
-                       break;
+       default:
+               if (sysrq->active && value && value != 2) {
+                       sysrq->need_reinject = false;
+                       __handle_sysrq(sysrq_xlate[code], true);
+               }
+               break;
+       }
 
-               case KEY_SYSRQ:
-                       if (value == 1 && sysrq->alt != KEY_RESERVED) {
-                               sysrq->active = true;
-                               sysrq->alt_use = sysrq->alt;
-                               /*
-                                * If nothing else will be pressed we'll need
-                                * to re-inject Alt-SysRq keysroke.
-                                */
-                               sysrq->need_reinject = true;
-                       }
+       suppress = sysrq->active;
 
-                       /*
-                        * Pretend that sysrq was never pressed at all. This
-                        * is needed to properly handle KGDB which will try
-                        * to release all keys after exiting debugger. If we
-                        * do not clear key bit it KGDB will end up sending
-                        * release events for Alt and SysRq, potentially
-                        * triggering print screen function.
-                        */
-                       if (sysrq->active)
-                               clear_bit(KEY_SYSRQ, handle->dev->key);
+       if (!sysrq->active) {
 
-                       break;
+               /*
+                * See if reset sequence has changed since the last time.
+                */
+               if (sysrq->reset_seq_version != sysrq_reset_seq_version)
+                       sysrq_parse_reset_sequence(sysrq);
 
-               default:
-                       if (sysrq->active && value && value != 2) {
-                               sysrq->need_reinject = false;
-                               __handle_sysrq(sysrq_xlate[code], true);
-                       }
-                       break;
+               /*
+                * If we are not suppressing key presses keep track of
+                * keyboard state so we can release keys that have been
+                * pressed before entering SysRq mode.
+                */
+               if (value)
+                       set_bit(code, sysrq->key_down);
+               else
+                       clear_bit(code, sysrq->key_down);
+
+               if (was_active)
+                       schedule_work(&sysrq->reinject_work);
+
+               if (sysrq_detect_reset_sequence(sysrq, code, value)) {
+                       /* Force emergency reboot */
+                       __handle_sysrq(sysrq_xlate[KEY_B], false);
                }
 
-               suppress = sysrq->active;
+       } else if (value == 0 && test_and_clear_bit(code, sysrq->key_down)) {
+               /*
+                * Pass on release events for keys that were pressed before
+                * entering SysRq mode.
+                */
+               suppress = false;
+       }
 
-               if (!sysrq->active) {
-                       /*
-                        * If we are not suppressing key presses keep track of
-                        * keyboard state so we can release keys that have been
-                        * pressed before entering SysRq mode.
-                        */
-                       if (value)
-                               set_bit(code, sysrq->key_down);
-                       else
-                               clear_bit(code, sysrq->key_down);
+       return suppress;
+}
 
-                       if (was_active)
-                               schedule_work(&sysrq->reinject_work);
+static bool sysrq_filter(struct input_handle *handle,
+                        unsigned int type, unsigned int code, int value)
+{
+       struct sysrq_state *sysrq = handle->private;
+       bool suppress;
 
-               } else if (value == 0 &&
-                          test_and_clear_bit(code, sysrq->key_down)) {
-                       /*
-                        * Pass on release events for keys that was pressed before
-                        * entering SysRq mode.
-                        */
-                       suppress = false;
-               }
+       /*
+        * Do not filter anything if we are in the process of re-injecting
+        * Alt+SysRq combination.
+        */
+       if (sysrq->reinjecting)
+               return false;
+
+       switch (type) {
+
+       case EV_SYN:
+               suppress = false;
+               break;
+
+       case EV_KEY:
+               suppress = sysrq_handle_keypress(sysrq, code, value);
                break;
 
        default:
@@ -786,7 +872,20 @@ static bool sysrq_handler_registered;
 
 static inline void sysrq_register_handler(void)
 {
+       extern unsigned short platform_sysrq_reset_seq[] __weak;
+       unsigned short key;
        int error;
+       int i;
+
+       if (platform_sysrq_reset_seq) {
+               for (i = 0; i < ARRAY_SIZE(sysrq_reset_seq); i++) {
+                       key = platform_sysrq_reset_seq[i];
+                       if (key == KEY_RESERVED || key > KEY_MAX)
+                               break;
+
+                       sysrq_reset_seq[sysrq_reset_seq_len++] = key;
+               }
+       }
 
        error = input_register_handler(&sysrq_handler);
        if (error)
@@ -803,6 +902,36 @@ static inline void sysrq_unregister_handler(void)
        }
 }
 
+static int sysrq_reset_seq_param_set(const char *buffer,
+                                    const struct kernel_param *kp)
+{
+       unsigned long val;
+       int error;
+
+       error = strict_strtoul(buffer, 0, &val);
+       if (error < 0)
+               return error;
+
+       if (val > KEY_MAX)
+               return -EINVAL;
+
+       *((unsigned short *)kp->arg) = val;
+       sysrq_reset_seq_version++;
+
+       return 0;
+}
+
+static struct kernel_param_ops param_ops_sysrq_reset_seq = {
+       .get    = param_get_ushort,
+       .set    = sysrq_reset_seq_param_set,
+};
+
+#define param_check_sysrq_reset_seq(name, p)   \
+       __param_check(name, p, unsigned short)
+
+module_param_array_named(reset_seq, sysrq_reset_seq, sysrq_reset_seq,
+                        &sysrq_reset_seq_len, 0644);
+
 #else
 
 static inline void sysrq_register_handler(void)
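
For reference, the reset-sequence hook added above can be seeded in two ways: at runtime through the new reset_seq module parameter, or at build time through the weak platform_sysrq_reset_seq[] array that sysrq_register_handler() copies from. A minimal sketch of a hypothetical board file providing that array follows; the array name and the KEY_RESERVED sentinel convention come from the hunk above, while the specific keycodes are purely illustrative.

#include <linux/input.h>

/*
 * Hypothetical board file: a strong definition of the __weak symbol
 * consumed by sysrq_register_handler(). sysrq copies entries until it
 * sees KEY_RESERVED or fills all SYSRQ_KEY_RESET_MAX slots.
 */
unsigned short platform_sysrq_reset_seq[] = {
	KEY_VOLUMEDOWN,
	KEY_VOLUMEUP,
	KEY_POWER,
	KEY_RESERVED,	/* sentinel: stops the copy loop */
};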
index 4c90b510d0160c246f1fd5c6c280a88d091c80f8..640ae6c6d2d2ae67f504cb72582c40a8e85c9216 100644 (file)
@@ -37,6 +37,7 @@ config USB_ARCH_HAS_EHCI
        default y if ARCH_W90X900
        default y if ARCH_AT91
        default y if ARCH_MXC
+       default y if ARCH_MXS
        default y if ARCH_OMAP3
        default y if ARCH_CNS3XXX
        default y if ARCH_VT8500
index caecad9213f5cf14f3ef45827548c77e0648dfb9..8e9d31277c436ecce3fdd61ccb78031148a3e53e 100644 (file)
@@ -70,6 +70,9 @@ static int host_start(struct ci13xxx *ci)
        else
                ci->hcd = hcd;
 
+       if (ci->platdata->flags & CI13XXX_DISABLE_STREAMING)
+               hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS, USBMODE_CI_SDIS);
+
        return ret;
 }
 
index 8d809a811e169fac9ece6259c542c17c4b16f907..2d92cce260d7fd646d13d69e01f2152bbd43a487 100644 (file)
@@ -1602,6 +1602,9 @@ static const struct usb_device_id acm_ids[] = {
        { USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */
        .driver_info = NO_UNION_NORMAL,
        },
+       { USB_DEVICE(0x05f9, 0x4002), /* PSC Scanning, Magellan 800i */
+       .driver_info = NO_UNION_NORMAL,
+       },
        { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
        .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
        },
index 4225d5e721312a6ec87a96352d05c7db8a682593..8e64adf8e4d56de0409909dd143e9de52f4fefb0 100644 (file)
@@ -39,6 +39,7 @@
 #include <asm/unaligned.h>
 #include <linux/platform_device.h>
 #include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
 
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
@@ -1025,6 +1026,49 @@ static int register_root_hub(struct usb_hcd *hcd)
        return retval;
 }
 
+/*
+ * usb_hcd_start_port_resume - a root-hub port is sending a resume signal
+ * @bus: the bus which the root hub belongs to
+ * @portnum: the port which is being resumed
+ *
+ * HCDs should call this function when they know that a resume signal is
+ * being sent to a root-hub port.  The root hub will be prevented from
+ * going into autosuspend until usb_hcd_end_port_resume() is called.
+ *
+ * The bus's private lock must be held by the caller.
+ */
+void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum)
+{
+       unsigned bit = 1 << portnum;
+
+       if (!(bus->resuming_ports & bit)) {
+               bus->resuming_ports |= bit;
+               pm_runtime_get_noresume(&bus->root_hub->dev);
+       }
+}
+EXPORT_SYMBOL_GPL(usb_hcd_start_port_resume);
+
+/*
+ * usb_hcd_end_port_resume - a root-hub port has stopped sending a resume signal
+ * @bus: the bus which the root hub belongs to
+ * @portnum: the port which is being resumed
+ *
+ * HCDs should call this function when they know that a resume signal has
+ * stopped being sent to a root-hub port.  The root hub will be allowed to
+ * autosuspend again.
+ *
+ * The bus's private lock must be held by the caller.
+ */
+void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum)
+{
+       unsigned bit = 1 << portnum;
+
+       if (bus->resuming_ports & bit) {
+               bus->resuming_ports &= ~bit;
+               pm_runtime_put_noidle(&bus->root_hub->dev);
+       }
+}
+EXPORT_SYMBOL_GPL(usb_hcd_end_port_resume);
 
 /*-------------------------------------------------------------------------*/
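
As an aside, the pairing these two helpers expect from a host controller driver mirrors the ehci-hub.c changes later in this same diff: call the first when resume signalling starts on a root-hub port and the second once it has stopped, in both cases with the bus's private lock held. A condensed, illustrative sketch follows; only the two helper names and the hcd->self bus pointer are taken from the surrounding code, the wrapper functions are invented for the example.

#include <linux/usb.h>
#include <linux/usb/hcd.h>

/*
 * Illustrative only: keep the root hub from autosuspending for the
 * duration of a port resume, as the new helpers above require of HCDs.
 */
static void example_port_resume_begin(struct usb_hcd *hcd, int portnum)
{
	/* resume signalling is now being driven on this port */
	usb_hcd_start_port_resume(&hcd->self, portnum);
}

static void example_port_resume_finish(struct usb_hcd *hcd, int portnum)
{
	/* signalling has stopped; the root hub may autosuspend again */
	usb_hcd_end_port_resume(&hcd->self, portnum);
}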
 
index a815fd2cc5e729287fe24b8a4b9bb4fc21082d6b..cbf7168e3ce7062c7610e1dfe0c13b6e49f9ff86 100644 (file)
@@ -877,6 +877,60 @@ static int hub_hub_status(struct usb_hub *hub,
        return ret;
 }
 
+static int hub_set_port_link_state(struct usb_hub *hub, int port1,
+                       unsigned int link_status)
+{
+       return set_port_feature(hub->hdev,
+                       port1 | (link_status << 3),
+                       USB_PORT_FEAT_LINK_STATE);
+}
+
+/*
+ * If USB 3.0 ports are placed into the Disabled state, they will no longer
+ * detect any device connects or disconnects.  This is generally not what the
+ * USB core wants, since it expects a disabled port to produce a port status
+ * change event when a new device connects.
+ *
+ * Instead, set the link state to Disabled, wait for the link to settle into
+ * that state, clear any change bits, and then put the port into the RxDetect
+ * state.
+ */
+static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
+{
+       int ret;
+       int total_time;
+       u16 portchange, portstatus;
+
+       if (!hub_is_superspeed(hub->hdev))
+               return -EINVAL;
+
+       ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
+       if (ret) {
+               dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
+                               port1, ret);
+               return ret;
+       }
+
+       /* Wait for the link to enter the disabled state. */
+       for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
+               ret = hub_port_status(hub, port1, &portstatus, &portchange);
+               if (ret < 0)
+                       return ret;
+
+               if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+                               USB_SS_PORT_LS_SS_DISABLED)
+                       break;
+               if (total_time >= HUB_DEBOUNCE_TIMEOUT)
+                       break;
+               msleep(HUB_DEBOUNCE_STEP);
+       }
+       if (total_time >= HUB_DEBOUNCE_TIMEOUT)
+               dev_warn(hub->intfdev, "Could not disable port %d after %d ms\n",
+                               port1, total_time);
+
+       return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
+}
+
 static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
 {
        struct usb_device *hdev = hub->hdev;
@@ -885,8 +939,13 @@ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
        if (hub->ports[port1 - 1]->child && set_state)
                usb_set_device_state(hub->ports[port1 - 1]->child,
                                USB_STATE_NOTATTACHED);
-       if (!hub->error && !hub_is_superspeed(hub->hdev))
-               ret = clear_port_feature(hdev, port1, USB_PORT_FEAT_ENABLE);
+       if (!hub->error) {
+               if (hub_is_superspeed(hub->hdev))
+                       ret = hub_usb3_port_disable(hub, port1);
+               else
+                       ret = clear_port_feature(hdev, port1,
+                                       USB_PORT_FEAT_ENABLE);
+       }
        if (ret)
                dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
                                port1, ret);
@@ -2440,7 +2499,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
 #define HUB_SHORT_RESET_TIME   10
 #define HUB_BH_RESET_TIME      50
 #define HUB_LONG_RESET_TIME    200
-#define HUB_RESET_TIMEOUT      500
+#define HUB_RESET_TIMEOUT      800
 
 static int hub_port_reset(struct usb_hub *hub, int port1,
                        struct usb_device *udev, unsigned int delay, bool warm);
@@ -2475,6 +2534,10 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
                if (ret < 0)
                        return ret;
 
+               /* The port state is unknown until the reset completes. */
+               if ((portstatus & USB_PORT_STAT_RESET))
+                       goto delay;
+
                /*
                 * Some buggy devices require a warm reset to be issued even
                 * when the port appears not to be connected.
@@ -2520,11 +2583,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
                        if ((portchange & USB_PORT_STAT_C_CONNECTION))
                                return -ENOTCONN;
 
-                       /* if we`ve finished resetting, then break out of
-                        * the loop
-                        */
-                       if (!(portstatus & USB_PORT_STAT_RESET) &&
-                           (portstatus & USB_PORT_STAT_ENABLE)) {
+                       if ((portstatus & USB_PORT_STAT_ENABLE)) {
                                if (hub_is_wusb(hub))
                                        udev->speed = USB_SPEED_WIRELESS;
                                else if (hub_is_superspeed(hub->hdev))
@@ -2538,10 +2597,15 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
                                return 0;
                        }
                } else {
-                       if (portchange & USB_PORT_STAT_C_BH_RESET)
-                               return 0;
+                       if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
+                                       hub_port_warm_reset_required(hub,
+                                               portstatus))
+                               return -ENOTCONN;
+
+                       return 0;
                }
 
+delay:
                /* switch to the long delay after two short delay failures */
                if (delay_time >= 2 * HUB_SHORT_RESET_TIME)
                        delay = HUB_LONG_RESET_TIME;
@@ -2565,14 +2629,11 @@ static void hub_port_finish_reset(struct usb_hub *hub, int port1,
                        msleep(10 + 40);
                        update_devnum(udev, 0);
                        hcd = bus_to_hcd(udev->bus);
-                       if (hcd->driver->reset_device) {
-                               *status = hcd->driver->reset_device(hcd, udev);
-                               if (*status < 0) {
-                                       dev_err(&udev->dev, "Cannot reset "
-                                                       "HCD device state\n");
-                                       break;
-                               }
-                       }
+                       /* The xHC may think the device is already reset,
+                        * so ignore the status.
+                        */
+                       if (hcd->driver->reset_device)
+                               hcd->driver->reset_device(hcd, udev);
                }
                /* FALL THROUGH */
        case -ENOTCONN:
@@ -2580,16 +2641,16 @@ static void hub_port_finish_reset(struct usb_hub *hub, int port1,
                clear_port_feature(hub->hdev,
                                port1, USB_PORT_FEAT_C_RESET);
                /* FIXME need disconnect() for NOTATTACHED device */
-               if (warm) {
+               if (hub_is_superspeed(hub->hdev)) {
                        clear_port_feature(hub->hdev, port1,
                                        USB_PORT_FEAT_C_BH_PORT_RESET);
                        clear_port_feature(hub->hdev, port1,
                                        USB_PORT_FEAT_C_PORT_LINK_STATE);
-               } else {
+               }
+               if (!warm)
                        usb_set_device_state(udev, *status
                                        ? USB_STATE_NOTATTACHED
                                        : USB_STATE_DEFAULT);
-               }
                break;
        }
 }
@@ -2777,6 +2838,23 @@ void usb_enable_ltm(struct usb_device *udev)
 EXPORT_SYMBOL_GPL(usb_enable_ltm);
 
 #ifdef CONFIG_USB_SUSPEND
+/*
+ * usb_disable_function_remotewakeup - disable usb3.0
+ * device's function remote wakeup
+ * @udev: target device
+ *
+ * Assume there's only one function on the USB 3.0
+ * device and disable remote wake for the first
+ * interface. FIXME if the interface association
+ * descriptor shows there's more than one function.
+ */
+static int usb_disable_function_remotewakeup(struct usb_device *udev)
+{
+       return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+                               USB_REQ_CLEAR_FEATURE, USB_RECIP_INTERFACE,
+                               USB_INTRF_FUNC_SUSPEND, 0, NULL, 0,
+                               USB_CTRL_SET_TIMEOUT);
+}
 
 /*
  * usb_port_suspend - suspend a usb device's upstream port
@@ -2894,12 +2972,19 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
                dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
                                port1, status);
                /* paranoia:  "should not happen" */
-               if (udev->do_remote_wakeup)
-                       (void) usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
-                               USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE,
-                               USB_DEVICE_REMOTE_WAKEUP, 0,
-                               NULL, 0,
-                               USB_CTRL_SET_TIMEOUT);
+               if (udev->do_remote_wakeup) {
+                       if (!hub_is_superspeed(hub->hdev)) {
+                               (void) usb_control_msg(udev,
+                                               usb_sndctrlpipe(udev, 0),
+                                               USB_REQ_CLEAR_FEATURE,
+                                               USB_RECIP_DEVICE,
+                                               USB_DEVICE_REMOTE_WAKEUP, 0,
+                                               NULL, 0,
+                                               USB_CTRL_SET_TIMEOUT);
+                       } else
+                               (void) usb_disable_function_remotewakeup(udev);
+
+               }
 
                /* Try to enable USB2 hardware LPM again */
                if (udev->usb2_hw_lpm_capable == 1)
@@ -2939,7 +3024,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
 static int finish_port_resume(struct usb_device *udev)
 {
        int     status = 0;
-       u16     devstatus;
+       u16     devstatus = 0;
 
        /* caller owns the udev device lock */
        dev_dbg(&udev->dev, "%s\n",
@@ -2984,21 +3069,37 @@ static int finish_port_resume(struct usb_device *udev)
        if (status) {
                dev_dbg(&udev->dev, "gone after usb resume? status %d\n",
                                status);
-       } else if (udev->actconfig) {
-               le16_to_cpus(&devstatus);
-               if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
-                       status = usb_control_msg(udev,
-                                       usb_sndctrlpipe(udev, 0),
-                                       USB_REQ_CLEAR_FEATURE,
+       /*
+        * There are a few quirky devices which violate the standard
+        * by claiming to have remote wakeup enabled after a reset
+        * and crash if the feature is cleared, hence the check for
+        * udev->reset_resume.
+        */
+       } else if (udev->actconfig && !udev->reset_resume) {
+               if (!hub_is_superspeed(udev->parent)) {
+                       le16_to_cpus(&devstatus);
+                       if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP))
+                               status = usb_control_msg(udev,
+                                               usb_sndctrlpipe(udev, 0),
+                                               USB_REQ_CLEAR_FEATURE,
                                                USB_RECIP_DEVICE,
-                                       USB_DEVICE_REMOTE_WAKEUP, 0,
-                                       NULL, 0,
-                                       USB_CTRL_SET_TIMEOUT);
-                       if (status)
-                               dev_dbg(&udev->dev,
-                                       "disable remote wakeup, status %d\n",
-                                       status);
+                                               USB_DEVICE_REMOTE_WAKEUP, 0,
+                                               NULL, 0,
+                                               USB_CTRL_SET_TIMEOUT);
+               } else {
+                       status = usb_get_status(udev, USB_RECIP_INTERFACE, 0,
+                                       &devstatus);
+                       le16_to_cpus(&devstatus);
+                       if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP
+                                       | USB_INTRF_STAT_FUNC_RW))
+                               status =
+                                       usb_disable_function_remotewakeup(udev);
                }
+
+               if (status)
+                       dev_dbg(&udev->dev,
+                               "disable remote wakeup, status %d\n",
+                               status);
                status = 0;
        }
        return status;
@@ -4638,9 +4739,14 @@ static void hub_events(void)
                         * SS.Inactive state.
                         */
                        if (hub_port_warm_reset_required(hub, portstatus)) {
+                               int status;
+
                                dev_dbg(hub_dev, "warm reset port %d\n", i);
-                               hub_port_reset(hub, i, NULL,
+                               status = hub_port_reset(hub, i, NULL,
                                                HUB_BH_RESET_TIME, true);
+                               if (status < 0)
+                                       hub_port_disable(hub, i, 1);
+                               connect_change = 0;
                        }
 
                        if (connect_change)
index fdefd9c7f7af089e54428b57625d394c70566561..3113c1d71442953d3f9659afff042064bca8a9b9 100644 (file)
@@ -43,6 +43,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Creative SB Audigy 2 NX */
        { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Microsoft LifeCam-VX700 v2.0 */
+       { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Logitech Quickcam Fusion */
        { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
 
index 92604b4f9712630e83898c83df01b985e8b5a631..5945aadaa1c99a41df4edff373bbfbad080e34fe 100644 (file)
@@ -56,7 +56,7 @@
 #define dump_register(nm)                              \
 {                                                      \
        .name   = __stringify(nm),                      \
-       .offset = DWC3_ ##nm,                           \
+       .offset = DWC3_ ##nm - DWC3_GLOBALS_REGS_START, \
 }
 
 static const struct debugfs_reg32 dwc3_regs[] = {
index 2e43b332aae8b891a5d0e0988172791e4b7a1f88..2fdd767f8fe890defe831c958d1d723af4b060ae 100644 (file)
@@ -1605,6 +1605,7 @@ static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
 
                if (epnum == 0 || epnum == 1) {
                        dep->endpoint.maxpacket = 512;
+                       dep->endpoint.maxburst = 1;
                        dep->endpoint.ops = &dwc3_gadget_ep0_ops;
                        if (!epnum)
                                dwc->gadget.ep0 = &dep->endpoint;
index fc0ec5e0d58ef43a11f525148dca69d1cb442188..d9f6b9372491d487a817de021f8bfc660d04cd00 100644 (file)
@@ -3231,7 +3231,7 @@ static int udc_pci_probe(
        }
 
        if (!pdev->irq) {
-               dev_err(&dev->pdev->dev, "irq not set\n");
+               dev_err(&pdev->dev, "irq not set\n");
                kfree(dev);
                dev = NULL;
                retval = -ENODEV;
@@ -3250,7 +3250,7 @@ static int udc_pci_probe(
        dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
 
        if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
-               dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq);
+               dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
                kfree(dev);
                dev = NULL;
                retval = -EBUSY;
index 95d584dbed13b08516f23d90d83acb973d989a3a..8cf0c0f6fa1fb83bab5f26ab4d54a24e7bda3875 100644 (file)
@@ -130,10 +130,7 @@ static const char ep0name[] = "ep0";
 static const char *const ep_name[] = {
        ep0name,                                /* everyone has ep0 */
 
-       /* act like a net2280: high speed, six configurable endpoints */
-       "ep-a", "ep-b", "ep-c", "ep-d", "ep-e", "ep-f",
-
-       /* or like pxa250: fifteen fixed function endpoints */
+       /* act like a pxa250: fifteen fixed function endpoints */
        "ep1in-bulk", "ep2out-bulk", "ep3in-iso", "ep4out-iso", "ep5in-int",
        "ep6in-bulk", "ep7out-bulk", "ep8in-iso", "ep9out-iso", "ep10in-int",
        "ep11in-bulk", "ep12out-bulk", "ep13in-iso", "ep14out-iso",
@@ -141,6 +138,10 @@ static const char *const ep_name[] = {
 
        /* or like sa1100: two fixed function endpoints */
        "ep1out-bulk", "ep2in-bulk",
+
+       /* and now some generic EPs so we have enough in multi config */
+       "ep3out", "ep4in", "ep5out", "ep6out", "ep7in", "ep8out", "ep9in",
+       "ep10out", "ep11out", "ep12in", "ep13out", "ep14in", "ep15out",
 };
 #define DUMMY_ENDPOINTS        ARRAY_SIZE(ep_name)
 
index 4a6961c517f261103f006259bab366265dde7e4c..8c2f251211491258c2c83396b558132444e7ec9d 100644 (file)
@@ -1153,15 +1153,15 @@ static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
                                        pr_err("%s: unmapped value: %lu\n", opts, value);
                                        return -EINVAL;
                                }
-                       }
-                       else if (!memcmp(opts, "gid", 3))
+                       } else if (!memcmp(opts, "gid", 3)) {
                                data->perms.gid = make_kgid(current_user_ns(), value);
                                if (!gid_valid(data->perms.gid)) {
                                        pr_err("%s: unmapped value: %lu\n", opts, value);
                                        return -EINVAL;
                                }
-                       else
+                       } else {
                                goto invalid;
+                       }
                        break;
 
                default:
index 1b0f086426bd92648b5b6ad0e743c2530601a2c2..d3bd7b095ba37713dfc3bdf1faaa39ddded393ac 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/io.h>
 
-#include <mach/hardware.h>
-
 static struct clk *mxc_ahb_clk;
 static struct clk *mxc_per_clk;
 static struct clk *mxc_ipg_clk;
 
 /* workaround ENGcm09152 for i.MX35 */
-#define USBPHYCTRL_OTGBASE_OFFSET      0x608
+#define MX35_USBPHYCTRL_OFFSET         0x600
+#define USBPHYCTRL_OTGBASE_OFFSET      0x8
 #define USBPHYCTRL_EVDO                        (1 << 23)
 
 int fsl_udc_clk_init(struct platform_device *pdev)
@@ -59,7 +58,7 @@ int fsl_udc_clk_init(struct platform_device *pdev)
        clk_prepare_enable(mxc_per_clk);
 
        /* make sure USB_CLK is running at 60 MHz +/- 1000 Hz */
-       if (!cpu_is_mx51()) {
+       if (!strcmp(pdev->id_entry->name, "imx-udc-mx27")) {
                freq = clk_get_rate(mxc_per_clk);
                if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&
                    (freq < 59999000 || freq > 60001000)) {
@@ -79,27 +78,40 @@ eclkrate:
        return ret;
 }
 
-void fsl_udc_clk_finalize(struct platform_device *pdev)
+int fsl_udc_clk_finalize(struct platform_device *pdev)
 {
        struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
-       if (cpu_is_mx35()) {
-               unsigned int v;
+       int ret = 0;
 
-               /* workaround ENGcm09152 for i.MX35 */
-               if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) {
-                       v = readl(MX35_IO_ADDRESS(MX35_USB_BASE_ADDR +
-                                       USBPHYCTRL_OTGBASE_OFFSET));
-                       writel(v | USBPHYCTRL_EVDO,
-                               MX35_IO_ADDRESS(MX35_USB_BASE_ADDR +
-                                       USBPHYCTRL_OTGBASE_OFFSET));
+       /* workaround ENGcm09152 for i.MX35 */
+       if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) {
+               unsigned int v;
+               struct resource *res = platform_get_resource
+                       (pdev, IORESOURCE_MEM, 0);
+               void __iomem *phy_regs = ioremap(res->start +
+                                               MX35_USBPHYCTRL_OFFSET, 512);
+               if (!phy_regs) {
+                       dev_err(&pdev->dev, "ioremap for phy address fails\n");
+                       ret = -EINVAL;
+                       goto ioremap_err;
                }
+
+               v = readl(phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
+               writel(v | USBPHYCTRL_EVDO,
+                       phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
+
+               iounmap(phy_regs);
        }
 
+
+ioremap_err:
        /* ULPI transceivers don't need usbpll */
        if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
                clk_disable_unprepare(mxc_per_clk);
                mxc_per_clk = NULL;
        }
+
+       return ret;
 }
 
 void fsl_udc_clk_release(void)
index c19f7f13790bf5be541b5e854beab49c969443ef..667275cb7bad2fba361bc305348c5536f5116f93 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/fsl_devices.h>
 #include <linux/dmapool.h>
 #include <linux/delay.h>
+#include <linux/of_device.h>
 
 #include <asm/byteorder.h>
 #include <asm/io.h>
@@ -2438,11 +2439,6 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
        unsigned int i;
        u32 dccparams;
 
-       if (strcmp(pdev->name, driver_name)) {
-               VDBG("Wrong device");
-               return -ENODEV;
-       }
-
        udc_controller = kzalloc(sizeof(struct fsl_udc), GFP_KERNEL);
        if (udc_controller == NULL) {
                ERR("malloc udc failed\n");
@@ -2547,7 +2543,9 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
                dr_controller_setup(udc_controller);
        }
 
-       fsl_udc_clk_finalize(pdev);
+       ret = fsl_udc_clk_finalize(pdev);
+       if (ret)
+               goto err_free_irq;
 
        /* Setup gadget structure */
        udc_controller->gadget.ops = &fsl_gadget_ops;
@@ -2756,22 +2754,32 @@ static int fsl_udc_otg_resume(struct device *dev)
 
        return fsl_udc_resume(NULL);
 }
-
 /*-------------------------------------------------------------------------
        Register entry point for the peripheral controller driver
 --------------------------------------------------------------------------*/
-
+static const struct platform_device_id fsl_udc_devtype[] = {
+       {
+               .name = "imx-udc-mx27",
+       }, {
+               .name = "imx-udc-mx51",
+       }, {
+               /* sentinel */
+       }
+};
+MODULE_DEVICE_TABLE(platform, fsl_udc_devtype);
 static struct platform_driver udc_driver = {
-       .remove  = __exit_p(fsl_udc_remove),
+       .remove         = __exit_p(fsl_udc_remove),
+       /* Just for FSL i.mx SoC currently */
+       .id_table       = fsl_udc_devtype,
        /* these suspend and resume are not usb suspend and resume */
-       .suspend = fsl_udc_suspend,
-       .resume  = fsl_udc_resume,
-       .driver  = {
-               .name = (char *)driver_name,
-               .owner = THIS_MODULE,
-               /* udc suspend/resume called from OTG driver */
-               .suspend = fsl_udc_otg_suspend,
-               .resume  = fsl_udc_otg_resume,
+       .suspend        = fsl_udc_suspend,
+       .resume         = fsl_udc_resume,
+       .driver         = {
+                       .name = (char *)driver_name,
+                       .owner = THIS_MODULE,
+                       /* udc suspend/resume called from OTG driver */
+                       .suspend = fsl_udc_otg_suspend,
+                       .resume  = fsl_udc_otg_resume,
        },
 };
 
index f61a967f70828dd21c7c7e401388610e6b62ce74..c6703bb07b23fcd026f450a581cf974a218013ad 100644 (file)
@@ -592,15 +592,16 @@ static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep)
 struct platform_device;
 #ifdef CONFIG_ARCH_MXC
 int fsl_udc_clk_init(struct platform_device *pdev);
-void fsl_udc_clk_finalize(struct platform_device *pdev);
+int fsl_udc_clk_finalize(struct platform_device *pdev);
 void fsl_udc_clk_release(void);
 #else
 static inline int fsl_udc_clk_init(struct platform_device *pdev)
 {
        return 0;
 }
-static inline void fsl_udc_clk_finalize(struct platform_device *pdev)
+static inline int fsl_udc_clk_finalize(struct platform_device *pdev)
 {
+       return 0;
 }
 static inline void fsl_udc_clk_release(void)
 {
index 379aac7b82fcf62d57b5ef41e1054c2b22e8ae71..6e8b1272ebceb3e1317e092ae7243e584c6eeda4 100644 (file)
@@ -1012,7 +1012,7 @@ static void udc_clock_enable(struct mv_udc *udc)
        unsigned int i;
 
        for (i = 0; i < udc->clknum; i++)
-               clk_enable(udc->clk[i]);
+               clk_prepare_enable(udc->clk[i]);
 }
 
 static void udc_clock_disable(struct mv_udc *udc)
@@ -1020,7 +1020,7 @@ static void udc_clock_disable(struct mv_udc *udc)
        unsigned int i;
 
        for (i = 0; i < udc->clknum; i++)
-               clk_disable(udc->clk[i]);
+               clk_disable_unprepare(udc->clk[i]);
 }
 
 static void udc_stop(struct mv_udc *udc)
index 141971d9051eab7d16dca79f0e652917171336e9..439c3f972f8c28d18fb34c691ab9cf8b51559098 100644 (file)
@@ -3477,12 +3477,11 @@ static void s3c_hsotg_delete_debug(struct s3c_hsotg *hsotg)
 /**
  * s3c_hsotg_release - release callback for hsotg device
  * @dev: Device for which release is called
+ *
+ * Nothing to do as the resource is allocated using the devm_ API.
  */
 static void s3c_hsotg_release(struct device *dev)
 {
-       struct s3c_hsotg *hsotg = dev_get_drvdata(dev);
-
-       kfree(hsotg);
 }
 
 /**
index 4f7f76f00c7452239067a5b5498a500d26093f50..7cacd6ae818e3ef957efe728eeb9641ccbf42071 100644 (file)
@@ -1794,9 +1794,10 @@ static int tcm_usbg_drop_nexus(struct usbg_tpg *tpg)
        tpg->tpg_nexus = NULL;
 
        kfree(tv_nexus);
+       ret = 0;
 out:
        mutex_unlock(&tpg->tpg_mutex);
-       return 0;
+       return ret;
 }
 
 static ssize_t tcm_usbg_tpg_store_nexus(
index d0f95482f40e8058a24f27b161f33beb55bb3c4d..598dcc1212f0bb231830f0022961f67e37b6b0fa 100644 (file)
@@ -887,7 +887,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
        pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
                        port->port_num, tty, file);
 
-       wake_up_interruptible(&port->port.close_wait);
+       wake_up(&port->port.close_wait);
 exit:
        spin_unlock_irq(&port->port_lock);
 }
index d6bb128ce21ee424a02dfb7db31c96bc88c9af44..3a21c5d683c04f0380aa1606045b35375d26143c 100644 (file)
@@ -148,7 +148,7 @@ config USB_EHCI_FSL
          Variation of ARC USB block used in some Freescale chips.
 
 config USB_EHCI_MXC
-       bool "Support for Freescale i.MX on-chip EHCI USB controller"
+       tristate "Support for Freescale i.MX on-chip EHCI USB controller"
        depends on USB_EHCI_HCD && ARCH_MXC
        select USB_EHCI_ROOT_HUB_TT
        ---help---
index 1eb4c3006e9eab8b7d2588c0322d38b9ad724327..001fbff2fdefedbb346f71db705f8e687201e454 100644 (file)
@@ -26,6 +26,7 @@ obj-$(CONFIG_PCI)             += pci-quirks.o
 obj-$(CONFIG_USB_EHCI_HCD)     += ehci-hcd.o
 obj-$(CONFIG_USB_EHCI_PCI)     += ehci-pci.o
 obj-$(CONFIG_USB_EHCI_HCD_PLATFORM)    += ehci-platform.o
+obj-$(CONFIG_USB_EHCI_MXC)     += ehci-mxc.o
 
 obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o
 obj-$(CONFIG_USB_ISP116X_HCD)  += isp116x-hcd.o
index fd9b5424b860a20e7d04c54fe3cf4fc76ca3be2d..d81d2fcbff1894d79f0606d831d91b628c44f71e 100644 (file)
@@ -230,7 +230,7 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
 
        switch (phy_mode) {
        case FSL_USB2_PHY_ULPI:
-               if (pdata->controller_ver) {
+               if (pdata->have_sysif_regs && pdata->controller_ver) {
                        /* controller version 1.6 or above */
                        setbits32(non_ehci + FSL_SOC_USB_CTRL,
                                        ULPI_PHY_CLK_SEL);
@@ -251,7 +251,7 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
                portsc |= PORT_PTS_PTW;
                /* fall through */
        case FSL_USB2_PHY_UTMI:
-               if (pdata->controller_ver) {
+               if (pdata->have_sysif_regs && pdata->controller_ver) {
                        /* controller version 1.6 or above */
                        setbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN);
                        mdelay(FSL_UTMI_PHY_DLY);  /* Delay for UTMI PHY CLK to
@@ -267,7 +267,8 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
                break;
        }
 
-       if (pdata->controller_ver && (phy_mode == FSL_USB2_PHY_ULPI)) {
+       if (pdata->have_sysif_regs && pdata->controller_ver &&
+           (phy_mode == FSL_USB2_PHY_ULPI)) {
                /* check PHY_CLK_VALID to get phy clk valid */
                if (!spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
                                PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0)) {
@@ -278,7 +279,7 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
 
        ehci_writel(ehci, portsc, &ehci->regs->port_status[port_offset]);
 
-       if (phy_mode != FSL_USB2_PHY_ULPI)
+       if (phy_mode != FSL_USB2_PHY_ULPI && pdata->have_sysif_regs)
                setbits32(non_ehci + FSL_SOC_USB_CTRL, USB_CTRL_USB_EN);
 
        return 0;
index c97503bb0b0e064a027575f9eb0d28094d3fda80..b416a3fc99594f30afbb15995191313f6c9315a6 100644 (file)
@@ -74,10 +74,6 @@ static const char    hcd_name [] = "ehci_hcd";
 #undef VERBOSE_DEBUG
 #undef EHCI_URB_TRACE
 
-#ifdef DEBUG
-#define EHCI_STATS
-#endif
-
 /* magic numbers that can affect system performance */
 #define        EHCI_TUNE_CERR          3       /* 0-3 qtd retries; 0 == don't stop */
 #define        EHCI_TUNE_RL_HS         4       /* nak throttle; see 4.9 */
@@ -801,6 +797,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
                        ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
                        set_bit(i, &ehci->resuming_ports);
                        ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
+                       usb_hcd_start_port_resume(&hcd->self, i);
                        mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
                }
        }
@@ -1250,11 +1247,6 @@ MODULE_LICENSE ("GPL");
 #define        PLATFORM_DRIVER         ehci_fsl_driver
 #endif
 
-#ifdef CONFIG_USB_EHCI_MXC
-#include "ehci-mxc.c"
-#define PLATFORM_DRIVER                ehci_mxc_driver
-#endif
-
 #ifdef CONFIG_USB_EHCI_SH
 #include "ehci-sh.c"
 #define PLATFORM_DRIVER                ehci_hcd_sh_driver
@@ -1352,7 +1344,8 @@ MODULE_LICENSE ("GPL");
 
 #if !IS_ENABLED(CONFIG_USB_EHCI_PCI) && \
        !IS_ENABLED(CONFIG_USB_EHCI_HCD_PLATFORM) && \
-       !defined(CONFIG_USB_CHIPIDEA_HOST) && \
+       !IS_ENABLED(CONFIG_USB_CHIPIDEA_HOST) && \
+       !IS_ENABLED(CONFIG_USB_EHCI_MXC) && \
        !defined(PLATFORM_DRIVER) && \
        !defined(PS3_SYSTEM_BUS_DRIVER) && \
        !defined(OF_PLATFORM_DRIVER) && \
index 4ccb97c0678f93fd8379af59584bd79e8b8d10ef..4d3b294f203e3acefe469db340d695786b2ffc3f 100644 (file)
@@ -649,7 +649,11 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
                        status = STS_PCD;
                }
        }
-       /* FIXME autosuspend idle root hubs */
+
+       /* If a resume is in progress, make sure it can finish */
+       if (ehci->resuming_ports)
+               mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(25));
+
        spin_unlock_irqrestore (&ehci->lock, flags);
        return status ? retval : 0;
 }
@@ -851,6 +855,7 @@ static int ehci_hub_control (
                                /* resume signaling for 20 msec */
                                ehci->reset_done[wIndex] = jiffies
                                                + msecs_to_jiffies(20);
+                               usb_hcd_start_port_resume(&hcd->self, wIndex);
                                /* check the port again */
                                mod_timer(&ehci_to_hcd(ehci)->rh_timer,
                                                ehci->reset_done[wIndex]);
@@ -862,6 +867,7 @@ static int ehci_hub_control (
                                clear_bit(wIndex, &ehci->suspended_ports);
                                set_bit(wIndex, &ehci->port_c_suspend);
                                ehci->reset_done[wIndex] = 0;
+                               usb_hcd_end_port_resume(&hcd->self, wIndex);
 
                                /* stop resume signaling */
                                temp = ehci_readl(ehci, status_reg);
@@ -950,6 +956,7 @@ static int ehci_hub_control (
                        ehci->reset_done[wIndex] = 0;
                        if (temp & PORT_PE)
                                set_bit(wIndex, &ehci->port_c_suspend);
+                       usb_hcd_end_port_resume(&hcd->self, wIndex);
                }
 
                if (temp & PORT_OC)
index f7bfc0b898b97a90dfece792fe40ca16ef4a2f73..6c56297ea16b8f59bd887f9025cd68ca46a2be96 100644 (file)
@@ -43,7 +43,7 @@ static void ehci_clock_enable(struct ehci_hcd_mv *ehci_mv)
        unsigned int i;
 
        for (i = 0; i < ehci_mv->clknum; i++)
-               clk_enable(ehci_mv->clk[i]);
+               clk_prepare_enable(ehci_mv->clk[i]);
 }
 
 static void ehci_clock_disable(struct ehci_hcd_mv *ehci_mv)
@@ -51,7 +51,7 @@ static void ehci_clock_disable(struct ehci_hcd_mv *ehci_mv)
        unsigned int i;
 
        for (i = 0; i < ehci_mv->clknum; i++)
-               clk_disable(ehci_mv->clk[i]);
+               clk_disable_unprepare(ehci_mv->clk[i]);
 }
 
 static int mv_ehci_enable(struct ehci_hcd_mv *ehci_mv)
index ec7f5d2c90de3464f66a000583fa151fde29946e..dedb80bb8d40a5c37132fbdb0d0c569534aeb9c8 100644 (file)
  * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/usb/otg.h>
 #include <linux/usb/ulpi.h>
 #include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
 
 #include <linux/platform_data/usb-ehci-mxc.h>
 
 #include <asm/mach-types.h>
 
+#include "ehci.h"
+
+#define DRIVER_DESC "Freescale On-Chip EHCI Host driver"
+
+static const char hcd_name[] = "ehci-mxc";
+
 #define ULPI_VIEWPORT_OFFSET   0x170
 
 struct ehci_mxc_priv {
        struct clk *usbclk, *ahbclk, *phyclk;
-       struct usb_hcd *hcd;
 };
 
-/* called during probe() after chip reset completes */
-static int ehci_mxc_setup(struct usb_hcd *hcd)
-{
-       hcd->has_tt = 1;
-
-       return ehci_setup(hcd);
-}
+static struct hc_driver __read_mostly ehci_mxc_hc_driver;
 
-static const struct hc_driver ehci_mxc_hc_driver = {
-       .description = hcd_name,
-       .product_desc = "Freescale On-Chip EHCI Host Controller",
-       .hcd_priv_size = sizeof(struct ehci_hcd),
-
-       /*
-        * generic hardware linkage
-        */
-       .irq = ehci_irq,
-       .flags = HCD_USB2 | HCD_MEMORY,
-
-       /*
-        * basic lifecycle operations
-        */
-       .reset = ehci_mxc_setup,
-       .start = ehci_run,
-       .stop = ehci_stop,
-       .shutdown = ehci_shutdown,
-
-       /*
-        * managing i/o requests and associated device resources
-        */
-       .urb_enqueue = ehci_urb_enqueue,
-       .urb_dequeue = ehci_urb_dequeue,
-       .endpoint_disable = ehci_endpoint_disable,
-       .endpoint_reset = ehci_endpoint_reset,
-
-       /*
-        * scheduling support
-        */
-       .get_frame_number = ehci_get_frame,
-
-       /*
-        * root hub support
-        */
-       .hub_status_data = ehci_hub_status_data,
-       .hub_control = ehci_hub_control,
-       .bus_suspend = ehci_bus_suspend,
-       .bus_resume = ehci_bus_resume,
-       .relinquish_port = ehci_relinquish_port,
-       .port_handed_over = ehci_port_handed_over,
-
-       .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+static const struct ehci_driver_overrides ehci_mxc_overrides __initdata = {
+       .extra_priv_size =      sizeof(struct ehci_mxc_priv),
 };
 
 static int ehci_mxc_drv_probe(struct platform_device *pdev)
@@ -112,12 +75,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
        if (!hcd)
                return -ENOMEM;
 
-       priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
-       if (!priv) {
-               ret = -ENOMEM;
-               goto err_alloc;
-       }
-
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "Found HC with no register addr. Check setup!\n");
@@ -135,6 +92,10 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
                goto err_alloc;
        }
 
+       hcd->has_tt = 1;
+       ehci = hcd_to_ehci(hcd);
+       priv = (struct ehci_mxc_priv *) ehci->priv;
+
        /* enable clocks */
        priv->usbclk = devm_clk_get(&pdev->dev, "ipg");
        if (IS_ERR(priv->usbclk)) {
@@ -169,8 +130,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
                mdelay(10);
        }
 
-       ehci = hcd_to_ehci(hcd);
-
        /* EHCI registers start at offset 0x100 */
        ehci->caps = hcd->regs + 0x100;
        ehci->regs = hcd->regs + 0x100 +
@@ -198,8 +157,7 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
                }
        }
 
-       priv->hcd = hcd;
-       platform_set_drvdata(pdev, priv);
+       platform_set_drvdata(pdev, hcd);
 
        ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
        if (ret)
@@ -244,8 +202,11 @@ err_alloc:
 static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)
 {
        struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
-       struct ehci_mxc_priv *priv = platform_get_drvdata(pdev);
-       struct usb_hcd *hcd = priv->hcd;
+       struct usb_hcd *hcd = platform_get_drvdata(pdev);
+       struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+       struct ehci_mxc_priv *priv = (struct ehci_mxc_priv *) ehci->priv;
+
+       usb_remove_hcd(hcd);
 
        if (pdata && pdata->exit)
                pdata->exit(pdev);
@@ -253,23 +214,20 @@ static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)
        if (pdata->otg)
                usb_phy_shutdown(pdata->otg);
 
-       usb_remove_hcd(hcd);
-       usb_put_hcd(hcd);
-       platform_set_drvdata(pdev, NULL);
-
        clk_disable_unprepare(priv->usbclk);
        clk_disable_unprepare(priv->ahbclk);
 
        if (priv->phyclk)
                clk_disable_unprepare(priv->phyclk);
 
+       usb_put_hcd(hcd);
+       platform_set_drvdata(pdev, NULL);
        return 0;
 }
 
 static void ehci_mxc_drv_shutdown(struct platform_device *pdev)
 {
-       struct ehci_mxc_priv *priv = platform_get_drvdata(pdev);
-       struct usb_hcd *hcd = priv->hcd;
+       struct usb_hcd *hcd = platform_get_drvdata(pdev);
 
        if (hcd->driver->shutdown)
                hcd->driver->shutdown(hcd);
@@ -279,9 +237,31 @@ MODULE_ALIAS("platform:mxc-ehci");
 
 static struct platform_driver ehci_mxc_driver = {
        .probe = ehci_mxc_drv_probe,
-       .remove = __exit_p(ehci_mxc_drv_remove),
+       .remove = ehci_mxc_drv_remove,
        .shutdown = ehci_mxc_drv_shutdown,
        .driver = {
                   .name = "mxc-ehci",
        },
 };
+
+static int __init ehci_mxc_init(void)
+{
+       if (usb_disabled())
+               return -ENODEV;
+
+       pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+
+       ehci_init_driver(&ehci_mxc_hc_driver, &ehci_mxc_overrides);
+       return platform_driver_register(&ehci_mxc_driver);
+}
+module_init(ehci_mxc_init);
+
+static void __exit ehci_mxc_cleanup(void)
+{
+       platform_driver_unregister(&ehci_mxc_driver);
+}
+module_exit(ehci_mxc_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Sascha Hauer");
+MODULE_LICENSE("GPL");
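
For context, the ehci-mxc conversion above follows the generic "EHCI library plus overrides" scheme: the core fills in a default hc_driver at init time and the platform glue only supplies what differs (here, extra_priv_size for its clock pointers). A minimal userspace sketch of that pattern, with invented names, not the kernel implementation:

#include <stdio.h>
#include <stddef.h>

struct hc_ops {
	const char *product_desc;
	size_t extra_priv_size;
	int (*reset)(void);
};

static int generic_reset(void) { puts("generic reset"); return 0; }

static const struct hc_ops generic_ops = {
	.product_desc = "Generic EHCI",
	.extra_priv_size = 0,
	.reset = generic_reset,
};

struct hc_overrides {
	size_t extra_priv_size;
	int (*reset)(void);		/* NULL means "keep the default" */
};

static void init_driver(struct hc_ops *dst, const struct hc_overrides *ov)
{
	*dst = generic_ops;		/* start from the shared defaults */
	if (ov) {
		dst->extra_priv_size = ov->extra_priv_size;
		if (ov->reset)
			dst->reset = ov->reset;
	}
}

int main(void)
{
	struct hc_ops mxc_ops;
	static const struct hc_overrides mxc_overrides = {
		.extra_priv_size = 3 * sizeof(void *),	/* room for the clk pointers */
	};

	init_driver(&mxc_ops, &mxc_overrides);
	return mxc_ops.reset();
}
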
index dabb20494826f1be237c9a08b6cea7569be46dd7..170b9399e09f34b42ec798b8e02985c5b0f71190 100644 (file)
@@ -200,6 +200,26 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
                break;
        }
 
+       /* optional debug port, normally in the first BAR */
+       temp = pci_find_capability(pdev, PCI_CAP_ID_DBG);
+       if (temp) {
+               pci_read_config_dword(pdev, temp, &temp);
+               temp >>= 16;
+               if (((temp >> 13) & 7) == 1) {
+                       u32 hcs_params = ehci_readl(ehci,
+                                                   &ehci->caps->hcs_params);
+
+                       temp &= 0x1fff;
+                       ehci->debug = hcd->regs + temp;
+                       temp = ehci_readl(ehci, &ehci->debug->control);
+                       ehci_info(ehci, "debug port %d%s\n",
+                                 HCS_DEBUG_PORT(hcs_params),
+                                 (temp & DBGP_ENABLED) ? " IN USE" : "");
+                       if (!(temp & DBGP_ENABLED))
+                               ehci->debug = NULL;
+               }
+       }
+
        retval = ehci_setup(hcd);
        if (retval)
                return retval;
@@ -228,25 +248,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
                break;
        }
 
-       /* optional debug port, normally in the first BAR */
-       temp = pci_find_capability(pdev, 0x0a);
-       if (temp) {
-               pci_read_config_dword(pdev, temp, &temp);
-               temp >>= 16;
-               if ((temp & (3 << 13)) == (1 << 13)) {
-                       temp &= 0x1fff;
-                       ehci->debug = hcd->regs + temp;
-                       temp = ehci_readl(ehci, &ehci->debug->control);
-                       ehci_info(ehci, "debug port %d%s\n",
-                               HCS_DEBUG_PORT(ehci->hcs_params),
-                               (temp & DBGP_ENABLED)
-                                       ? " IN USE"
-                                       : "");
-                       if (!(temp & DBGP_ENABLED))
-                               ehci->debug = NULL;
-               }
-       }
-
        /* at least the Genesys GL880S needs fixup here */
        temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
        temp &= 0x0f;
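
The hunks above move the debug-port lookup so it runs before ehci_setup(); the lookup itself is a plain shift-and-mask decode of the PCI Debug-Port capability dword. A small standalone sketch of that decode, using a made-up capability value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cap = 0x20a80a0a;	/* hypothetical dword read from config space */
	uint32_t info = cap >> 16;	/* upper 16 bits hold BAR number + offset */
	unsigned bar = (info >> 13) & 7;	/* which BAR the debug registers live in */
	unsigned off = info & 0x1fff;		/* byte offset within that BAR */

	if (bar == 1)	/* the driver only handles the first (register) BAR */
		printf("debug port registers at BAR0 + 0x%x\n", off);
	else
		printf("debug port in BAR %u, not handled\n", bar);
	return 0;
}
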
index 3d989028c8365e6644b86428fea2f24cfba9be6e..fd252f0cfb3a7873ec7bad8226a036a9668b182e 100644 (file)
@@ -1197,17 +1197,26 @@ static void start_iaa_cycle(struct ehci_hcd *ehci, bool nested)
        if (ehci->async_iaa || ehci->async_unlinking)
                return;
 
-       /* Do all the waiting QHs at once */
-       ehci->async_iaa = ehci->async_unlink;
-       ehci->async_unlink = NULL;
-
        /* If the controller isn't running, we don't have to wait for it */
        if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
+
+               /* Do all the waiting QHs */
+               ehci->async_iaa = ehci->async_unlink;
+               ehci->async_unlink = NULL;
+
                if (!nested)            /* Avoid recursion */
                        end_unlink_async(ehci);
 
        /* Otherwise start a new IAA cycle */
        } else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) {
+               struct ehci_qh          *qh;
+
+               /* Do only the first waiting QH (nVidia bug?) */
+               qh = ehci->async_unlink;
+               ehci->async_iaa = qh;
+               ehci->async_unlink = qh->unlink_next;
+               qh->unlink_next = NULL;
+
                /* Make sure the unlinks are all visible to the hardware */
                wmb();
 
@@ -1255,34 +1264,35 @@ static void end_unlink_async(struct ehci_hcd *ehci)
        }
 }
 
+static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh);
+
 static void unlink_empty_async(struct ehci_hcd *ehci)
 {
-       struct ehci_qh          *qh, *next;
-       bool                    stopped = (ehci->rh_state < EHCI_RH_RUNNING);
+       struct ehci_qh          *qh;
+       struct ehci_qh          *qh_to_unlink = NULL;
        bool                    check_unlinks_later = false;
+       int                     count = 0;
 
-       /* Unlink all the async QHs that have been empty for a timer cycle */
-       next = ehci->async->qh_next.qh;
-       while (next) {
-               qh = next;
-               next = qh->qh_next.qh;
-
+       /* Find the last async QH which has been empty for a timer cycle */
+       for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) {
                if (list_empty(&qh->qtd_list) &&
                                qh->qh_state == QH_STATE_LINKED) {
-                       if (!stopped && qh->unlink_cycle ==
-                                       ehci->async_unlink_cycle)
+                       ++count;
+                       if (qh->unlink_cycle == ehci->async_unlink_cycle)
                                check_unlinks_later = true;
                        else
-                               single_unlink_async(ehci, qh);
+                               qh_to_unlink = qh;
                }
        }
 
-       /* Start a new IAA cycle if any QHs are waiting for it */
-       if (ehci->async_unlink)
-               start_iaa_cycle(ehci, false);
+       /* If nothing else is being unlinked, unlink the last empty QH */
+       if (!ehci->async_iaa && !ehci->async_unlink && qh_to_unlink) {
+               start_unlink_async(ehci, qh_to_unlink);
+               --count;
+       }
 
-       /* QHs that haven't been empty for long enough will be handled later */
-       if (check_unlinks_later) {
+       /* Other QHs will be handled later */
+       if (count > 0) {
                ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true);
                ++ehci->async_unlink_cycle;
        }
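
The hunks above change async unlinking so that, while the controller is running, only the head of the async_unlink list is handed to the IAA cycle and the rest stays queued for later cycles. A toy userspace list illustrating the pop-one-per-cycle idea (illustrative only, not the driver's data structures):

#include <stdio.h>
#include <stddef.h>

struct qh { int id; struct qh *unlink_next; };

static struct qh *pop_head(struct qh **list)
{
	struct qh *qh = *list;
	if (qh) {
		*list = qh->unlink_next;	/* rest stays queued for later cycles */
		qh->unlink_next = NULL;
	}
	return qh;
}

int main(void)
{
	struct qh c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct qh *async_unlink = &a, *iaa;

	while ((iaa = pop_head(&async_unlink)))	/* one QH per "IAA cycle" */
		printf("unlinking qh %d\n", iaa->id);
	return 0;
}
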
index 69ebee73c0c153581895dbd7860ab75024784394..b476daf49f6f3c6cf226b920f5c8ff01ca5d23aa 100644 (file)
@@ -213,7 +213,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
 }
 
 static const unsigned char
-max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
+max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 };
 
 /* carryover low/fullspeed bandwidth that crosses uframe boundries */
 static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
@@ -2212,11 +2212,11 @@ static void scan_isoc(struct ehci_hcd *ehci)
        }
        ehci->now_frame = now_frame;
 
+       frame = ehci->last_iso_frame;
        for (;;) {
                union ehci_shadow       q, *q_p;
                __hc32                  type, *hw_p;
 
-               frame = ehci->last_iso_frame;
 restart:
                /* scan each element in frame's queue for completions */
                q_p = &ehci->pshadow [frame];
@@ -2321,6 +2321,9 @@ restart:
                /* Stop when we have reached the current frame */
                if (frame == now_frame)
                        break;
-               ehci->last_iso_frame = (frame + 1) & fmask;
+
+               /* The last frame may still have active siTDs */
+               ehci->last_iso_frame = frame;
+               frame = (frame + 1) & fmask;
        }
 }
index 20dbdcbe9b0fc1fb1330da115bebb3546e45b8f2..f904071d70df2d1cb2d46710c44820ffbd3a90dc 100644 (file)
@@ -113,14 +113,15 @@ static void ehci_poll_ASS(struct ehci_hcd *ehci)
 
        if (want != actual) {
 
-               /* Poll again later, but give up after about 20 ms */
-               if (ehci->ASS_poll_count++ < 20) {
-                       ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true);
-                       return;
-               }
-               ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n",
-                               want, actual);
+               /* Poll again later */
+               ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true);
+               ++ehci->ASS_poll_count;
+               return;
        }
+
+       if (ehci->ASS_poll_count > 20)
+               ehci_dbg(ehci, "ASS poll count reached %d\n",
+                               ehci->ASS_poll_count);
        ehci->ASS_poll_count = 0;
 
        /* The status is up-to-date; restart or stop the schedule as needed */
@@ -159,14 +160,14 @@ static void ehci_poll_PSS(struct ehci_hcd *ehci)
 
        if (want != actual) {
 
-               /* Poll again later, but give up after about 20 ms */
-               if (ehci->PSS_poll_count++ < 20) {
-                       ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true);
-                       return;
-               }
-               ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
-                               want, actual);
+               /* Poll again later */
+               ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true);
+               return;
        }
+
+       if (ehci->PSS_poll_count > 20)
+               ehci_dbg(ehci, "PSS poll count reached %d\n",
+                               ehci->PSS_poll_count);
        ehci->PSS_poll_count = 0;
 
        /* The status is up-to-date; restart or stop the schedule as needed */
index 9dadc7118d68e13a84e793a10171a500e6d52fad..36c3a82105953ba2433d12845e7e02a293fa6d35 100644 (file)
@@ -38,6 +38,10 @@ typedef __u16 __bitwise __hc16;
 #endif
 
 /* statistics can be kept for tuning/monitoring */
+#ifdef DEBUG
+#define EHCI_STATS
+#endif
+
 struct ehci_stats {
        /* irq usage */
        unsigned long           normal;
@@ -221,6 +225,9 @@ struct ehci_hcd {                   /* one per controller */
 #ifdef DEBUG
        struct dentry           *debug_dir;
 #endif
+
+       /* platform-specific data -- must come last */
+       unsigned long           priv[0] __aligned(sizeof(s64));
 };
 
 /* convert between an HCD pointer and the corresponding EHCI_HCD */
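
The new priv[] member at the end of struct ehci_hcd is the storage that platform glue such as ehci-mxc above reserves via extra_priv_size and then casts to its own private struct. A minimal userspace sketch of the same trailing-allocation trick (shown with a C99 flexible array member; names invented):

#include <stdio.h>
#include <stdlib.h>

struct base {
	int state;
	unsigned long priv[];	/* flexible array member, must come last */
};

struct glue_priv { int clk_a, clk_b; };

int main(void)
{
	struct base *hc = calloc(1, sizeof(*hc) + sizeof(struct glue_priv));
	struct glue_priv *priv;

	if (!hc)
		return 1;
	priv = (struct glue_priv *)hc->priv;	/* same cast as in the probe() above */
	priv->clk_a = 1;
	priv->clk_b = 2;
	printf("%d %d\n", priv->clk_a, priv->clk_b);
	free(hc);
	return 0;
}
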
index 5105127c1d4b3e4c3ac06953c25ac959fdc24a6c..11e0b79ff9d52a8d703c3b2134b8960768123246 100644 (file)
@@ -142,6 +142,9 @@ static int usb_get_ver_info(struct device_node *np)
                        return ver;
        }
 
+       if (of_device_is_compatible(np, "fsl,mpc5121-usb2-dr"))
+               return FSL_USB_VER_OLD;
+
        if (of_device_is_compatible(np, "fsl-usb2-mph")) {
                if (of_device_is_compatible(np, "fsl-usb2-mph-v1.6"))
                        ver = FSL_USB_VER_1_6;
index bd6a7447ccc9efc41cd28e9a2c39b576c92fb6a3..f0ebe8e7c58b4dced4d50617b7a6235d90143cac 100644 (file)
@@ -58,6 +58,7 @@
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
 #include <linux/dma-mapping.h>
+#include <linux/module.h>
 
 #include "imx21-hcd.h"
 
index d370245a4ee22af7716983da3b48a9791251a4ff..5e3a6deb62b1e7d37b30f0f8a4f534f9e740fda9 100644 (file)
@@ -128,7 +128,8 @@ static void tmio_start_hc(struct platform_device *dev)
        tmio_iowrite8(2, tmio->ccr + CCR_INTC);
 
        dev_info(&dev->dev, "revision %d @ 0x%08llx, irq %d\n",
-                       tmio_ioread8(tmio->ccr + CCR_REVID), hcd->rsrc_start, hcd->irq);
+                       tmio_ioread8(tmio->ccr + CCR_REVID),
+                       (u64) hcd->rsrc_start, hcd->irq);
 }
 
 static int ohci_tmio_start(struct usb_hcd *hcd)
index a3b6d7104ae237f17846fdd7d41f4f14fcc7e4b5..4c338ec03a07d1bfa72ceea172f07e19f151f54b 100644 (file)
@@ -780,6 +780,7 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
                                "defaulting to EHCI.\n");
                dev_warn(&xhci_pdev->dev,
                                "USB 3.0 devices will work at USB 2.0 speeds.\n");
+               usb_disable_xhci_ports(xhci_pdev);
                return;
        }
 
index 4b9e9aba26654e3f70f5801bf2ada2438a739c94..4f64d24eebc82b7927afbf1d7bace3d3eb433160 100644 (file)
@@ -447,6 +447,10 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
                return IRQ_NONE;
        uhci_writew(uhci, status, USBSTS);              /* Clear it */
 
+       spin_lock(&uhci->lock);
+       if (unlikely(!uhci->is_initialized))    /* not yet configured */
+               goto done;
+
        if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
                if (status & USBSTS_HSE)
                        dev_err(uhci_dev(uhci), "host system error, "
@@ -455,7 +459,6 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
                        dev_err(uhci_dev(uhci), "host controller process "
                                        "error, something bad happened!\n");
                if (status & USBSTS_HCH) {
-                       spin_lock(&uhci->lock);
                        if (uhci->rh_state >= UHCI_RH_RUNNING) {
                                dev_err(uhci_dev(uhci),
                                        "host controller halted, "
@@ -473,15 +476,15 @@ static irqreturn_t uhci_irq(struct usb_hcd *hcd)
                                 * pending unlinks */
                                mod_timer(&hcd->rh_timer, jiffies);
                        }
-                       spin_unlock(&uhci->lock);
                }
        }
 
-       if (status & USBSTS_RD)
+       if (status & USBSTS_RD) {
+               spin_unlock(&uhci->lock);
                usb_hcd_poll_rh_status(hcd);
-       else {
-               spin_lock(&uhci->lock);
+       } else {
                uhci_scan_schedule(uhci);
+ done:
                spin_unlock(&uhci->lock);
        }
 
@@ -662,9 +665,9 @@ static int uhci_start(struct usb_hcd *hcd)
         */
        mb();
 
+       spin_lock_irq(&uhci->lock);
        configure_hc(uhci);
        uhci->is_initialized = 1;
-       spin_lock_irq(&uhci->lock);
        start_rh(uhci);
        spin_unlock_irq(&uhci->lock);
        return 0;
index 768d54295a20742a8f38d6224ce1a9bb1de85556..15d13229ddbb6860d41e706bfaf716fe49b93067 100644 (file)
@@ -116,6 +116,7 @@ static void uhci_finish_suspend(struct uhci_hcd *uhci, int port,
                }
        }
        clear_bit(port, &uhci->resuming_ports);
+       usb_hcd_end_port_resume(&uhci_to_hcd(uhci)->self, port);
 }
 
 /* Wait for the UHCI controller in HP's iLO2 server management chip.
@@ -167,6 +168,8 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
                                set_bit(port, &uhci->resuming_ports);
                                uhci->ports_timeout = jiffies +
                                                msecs_to_jiffies(25);
+                               usb_hcd_start_port_resume(
+                                               &uhci_to_hcd(uhci)->self, port);
 
                                /* Make sure we see the port again
                                 * after the resuming period is over. */
index a686cf4905bb80ae3e4c7b206c05d7cd2b960920..68914429482f30ff4afed47180aae563d07c5784 100644 (file)
@@ -761,12 +761,39 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                        break;
                case USB_PORT_FEAT_LINK_STATE:
                        temp = xhci_readl(xhci, port_array[wIndex]);
+
+                       /* Disable port */
+                       if (link_state == USB_SS_PORT_LS_SS_DISABLED) {
+                               xhci_dbg(xhci, "Disable port %d\n", wIndex);
+                               temp = xhci_port_state_to_neutral(temp);
+                               /*
+                                * Clear all change bits, so that we get a new
+                                * connection event.
+                                */
+                               temp |= PORT_CSC | PORT_PEC | PORT_WRC |
+                                       PORT_OCC | PORT_RC | PORT_PLC |
+                                       PORT_CEC;
+                               xhci_writel(xhci, temp | PORT_PE,
+                                       port_array[wIndex]);
+                               temp = xhci_readl(xhci, port_array[wIndex]);
+                               break;
+                       }
+
+                       /* Put link in RxDetect (enable port) */
+                       if (link_state == USB_SS_PORT_LS_RX_DETECT) {
+                               xhci_dbg(xhci, "Enable port %d\n", wIndex);
+                               xhci_set_link_state(xhci, port_array, wIndex,
+                                               link_state);
+                               temp = xhci_readl(xhci, port_array[wIndex]);
+                               break;
+                       }
+
                        /* Software should not attempt to set
-                        * port link state above '5' (Rx.Detect) and the port
+                        * port link state above '3' (U3) and the port
                         * must be enabled.
                         */
                        if ((temp & PORT_PE) == 0 ||
-                               (link_state > USB_SS_PORT_LS_RX_DETECT)) {
+                               (link_state > USB_SS_PORT_LS_U3)) {
                                xhci_warn(xhci, "Cannot set link state.\n");
                                goto error;
                        }
@@ -957,6 +984,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
        int max_ports;
        __le32 __iomem **port_array;
        struct xhci_bus_state *bus_state;
+       bool reset_change = false;
 
        max_ports = xhci_get_ports(hcd, &port_array);
        bus_state = &xhci->bus_state[hcd_index(hcd)];
@@ -988,6 +1016,12 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
                        buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
                        status = 1;
                }
+               if ((temp & PORT_RC))
+                       reset_change = true;
+       }
+       if (!status && !reset_change) {
+               xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
+               clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        }
        spin_unlock_irqrestore(&xhci->lock, flags);
        return status ? retval : 0;
index fb51c7085ad0d19c7159afd9a069c82846afd405..35616ffbe3ae7435155bf29196d295f35a719b71 100644 (file)
@@ -1250,6 +1250,8 @@ static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
 static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
                struct usb_host_endpoint *ep)
 {
+       if (ep->desc.bInterval == 0)
+               return 0;
        return xhci_microframes_to_exponent(udev, ep,
                        ep->desc.bInterval, 0, 15);
 }
index cbb44b7b9d6553e01f9c2f45da23c8fad1b7fe92..7f76a49e90d384fdee83d0f87607113c9ebcf272 100644 (file)
@@ -1698,7 +1698,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
                                faked_port_index + 1);
                if (slot_id && xhci->devs[slot_id])
                        xhci_ring_device(xhci, slot_id);
-               if (bus_state->port_remote_wakeup && (1 << faked_port_index)) {
+               if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
                        bus_state->port_remote_wakeup &=
                                ~(1 << faked_port_index);
                        xhci_test_and_clear_bit(xhci, port_array,
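
The one-character change in the hunk above replaces a logical '&&' (true whenever any port has remote wakeup pending) with a bitwise '&' that actually tests this port's bit. A tiny standalone demonstration of the difference, with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned long port_remote_wakeup = 1 << 2;	/* only port 2 pending */
	int faked_port_index = 0;

	printf("&& test: %d (wrong, fires for port 0 too)\n",
	       port_remote_wakeup && (1 << faked_port_index));
	printf("&  test: %d (correct)\n",
	       !!(port_remote_wakeup & (1 << faked_port_index)));
	return 0;
}
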
@@ -1725,6 +1725,15 @@ cleanup:
        if (bogus_port_status)
                return;
 
+       /*
+        * xHCI port-status-change events occur when the "or" of all the
+        * status-change bits in the portsc register changes from 0 to 1.
+        * New status changes won't cause an event if any other change
+        * bits are still set.  When an event occurs, switch over to
+        * polling to avoid losing status changes.
+        */
+       xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+       set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
        spin_unlock(&xhci->lock);
        /* Pass this up to the core */
        usb_hcd_poll_rh_status(hcd);
@@ -2580,6 +2589,8 @@ cleanup:
                                (trb_comp_code != COMP_STALL &&
                                        trb_comp_code != COMP_BABBLE))
                                xhci_urb_free_priv(xhci, urb_priv);
+                       else
+                               kfree(urb_priv);
 
                        usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
                        if ((urb->actual_length != urb->transfer_buffer_length &&
@@ -3099,7 +3110,7 @@ static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
         * running_total.
         */
        packets_transferred = (running_total + trb_buff_len) /
-               usb_endpoint_maxp(&urb->ep->desc);
+               GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
 
        if ((total_packet_count - packets_transferred) > 31)
                return 31 << 17;
@@ -3633,7 +3644,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                td_len = urb->iso_frame_desc[i].length;
                td_remain_len = td_len;
                total_packet_count = DIV_ROUND_UP(td_len,
-                               usb_endpoint_maxp(&urb->ep->desc));
+                               GET_MAX_PACKET(
+                                       usb_endpoint_maxp(&urb->ep->desc)));
                /* A zero-length transfer still involves at least one packet. */
                if (total_packet_count == 0)
                        total_packet_count++;
@@ -3655,9 +3667,11 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                td = urb_priv->td[i];
                for (j = 0; j < trbs_per_td; j++) {
                        u32 remainder = 0;
-                       field = TRB_TBC(burst_count) | TRB_TLBPC(residue);
+                       field = 0;
 
                        if (first_trb) {
+                               field = TRB_TBC(burst_count) |
+                                       TRB_TLBPC(residue);
                                /* Queue the isoc TRB */
                                field |= TRB_TYPE(TRB_ISOC);
                                /* Assume URB_ISO_ASAP is set */
index 5c72c431bab18da941ec9a1a235c134086065482..f1f01a834ba792cee0935c5ffb9373d53fb368bd 100644 (file)
@@ -884,6 +884,11 @@ int xhci_suspend(struct xhci_hcd *xhci)
                        xhci->shared_hcd->state != HC_STATE_SUSPENDED)
                return -EINVAL;
 
+       /* Don't poll the roothubs on bus suspend. */
+       xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
+       clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+       del_timer_sync(&hcd->rh_timer);
+
        spin_lock_irq(&xhci->lock);
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
        clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
@@ -1069,6 +1074,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
        if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
                compliance_mode_recovery_timer_init(xhci);
 
+       /* Re-enable port polling. */
+       xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+       set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+       usb_hcd_poll_rh_status(hcd);
+
        return retval;
 }
 #endif /* CONFIG_PM */
index 7667b12f2ff5b0a0a32cd236d6d6e5d970e6cc47..268148de97147d7f77652914cd46660954c83525 100644 (file)
@@ -2179,7 +2179,7 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
                if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
                        break;
                retval = 0;
-               dev_info(&intf->dev, "TEST 17:  unlink from %d queues of "
+               dev_info(&intf->dev, "TEST 24:  unlink from %d queues of "
                                "%d %d-byte writes\n",
                                param->iterations, param->sglen, param->length);
                for (i = param->iterations; retval == 0 && i > 0; --i) {
index 0968dd7a859def7e04496d525bc51bdecb87cea0..f522000e8f0659b11124f48ef94f6f0c1bdb104c 100644 (file)
@@ -105,7 +105,7 @@ static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
        musb_writel(&tx->tx_complete, 0, ptr);
 }
 
-static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
+static void cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
 {
        int     j;
 
@@ -150,7 +150,7 @@ static void cppi_pool_free(struct cppi_channel *c)
        c->last_processed = NULL;
 }
 
-static int __init cppi_controller_start(struct dma_controller *c)
+static int cppi_controller_start(struct dma_controller *c)
 {
        struct cppi     *controller;
        void __iomem    *tibase;
index f1c6c5470b92550831cbe6551cf8cb090b2d491d..fd3486745e64bcdecd3aeea4843971e5e1795c22 100644 (file)
@@ -2298,10 +2298,7 @@ static int __init musb_init(void)
        if (usb_disabled())
                return 0;
 
-       pr_info("%s: version " MUSB_VERSION ", "
-               "?dma?"
-               ", "
-               "otg (peripheral+host)",
+       pr_info("%s: version " MUSB_VERSION ", ?dma?, otg (peripheral+host)\n",
                musb_driver_name);
        return platform_driver_register(&musb_driver);
 }
index e6f2ae8368bb8b046b34716698b553156df30a99..f7d764de6fdab988bde6124a8a9cd8657a0ea0ff 100644 (file)
@@ -134,6 +134,11 @@ static const resource_size_t dsps_control_module_phys[] = {
        DSPS_AM33XX_CONTROL_MODULE_PHYS_1,
 };
 
+#define USBPHY_CM_PWRDN                (1 << 0)
+#define USBPHY_OTG_PWRDN       (1 << 1)
+#define USBPHY_OTGVDET_EN      (1 << 19)
+#define USBPHY_OTGSESSEND_EN   (1 << 20)
+
 /**
  * musb_dsps_phy_control - phy on/off
  * @glue: struct dsps_glue *
index 6223062d5d1b5487534dc29724508d2e5df68793..37962c99ff1e9ace79bd89fdadfe4103649909e6 100644 (file)
@@ -110,7 +110,7 @@ config AB8500_USB
 
 config FSL_USB2_OTG
        bool "Freescale USB OTG Transceiver Driver"
-       depends on USB_EHCI_FSL && USB_GADGET_FSL_USB2 && USB_SUSPEND
+       depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_SUSPEND
        select USB_OTG
        select USB_OTG_UTILS
        help
index 1dd57504186db5225b28b353527001b4e1914b2e..eace975991a873a938983da7c3dbb5abc2ce2df5 100644 (file)
@@ -240,7 +240,7 @@ static void otg_clock_enable(struct mv_otg *mvotg)
        unsigned int i;
 
        for (i = 0; i < mvotg->clknum; i++)
-               clk_enable(mvotg->clk[i]);
+               clk_prepare_enable(mvotg->clk[i]);
 }
 
 static void otg_clock_disable(struct mv_otg *mvotg)
@@ -248,7 +248,7 @@ static void otg_clock_disable(struct mv_otg *mvotg)
        unsigned int i;
 
        for (i = 0; i < mvotg->clknum; i++)
-               clk_disable(mvotg->clk[i]);
+               clk_disable_unprepare(mvotg->clk[i]);
 }
 
 static int mv_otg_enable_internal(struct mv_otg *mvotg)
index dd41f61893ef87484fbfc233dc338427bcfcede6..f2985cd88021c0bd52178ee48cc64c4c7c97bc00 100644 (file)
@@ -545,15 +545,6 @@ static int usbhsg_pipe_disable(struct usbhsg_uep *uep)
        return 0;
 }
 
-static void usbhsg_uep_init(struct usbhsg_gpriv *gpriv)
-{
-       int i;
-       struct usbhsg_uep *uep;
-
-       usbhsg_for_each_uep_with_dcp(uep, gpriv, i)
-               uep->pipe = NULL;
-}
-
 /*
  *
  *             usb_ep_ops
@@ -610,7 +601,12 @@ static int usbhsg_ep_disable(struct usb_ep *ep)
 {
        struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
 
-       return usbhsg_pipe_disable(uep);
+       usbhsg_pipe_disable(uep);
+
+       uep->pipe->mod_private  = NULL;
+       uep->pipe               = NULL;
+
+       return 0;
 }
 
 static struct usb_request *usbhsg_ep_alloc_request(struct usb_ep *ep,
@@ -761,9 +757,8 @@ static int usbhsg_try_start(struct usbhs_priv *priv, u32 status)
        usbhs_pipe_init(priv,
                        usbhsg_dma_map_ctrl);
        usbhs_fifo_init(priv);
-       usbhsg_uep_init(gpriv);
 
-       /* dcp init */
+       /* dcp init instead of usbhsg_ep_enable() */
        dcp->pipe               = usbhs_dcp_malloc(priv);
        dcp->pipe->mod_private  = dcp;
        usbhs_pipe_config_update(dcp->pipe, 0, 0, 64);
@@ -825,7 +820,7 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
        usbhs_sys_set_test_mode(priv, 0);
        usbhs_sys_function_ctrl(priv, 0);
 
-       usbhsg_pipe_disable(dcp);
+       usbhsg_ep_disable(&dcp->ep);
 
        dev_dbg(dev, "stop gadget\n");
 
@@ -998,6 +993,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
         */
        usbhsg_for_each_uep_with_dcp(uep, gpriv, i) {
                uep->gpriv      = gpriv;
+               uep->pipe       = NULL;
                snprintf(uep->ep_name, EP_NAME_SIZE, "ep%d", i);
 
                uep->ep.name            = uep->ep_name;
index 3d3cd6ca2689894a497efefeb6ffc1edf6002cba..b86815421c8d4b6bbf86a54f39bd0e83c82d8bda 100644 (file)
@@ -661,9 +661,10 @@ static void usbhsh_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
                status = -ESHUTDOWN;
 
        urb->actual_length = pkt->actual;
-       usbhsh_ureq_free(hpriv, ureq);
 
        usbhsh_endpoint_sequence_save(hpriv, urb, pkt);
+       usbhsh_ureq_free(hpriv, ureq);
+
        usbhsh_pipe_detach(hpriv, usbhsh_ep_to_uep(urb->ep));
 
        usb_hcd_unlink_urb_from_ep(hcd, urb);
index f14736f647ff28ca31e583e08c1914879836978d..edc0f0dcad8378f4298d68815d2935404d8fedac 100644 (file)
@@ -60,6 +60,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
        { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */
        { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */
+       { USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */
        { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */
        { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */
        { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */
index 0a373b3ae96a77c5f43afbc316c5faaba82e0609..90ceef1776c34197c65fb58988e0fc5771184a35 100644 (file)
@@ -584,6 +584,7 @@ static struct usb_device_id id_table_combined [] = {
        /*
         * ELV devices:
         */
+       { USB_DEVICE(FTDI_ELV_VID, FTDI_ELV_WS300_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) },
@@ -670,6 +671,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
        { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
        { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
        { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_MHAM_KW_PID) },
@@ -875,6 +877,8 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
+       /* Crucible Devices */
+       { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
        { },                                    /* Optional parameter entry */
        { }                                     /* Terminating entry */
 };
index 049b6e715fa470921748a9d29a236946e148ec7a..9d359e189a645f7dcd13222433b9377787f81a34 100644 (file)
 #define XSENS_CONVERTER_6_PID  0xD38E
 #define XSENS_CONVERTER_7_PID  0xD38F
 
+/**
+ * Zolix (www.zolix.com.cb) product ids
+ */
+#define FTDI_OMNI1509                  0xD491  /* Omni1509 embedded USB-serial */
+
 /*
  * NDI (www.ndigital.com) product ids
  */
 
 /*
  * ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
- * All of these devices use FTDI's vendor ID (0x0403).
+ * Almost all of these devices use FTDI's vendor ID (0x0403).
  * Further IDs taken from ELV Windows .inf file.
  *
  * The previously included PID for the UO 100 module was incorrect.
  *
  * Armin Laeuger originally sent the PID for the UM 100 module.
  */
+#define FTDI_ELV_VID   0x1B1F  /* ELV AG */
+#define FTDI_ELV_WS300_PID     0xC006  /* eQ3 WS 300 PC II */
 #define FTDI_ELV_USR_PID       0xE000  /* ELV Universal-Sound-Recorder */
 #define FTDI_ELV_MSM1_PID      0xE001  /* ELV Mini-Sound-Modul */
 #define FTDI_ELV_KL100_PID     0xE002  /* ELV Kfz-Leistungsmesser KL 100 */
  * ATI command output: Cinterion MC55i
  */
 #define FTDI_CINTERION_MC55I_PID       0xA951
+
+/*
+ * Product: Comet Caller ID decoder
+ * Manufacturer: Crucible Technologies
+ */
+#define FTDI_CT_COMET_PID      0x8e08
index 58184f3de6867bfa9ebbbc6d6afbe074d80a8f12..82afc4d6a327d6bdbebc4707507684c966f11f9f 100644 (file)
@@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
        wait_queue_t wait;
        unsigned long flags;
 
+       if (!tty)
+               return;
+
        if (!timeout)
                timeout = (HZ * EDGE_CLOSING_WAIT)/100;
 
index e6f87b76c7156914dac281684e6130fde3b85ac0..567bc77d63970357b2dc9224f04c635ab727fc85 100644 (file)
@@ -242,6 +242,7 @@ static void option_instat_callback(struct urb *urb);
 #define TELIT_PRODUCT_CC864_DUAL               0x1005
 #define TELIT_PRODUCT_CC864_SINGLE             0x1006
 #define TELIT_PRODUCT_DE910_DUAL               0x1010
+#define TELIT_PRODUCT_LE920                    0x1200
 
 /* ZTE PRODUCTS */
 #define ZTE_VENDOR_ID                          0x19d2
@@ -288,6 +289,7 @@ static void option_instat_callback(struct urb *urb);
 #define ALCATEL_VENDOR_ID                      0x1bbb
 #define ALCATEL_PRODUCT_X060S_X200             0x0000
 #define ALCATEL_PRODUCT_X220_X500D             0x0017
+#define ALCATEL_PRODUCT_L100V                  0x011e
 
 #define PIRELLI_VENDOR_ID                      0x1266
 #define PIRELLI_PRODUCT_C100_1                 0x1002
@@ -429,9 +431,12 @@ static void option_instat_callback(struct urb *urb);
 #define MEDIATEK_VENDOR_ID                     0x0e8d
 #define MEDIATEK_PRODUCT_DC_1COM               0x00a0
 #define MEDIATEK_PRODUCT_DC_4COM               0x00a5
+#define MEDIATEK_PRODUCT_DC_4COM2              0x00a7
 #define MEDIATEK_PRODUCT_DC_5COM               0x00a4
 #define MEDIATEK_PRODUCT_7208_1COM             0x7101
 #define MEDIATEK_PRODUCT_7208_2COM             0x7102
+#define MEDIATEK_PRODUCT_7103_2COM             0x7103
+#define MEDIATEK_PRODUCT_7106_2COM             0x7106
 #define MEDIATEK_PRODUCT_FP_1COM               0x0003
 #define MEDIATEK_PRODUCT_FP_2COM               0x0023
 #define MEDIATEK_PRODUCT_FPDC_1COM             0x0043
@@ -441,6 +446,18 @@ static void option_instat_callback(struct urb *urb);
 #define CELLIENT_VENDOR_ID                     0x2692
 #define CELLIENT_PRODUCT_MEN200                        0x9005
 
+/* Hyundai Petatel Inc. products */
+#define PETATEL_VENDOR_ID                      0x1ff4
+#define PETATEL_PRODUCT_NP10T                  0x600e
+
+/* TP-LINK Incorporated products */
+#define TPLINK_VENDOR_ID                       0x2357
+#define TPLINK_PRODUCT_MA180                   0x0201
+
+/* Changhong products */
+#define CHANGHONG_VENDOR_ID                    0x2077
+#define CHANGHONG_PRODUCT_CH690                        0x7001
+
 /* some devices interfaces need special handling due to a number of reasons */
 enum option_blacklist_reason {
                OPTION_BLACKLIST_NONE = 0,
@@ -522,6 +539,11 @@ static const struct option_blacklist_info zte_1255_blacklist = {
        .reserved = BIT(3) | BIT(4),
 };
 
+static const struct option_blacklist_info telit_le920_blacklist = {
+       .sendsetup = BIT(0),
+       .reserved = BIT(1) | BIT(5),
+};
+
 static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -772,6 +794,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+               .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
                .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
@@ -922,8 +946,10 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
          .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff) },
-       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
          .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
@@ -1190,6 +1216,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
        },
        { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) },
+       { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
        { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
        { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
@@ -1294,7 +1322,15 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
        { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7103_2COM, 0xff, 0x00, 0x00) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
        { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
+       { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
+       { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
+         .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
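
The telit_le920_blacklist added above uses the driver's BIT() interface masks; as far as the visible code goes, interfaces flagged in .reserved are the ones the serial driver should leave alone. A short userspace sketch of how such a reserved mask would be interpreted, with illustrative values:

#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	unsigned long reserved = BIT(1) | BIT(5);	/* e.g. the LE920 entry above */

	for (int ifnum = 0; ifnum < 6; ifnum++)
		printf("interface %d: %s\n", ifnum,
		       (reserved & BIT(ifnum)) ? "reserved (not bound)"
					       : "bound as a serial port");
	return 0;
}
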
index aa148c21ea40e82a6f49b58b855ed2278f53cf3a..24662547dc5b2e60ffc51abc846e930e0afc1182 100644 (file)
@@ -53,6 +53,7 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_G1K(0x05c6, 0x9221)},   /* Generic Gobi QDL device */
        {DEVICE_G1K(0x05c6, 0x9231)},   /* Generic Gobi QDL device */
        {DEVICE_G1K(0x1f45, 0x0001)},   /* Unknown Gobi QDL device */
+       {DEVICE_G1K(0x1bc7, 0x900e)},   /* Telit Gobi QDL device */
 
        /* Gobi 2000 devices */
        {USB_DEVICE(0x1410, 0xa010)},   /* Novatel Gobi 2000 QDL device */
index 105d900150c1a80155c2afcec6a61f4fb07fd0b4..16b0bf055eeb08427436841fc784b07554dd27cc 100644 (file)
@@ -92,8 +92,8 @@ int usb_stor_ucr61s2b_init(struct us_data *us)
        return 0;
 }
 
-/* This places the HUAWEI E220 devices in multi-port mode */
-int usb_stor_huawei_e220_init(struct us_data *us)
+/* This places the HUAWEI usb dongles in multi-port mode */
+static int usb_stor_huawei_feature_init(struct us_data *us)
 {
        int result;
 
@@ -104,3 +104,75 @@ int usb_stor_huawei_e220_init(struct us_data *us)
        US_DEBUGP("Huawei mode set result is %d\n", result);
        return 0;
 }
+
+/*
+ * Send a SCSI "rewind" switch command to the Huawei dongle.
+ * The first time the dongle receives this command it reboots immediately;
+ * after that it ignores the command, so there is no need to read a response.
+ */
+static int usb_stor_huawei_scsi_init(struct us_data *us)
+{
+       int result = 0;
+       int act_len = 0;
+       struct bulk_cb_wrap *bcbw = (struct bulk_cb_wrap *) us->iobuf;
+       char rewind_cmd[] = {0x11, 0x06, 0x20, 0x00, 0x00, 0x01, 0x01, 0x00,
+                       0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+       bcbw->Signature = cpu_to_le32(US_BULK_CB_SIGN);
+       bcbw->Tag = 0;
+       bcbw->DataTransferLength = 0;
+       bcbw->Flags = bcbw->Lun = 0;
+       bcbw->Length = sizeof(rewind_cmd);
+       memset(bcbw->CDB, 0, sizeof(bcbw->CDB));
+       memcpy(bcbw->CDB, rewind_cmd, sizeof(rewind_cmd));
+
+       result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, bcbw,
+                                       US_BULK_CB_WRAP_LEN, &act_len);
+       US_DEBUGP("transfer actual length=%d, result=%d\n", act_len, result);
+       return result;
+}
+
+/*
+ * Check whether this is a supported Huawei USB dongle.
+ * Huawei assigns the product IDs below to all of its mobile broadband
+ * dongles, including future ones, so any product ID outside this list
+ * does not belong to a Huawei mobile broadband dongle.
+ */
+static int usb_stor_huawei_dongles_pid(struct us_data *us)
+{
+       struct usb_interface_descriptor *idesc;
+       int idProduct;
+
+       idesc = &us->pusb_intf->cur_altsetting->desc;
+       idProduct = us->pusb_dev->descriptor.idProduct;
+       /* If the first interface is the CD-ROM one, the dongle is still in
+        * single-port mode and the switch command must be sent. */
+       if (idesc && idesc->bInterfaceNumber == 0) {
+               if ((idProduct == 0x1001)
+                       || (idProduct == 0x1003)
+                       || (idProduct == 0x1004)
+                       || (idProduct >= 0x1401 && idProduct <= 0x1500)
+                       || (idProduct >= 0x1505 && idProduct <= 0x1600)
+                       || (idProduct >= 0x1c02 && idProduct <= 0x2202)) {
+                       return 1;
+               }
+       }
+       return 0;
+}
+
+int usb_stor_huawei_init(struct us_data *us)
+{
+       int result = 0;
+
+       if (usb_stor_huawei_dongles_pid(us)) {
+               if (us->pusb_dev->descriptor.idProduct >= 0x1446)
+                       result = usb_stor_huawei_scsi_init(us);
+               else
+                       result = usb_stor_huawei_feature_init(us);
+       }
+       return result;
+}
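
For reference, the rewind initializer above hand-builds a Bulk-Only Transport Command Block Wrapper in the us_data iobuf. A standalone sketch of the same 31-byte CBW layout (field names simplified; the program just fills and prints the structure):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct bulk_cb_wrap {
	uint32_t signature;		/* 'USBC' = 0x43425355, little-endian */
	uint32_t tag;
	uint32_t data_transfer_length;
	uint8_t  flags;
	uint8_t  lun;
	uint8_t  length;		/* valid bytes in cdb[] */
	uint8_t  cdb[16];
} __attribute__((packed));

int main(void)
{
	static const uint8_t rewind_cmd[16] = { 0x11, 0x06, 0x20, 0x00, 0x00, 0x01,
						0x01, 0x00, 0x01 };
	struct bulk_cb_wrap cbw;

	memset(&cbw, 0, sizeof(cbw));
	cbw.signature = 0x43425355;		/* US_BULK_CB_SIGN */
	cbw.length = sizeof(rewind_cmd);
	memcpy(cbw.cdb, rewind_cmd, sizeof(rewind_cmd));

	printf("CBW is %zu bytes, CDB starts 0x%02x 0x%02x\n",
	       sizeof(cbw), cbw.cdb[0], cbw.cdb[1]);
	return 0;
}
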
index 529327fbb06be1b6876e8802b84bd281287291c3..5376d4fc76f04c25092244765237f94f44456ba1 100644 (file)
@@ -46,5 +46,5 @@ int usb_stor_euscsi_init(struct us_data *us);
  * flash reader */
 int usb_stor_ucr61s2b_init(struct us_data *us);
 
-/* This places the HUAWEI E220 devices in multi-port mode */
-int usb_stor_huawei_e220_init(struct us_data *us);
+/* This places the HUAWEI usb dongles in multi-port mode */
+int usb_stor_huawei_init(struct us_data *us);
index d305a5aa3a5d7683a5bcd1c2e60c4a0262f35f65..72923b56bbf6b871a6da926168d43cf8db3e6383 100644 (file)
@@ -1527,335 +1527,10 @@ UNUSUAL_DEV(  0x1210, 0x0003, 0x0100, 0x0100,
 /* Reported by fangxiaozhi <huananhu@huawei.com>
  * This brings the HUAWEI data card devices into multi-port mode
  */
-UNUSUAL_DEV(  0x12d1, 0x1001, 0x0000, 0x0000,
+UNUSUAL_VENDOR_INTF(0x12d1, 0x08, 0x06, 0x50,
                "HUAWEI MOBILE",
                "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1003, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1004, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1401, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1402, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1403, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1404, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1405, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1406, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1407, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1408, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1409, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x140A, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x140B, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x140C, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x140D, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x140E, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x140F, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1410, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1411, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1412, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1413, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1414, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1415, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1416, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1417, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1418, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1419, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x141A, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x141B, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x141C, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x141D, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x141E, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x141F, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1420, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1421, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1422, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1423, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1424, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1425, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1426, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1427, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1428, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1429, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x142A, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x142B, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x142C, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x142D, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x142E, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x142F, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1430, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1431, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1432, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1433, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1434, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1435, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1436, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1437, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1438, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x1439, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x143A, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x143B, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x143C, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x143D, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x143E, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
-               0),
-UNUSUAL_DEV(  0x12d1, 0x143F, 0x0000, 0x0000,
-               "HUAWEI MOBILE",
-               "Mass Storage",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_e220_init,
+               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_huawei_init,
                0),
 
 /* Reported by Vilius Bilinkevicius <vilisas AT xxx DOT lt> */
index 31b3e1a61bbdda035d5ce1e5ef63ac05e36b5b26..cf09b6ba71ff49f5be0e367811f72e3794630f32 100644 (file)
@@ -120,6 +120,17 @@ MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks");
        .useTransport = use_transport,  \
 }
 
+#define UNUSUAL_VENDOR_INTF(idVendor, cl, sc, pr, \
+               vendor_name, product_name, use_protocol, use_transport, \
+               init_function, Flags) \
+{ \
+       .vendorName = vendor_name,      \
+       .productName = product_name,    \
+       .useProtocol = use_protocol,    \
+       .useTransport = use_transport,  \
+       .initFunction = init_function,  \
+}
+
 static struct us_unusual_dev us_unusual_dev_list[] = {
 #      include "unusual_devs.h"
        { }             /* Terminating entry */
@@ -131,6 +142,7 @@ static struct us_unusual_dev for_dynamic_ids =
 #undef UNUSUAL_DEV
 #undef COMPLIANT_DEV
 #undef USUAL_DEV
+#undef UNUSUAL_VENDOR_INTF
 
 #ifdef CONFIG_LOCKDEP
 
index b78a526910fb31ac257a13800c593236c0e21a27..5ef8ce74aae456c741bdff0856b564e9edb156a0 100644 (file)
 #define USUAL_DEV(useProto, useTrans) \
 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans) }
 
+/* Define a device that is matched by vendor ID and interface descriptors */
+#define UNUSUAL_VENDOR_INTF(id_vendor, cl, sc, pr, \
+                       vendorName, productName, useProtocol, useTransport, \
+                       initFunction, flags) \
+{ \
+       .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
+                               | USB_DEVICE_ID_MATCH_VENDOR, \
+       .idVendor    = (id_vendor), \
+       .bInterfaceClass = (cl), \
+       .bInterfaceSubClass = (sc), \
+       .bInterfaceProtocol = (pr), \
+       .driver_info = (flags) \
+}
+
 struct usb_device_id usb_storage_usb_ids[] = {
 #      include "unusual_devs.h"
        { }             /* Terminating entry */
@@ -50,6 +64,7 @@ MODULE_DEVICE_TABLE(usb, usb_storage_usb_ids);
 #undef UNUSUAL_DEV
 #undef COMPLIANT_DEV
 #undef USUAL_DEV
+#undef UNUSUAL_VENDOR_INTF
 
 /*
  * The table of devices to ignore
index 4362d9e7baa355a971ed5935a486757b49699154..f72323ef618fbd99ce88d0c99480d165b94e2f42 100644 (file)
@@ -240,17 +240,17 @@ ssize_t vfio_pci_mem_readwrite(struct vfio_pci_device *vdev, char __user *buf,
                        filled = 1;
                } else {
                        /* Drop writes, fill reads with FF */
+                       filled = min((size_t)(x_end - pos), count);
                        if (!iswrite) {
                                char val = 0xFF;
                                size_t i;
 
-                               for (i = 0; i < x_end - pos; i++) {
+                               for (i = 0; i < filled; i++) {
                                        if (put_user(val, buf + i))
                                                goto out;
                                }
                        }
 
-                       filled = x_end - pos;
                }
 
                count -= filled;
index ebd08b21b23432696047487367d44f76d4fa4f02..959b1cd89e6a5be5a402a79089077609f8e30716 100644 (file)
@@ -165,12 +165,16 @@ static void tx_poll_stop(struct vhost_net *net)
 }
 
 /* Caller must have TX VQ lock */
-static void tx_poll_start(struct vhost_net *net, struct socket *sock)
+static int tx_poll_start(struct vhost_net *net, struct socket *sock)
 {
+       int ret;
+
        if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
-               return;
-       vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
-       net->tx_poll_state = VHOST_NET_POLL_STARTED;
+               return 0;
+       ret = vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
+       if (!ret)
+               net->tx_poll_state = VHOST_NET_POLL_STARTED;
+       return ret;
 }
 
 /* In case of DMA done not in order in lower device driver for some reason.
@@ -642,20 +646,23 @@ static void vhost_net_disable_vq(struct vhost_net *n,
                vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
 }
 
-static void vhost_net_enable_vq(struct vhost_net *n,
+static int vhost_net_enable_vq(struct vhost_net *n,
                                struct vhost_virtqueue *vq)
 {
        struct socket *sock;
+       int ret;
 
        sock = rcu_dereference_protected(vq->private_data,
                                         lockdep_is_held(&vq->mutex));
        if (!sock)
-               return;
+               return 0;
        if (vq == n->vqs + VHOST_NET_VQ_TX) {
                n->tx_poll_state = VHOST_NET_POLL_STOPPED;
-               tx_poll_start(n, sock);
+               ret = tx_poll_start(n, sock);
        } else
-               vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
+               ret = vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
+
+       return ret;
 }
 
 static struct socket *vhost_net_stop_vq(struct vhost_net *n,
@@ -827,15 +834,18 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
                        r = PTR_ERR(ubufs);
                        goto err_ubufs;
                }
-               oldubufs = vq->ubufs;
-               vq->ubufs = ubufs;
+
                vhost_net_disable_vq(n, vq);
                rcu_assign_pointer(vq->private_data, sock);
-               vhost_net_enable_vq(n, vq);
-
                r = vhost_init_used(vq);
                if (r)
-                       goto err_vq;
+                       goto err_used;
+               r = vhost_net_enable_vq(n, vq);
+               if (r)
+                       goto err_used;
+
+               oldubufs = vq->ubufs;
+               vq->ubufs = ubufs;
 
                n->tx_packets = 0;
                n->tx_zcopy_err = 0;
@@ -859,6 +869,11 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
        mutex_unlock(&n->dev.mutex);
        return 0;
 
+err_used:
+       rcu_assign_pointer(vq->private_data, oldsock);
+       vhost_net_enable_vq(n, vq);
+       if (ubufs)
+               vhost_ubuf_put_and_wait(ubufs);
 err_ubufs:
        fput(sock->file);
 err_vq:
index b20df5c829f5036042008d37f6ab20d424e977cd..22321cf84fbe2ce4c19090928ba5ecb83b8e5486 100644 (file)
@@ -575,10 +575,8 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
 
        /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
        tv_tpg = vs->vs_tpg;
-       if (unlikely(!tv_tpg)) {
-               pr_err("%s endpoint not set\n", __func__);
+       if (unlikely(!tv_tpg))
                return;
-       }
 
        mutex_lock(&vq->mutex);
        vhost_disable_notify(&vs->dev, vq);
index 34389f75fe65693a4ad5baa36715159fd5759bbb..9759249e6d908867cf72f53e5bb8e4ecc30db8ae 100644 (file)
@@ -77,26 +77,38 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
        init_poll_funcptr(&poll->table, vhost_poll_func);
        poll->mask = mask;
        poll->dev = dev;
+       poll->wqh = NULL;
 
        vhost_work_init(&poll->work, fn);
 }
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
  * keep a reference to a file until after vhost_poll_stop is called. */
-void vhost_poll_start(struct vhost_poll *poll, struct file *file)
+int vhost_poll_start(struct vhost_poll *poll, struct file *file)
 {
        unsigned long mask;
+       int ret = 0;
 
        mask = file->f_op->poll(file, &poll->table);
        if (mask)
                vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
+       if (mask & POLLERR) {
+               if (poll->wqh)
+                       remove_wait_queue(poll->wqh, &poll->wait);
+               ret = -EINVAL;
+       }
+
+       return ret;
 }
 
 /* Stop polling a file. After this function returns, it becomes safe to drop the
  * file reference. You must also flush afterwards. */
 void vhost_poll_stop(struct vhost_poll *poll)
 {
-       remove_wait_queue(poll->wqh, &poll->wait);
+       if (poll->wqh) {
+               remove_wait_queue(poll->wqh, &poll->wait);
+               poll->wqh = NULL;
+       }
 }
 
 static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
@@ -792,7 +804,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
                fput(filep);
 
        if (pollstart && vq->handle_kick)
-               vhost_poll_start(&vq->poll, vq->kick);
+               r = vhost_poll_start(&vq->poll, vq->kick);
 
        mutex_unlock(&vq->mutex);
 
index 2639c58b23ab497ace850895f3322df661a965df..17261e277c022abbffe8effc6a8492336ea7edef 100644 (file)
@@ -42,7 +42,7 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
 
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
                     unsigned long mask, struct vhost_dev *dev);
-void vhost_poll_start(struct vhost_poll *poll, struct file *file);
+int vhost_poll_start(struct vhost_poll *poll, struct file *file);
 void vhost_poll_stop(struct vhost_poll *poll);
 void vhost_poll_flush(struct vhost_poll *poll);
 void vhost_poll_queue(struct vhost_poll *poll);
index 4ef18e2e90ccc41ffd89bc1d4c5dc1abf8626f1d..2d0d144add1ba21d0f2ca8eb0752774ba5586f63 100644 (file)
@@ -1121,8 +1121,7 @@ static int exynos_dp_remove(struct platform_device *pdev)
 
        disable_irq(dp->irq);
 
-       if (work_pending(&dp->hotplug_work))
-               flush_work(&dp->hotplug_work);
+       flush_work(&dp->hotplug_work);
 
        if (pdev->dev.of_node) {
                if (dp->phy_addr)
@@ -1144,8 +1143,7 @@ static int exynos_dp_suspend(struct device *dev)
        struct exynos_dp_platdata *pdata = dev->platform_data;
        struct exynos_dp_device *dp = dev_get_drvdata(dev);
 
-       if (work_pending(&dp->hotplug_work))
-               flush_work(&dp->hotplug_work);
+       flush_work(&dp->hotplug_work);
 
        if (dev->of_node) {
                if (dp->phy_addr)
index 12526787a7c741e3b4eb2601dc9f2f4b008d251b..0abf2bf2083604c48006a63682c410c7125109fb 100644 (file)
@@ -139,6 +139,7 @@ struct imxfb_info {
        struct clk              *clk_ahb;
        struct clk              *clk_per;
        enum imxfb_type         devtype;
+       bool                    enabled;
 
        /*
         * These are the addresses we mapped
@@ -536,6 +537,10 @@ static void imxfb_exit_backlight(struct imxfb_info *fbi)
 
 static void imxfb_enable_controller(struct imxfb_info *fbi)
 {
+
+       if (fbi->enabled)
+               return;
+
        pr_debug("Enabling LCD controller\n");
 
        writel(fbi->screen_dma, fbi->regs + LCDC_SSA);
@@ -556,6 +561,7 @@ static void imxfb_enable_controller(struct imxfb_info *fbi)
        clk_prepare_enable(fbi->clk_ipg);
        clk_prepare_enable(fbi->clk_ahb);
        clk_prepare_enable(fbi->clk_per);
+       fbi->enabled = true;
 
        if (fbi->backlight_power)
                fbi->backlight_power(1);
@@ -565,6 +571,9 @@ static void imxfb_enable_controller(struct imxfb_info *fbi)
 
 static void imxfb_disable_controller(struct imxfb_info *fbi)
 {
+       if (!fbi->enabled)
+               return;
+
        pr_debug("Disabling LCD controller\n");
 
        if (fbi->backlight_power)
@@ -575,6 +584,7 @@ static void imxfb_disable_controller(struct imxfb_info *fbi)
        clk_disable_unprepare(fbi->clk_per);
        clk_disable_unprepare(fbi->clk_ipg);
        clk_disable_unprepare(fbi->clk_ahb);
+       fbi->enabled = false;
 
        writel(0, fbi->regs + LCDC_RMCR);
 }
@@ -729,6 +739,8 @@ static int __init imxfb_init_fbinfo(struct platform_device *pdev)
 
        memset(fbi, 0, sizeof(struct imxfb_info));
 
+       fbi->devtype = pdev->id_entry->driver_data;
+
        strlcpy(info->fix.id, IMX_NAME, sizeof(info->fix.id));
 
        info->fix.type                  = FB_TYPE_PACKED_PIXELS;
@@ -789,7 +801,6 @@ static int __init imxfb_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        fbi = info->par;
-       fbi->devtype = pdev->id_entry->driver_data;
 
        if (!fb_mode)
                fb_mode = pdata->mode[0].mode.name;
index 18688c12e30d9e5399d732ce6d972d101ae46b55..d7d66ef5cb58098c6f80268651c0a277957fc2e0 100644 (file)
@@ -538,6 +538,7 @@ static const enum dss_feat_id omap3630_dss_feat_list[] = {
        FEAT_ALPHA_FIXED_ZORDER,
        FEAT_FIFO_MERGE,
        FEAT_OMAP3_DSI_FIFO_BUG,
+       FEAT_DPI_USES_VDDS_DSI,
 };
 
 static const enum dss_feat_id omap4430_es1_0_dss_feat_list[] = {
index 4d99dd7a6831f4be0e866bcc552f62225faa526e..395cb6a8d8f3a86e3507be05686218ebee6f7d6f 100644 (file)
@@ -145,8 +145,8 @@ static void ssd1307fb_update_display(struct ssd1307fb_par *par)
                                u32 page_length = SSD1307FB_WIDTH * i;
                                u32 index = page_length + (SSD1307FB_WIDTH * k + j) / 8;
                                u8 byte = *(vmem + index);
-                               u8 bit = byte & (1 << (7 - (j % 8)));
-                               bit = bit >> (7 - (j % 8));
+                               u8 bit = byte & (1 << (j % 8));
+                               bit = bit >> (j % 8);
                                buf |= bit << k;
                        }
                        ssd1307fb_write_data(par->client, buf);
index 4dcfced107f50e41c518405b28747064d62d78bc..084041d42c9adabfc40251546d361ad76d8987e5 100644 (file)
@@ -25,10 +25,10 @@ static void disable_hotplug_cpu(int cpu)
 static int vcpu_online(unsigned int cpu)
 {
        int err;
-       char dir[32], state[32];
+       char dir[16], state[16];
 
        sprintf(dir, "cpu/%u", cpu);
-       err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state);
+       err = xenbus_scanf(XBT_NIL, dir, "availability", "%15s", state);
        if (err != 1) {
                if (!xen_initial_domain())
                        printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
index 0be4df39e953a5b870948e6ed8a1553a15cc2c2a..22f77c5f60120ea5d2737294cf08be25e2ce8bf9 100644 (file)
@@ -840,7 +840,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 
        if (irq == -1) {
                irq = xen_allocate_irq_dynamic();
-               if (irq == -1)
+               if (irq < 0)
                        goto out;
 
                irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
@@ -944,7 +944,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 
        if (irq == -1) {
                irq = xen_allocate_irq_dynamic();
-               if (irq == -1)
+               if (irq < 0)
                        goto out;
 
                irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
@@ -1787,7 +1787,7 @@ void xen_callback_vector(void)
        int rc;
        uint64_t callback_via;
        if (xen_have_vector_callback) {
-               callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
+               callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
                rc = xen_set_callback_via(callback_via);
                if (rc) {
                        printk(KERN_ERR "Request for Xen HVM callback vector"
@@ -1798,8 +1798,9 @@ void xen_callback_vector(void)
                printk(KERN_INFO "Xen HVM callback vector for event delivery is "
                                "enabled\n");
                /* in the restore case the vector has already been allocated */
-               if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
-                       alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
+               if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
+                       alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
+                                       xen_hvm_callback_vector);
        }
 }
 #else
index 2e22df2f7a3f8b57fe44ce8842b825602a012658..3c8803feba26cdcba8bd890225ac7995ef72810e 100644 (file)
@@ -56,10 +56,15 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
 static atomic_t pages_mapped = ATOMIC_INIT(0);
 
 static int use_ptemod;
+#define populate_freeable_maps use_ptemod
 
 struct gntdev_priv {
+       /* maps with visible offsets in the file descriptor */
        struct list_head maps;
-       /* lock protects maps from concurrent changes */
+       /* maps that are not visible; will be freed on munmap.
+        * Only populated if populate_freeable_maps == 1 */
+       struct list_head freeable_maps;
+       /* lock protects maps and freeable_maps */
        spinlock_t lock;
        struct mm_struct *mm;
        struct mmu_notifier mn;
@@ -193,7 +198,7 @@ static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
        return NULL;
 }
 
-static void gntdev_put_map(struct grant_map *map)
+static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
 {
        if (!map)
                return;
@@ -208,6 +213,12 @@ static void gntdev_put_map(struct grant_map *map)
                evtchn_put(map->notify.event);
        }
 
+       if (populate_freeable_maps && priv) {
+               spin_lock(&priv->lock);
+               list_del(&map->next);
+               spin_unlock(&priv->lock);
+       }
+
        if (map->pages && !use_ptemod)
                unmap_grant_pages(map, 0, map->count);
        gntdev_free_map(map);
@@ -301,17 +312,10 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
 
        if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
                int pgno = (map->notify.addr >> PAGE_SHIFT);
-               if (pgno >= offset && pgno < offset + pages && use_ptemod) {
-                       void __user *tmp = (void __user *)
-                               map->vma->vm_start + map->notify.addr;
-                       err = copy_to_user(tmp, &err, 1);
-                       if (err)
-                               return -EFAULT;
-                       map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
-               } else if (pgno >= offset && pgno < offset + pages) {
-                       uint8_t *tmp = kmap(map->pages[pgno]);
+               if (pgno >= offset && pgno < offset + pages) {
+                       /* No need for kmap, pages are in lowmem */
+                       uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
                        tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
-                       kunmap(map->pages[pgno]);
                        map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
                }
        }
@@ -376,11 +380,24 @@ static void gntdev_vma_open(struct vm_area_struct *vma)
 static void gntdev_vma_close(struct vm_area_struct *vma)
 {
        struct grant_map *map = vma->vm_private_data;
+       struct file *file = vma->vm_file;
+       struct gntdev_priv *priv = file->private_data;
 
        pr_debug("gntdev_vma_close %p\n", vma);
-       map->vma = NULL;
+       if (use_ptemod) {
+               /* It is possible that an mmu notifier could be running
+                * concurrently, so take priv->lock to ensure that the vma won't
+                * vanish during the unmap_grant_pages call, since we will
+                * spin here until that completes. Such a concurrent call will
+                * not do any unmapping, since that has been done prior to
+                * closing the vma, but it may still iterate the unmap_ops list.
+                */
+               spin_lock(&priv->lock);
+               map->vma = NULL;
+               spin_unlock(&priv->lock);
+       }
        vma->vm_private_data = NULL;
-       gntdev_put_map(map);
+       gntdev_put_map(priv, map);
 }
 
 static struct vm_operations_struct gntdev_vmops = {
@@ -390,33 +407,43 @@ static struct vm_operations_struct gntdev_vmops = {
 
 /* ------------------------------------------------------------------ */
 
+static void unmap_if_in_range(struct grant_map *map,
+                             unsigned long start, unsigned long end)
+{
+       unsigned long mstart, mend;
+       int err;
+
+       if (!map->vma)
+               return;
+       if (map->vma->vm_start >= end)
+               return;
+       if (map->vma->vm_end <= start)
+               return;
+       mstart = max(start, map->vma->vm_start);
+       mend   = min(end,   map->vma->vm_end);
+       pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
+                       map->index, map->count,
+                       map->vma->vm_start, map->vma->vm_end,
+                       start, end, mstart, mend);
+       err = unmap_grant_pages(map,
+                               (mstart - map->vma->vm_start) >> PAGE_SHIFT,
+                               (mend - mstart) >> PAGE_SHIFT);
+       WARN_ON(err);
+}
+
 static void mn_invl_range_start(struct mmu_notifier *mn,
                                struct mm_struct *mm,
                                unsigned long start, unsigned long end)
 {
        struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
        struct grant_map *map;
-       unsigned long mstart, mend;
-       int err;
 
        spin_lock(&priv->lock);
        list_for_each_entry(map, &priv->maps, next) {
-               if (!map->vma)
-                       continue;
-               if (map->vma->vm_start >= end)
-                       continue;
-               if (map->vma->vm_end <= start)
-                       continue;
-               mstart = max(start, map->vma->vm_start);
-               mend   = min(end,   map->vma->vm_end);
-               pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
-                               map->index, map->count,
-                               map->vma->vm_start, map->vma->vm_end,
-                               start, end, mstart, mend);
-               err = unmap_grant_pages(map,
-                                       (mstart - map->vma->vm_start) >> PAGE_SHIFT,
-                                       (mend - mstart) >> PAGE_SHIFT);
-               WARN_ON(err);
+               unmap_if_in_range(map, start, end);
+       }
+       list_for_each_entry(map, &priv->freeable_maps, next) {
+               unmap_if_in_range(map, start, end);
        }
        spin_unlock(&priv->lock);
 }
@@ -445,6 +472,15 @@ static void mn_release(struct mmu_notifier *mn,
                err = unmap_grant_pages(map, /* offset */ 0, map->count);
                WARN_ON(err);
        }
+       list_for_each_entry(map, &priv->freeable_maps, next) {
+               if (!map->vma)
+                       continue;
+               pr_debug("map %d+%d (%lx %lx)\n",
+                               map->index, map->count,
+                               map->vma->vm_start, map->vma->vm_end);
+               err = unmap_grant_pages(map, /* offset */ 0, map->count);
+               WARN_ON(err);
+       }
        spin_unlock(&priv->lock);
 }
 
@@ -466,6 +502,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
                return -ENOMEM;
 
        INIT_LIST_HEAD(&priv->maps);
+       INIT_LIST_HEAD(&priv->freeable_maps);
        spin_lock_init(&priv->lock);
 
        if (use_ptemod) {
@@ -500,8 +537,9 @@ static int gntdev_release(struct inode *inode, struct file *flip)
        while (!list_empty(&priv->maps)) {
                map = list_entry(priv->maps.next, struct grant_map, next);
                list_del(&map->next);
-               gntdev_put_map(map);
+               gntdev_put_map(NULL /* already removed */, map);
        }
+       WARN_ON(!list_empty(&priv->freeable_maps));
 
        if (use_ptemod)
                mmu_notifier_unregister(&priv->mn, priv->mm);
@@ -529,14 +567,14 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
 
        if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
                pr_debug("can't map: over limit\n");
-               gntdev_put_map(map);
+               gntdev_put_map(NULL, map);
                return err;
        }
 
        if (copy_from_user(map->grants, &u->refs,
                           sizeof(map->grants[0]) * op.count) != 0) {
-               gntdev_put_map(map);
-               return err;
+               gntdev_put_map(NULL, map);
+               return -EFAULT;
        }
 
        spin_lock(&priv->lock);
@@ -565,11 +603,13 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
        map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
        if (map) {
                list_del(&map->next);
+               if (populate_freeable_maps)
+                       list_add_tail(&map->next, &priv->freeable_maps);
                err = 0;
        }
        spin_unlock(&priv->lock);
        if (map)
-               gntdev_put_map(map);
+               gntdev_put_map(priv, map);
        return err;
 }
 
@@ -579,25 +619,31 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
        struct ioctl_gntdev_get_offset_for_vaddr op;
        struct vm_area_struct *vma;
        struct grant_map *map;
+       int rv = -EINVAL;
 
        if (copy_from_user(&op, u, sizeof(op)) != 0)
                return -EFAULT;
        pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
 
+       down_read(&current->mm->mmap_sem);
        vma = find_vma(current->mm, op.vaddr);
        if (!vma || vma->vm_ops != &gntdev_vmops)
-               return -EINVAL;
+               goto out_unlock;
 
        map = vma->vm_private_data;
        if (!map)
-               return -EINVAL;
+               goto out_unlock;
 
        op.offset = map->index << PAGE_SHIFT;
        op.count = map->count;
+       rv = 0;
 
-       if (copy_to_user(u, &op, sizeof(op)) != 0)
+ out_unlock:
+       up_read(&current->mm->mmap_sem);
+
+       if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
                return -EFAULT;
-       return 0;
+       return rv;
 }
 
 static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
@@ -778,7 +824,7 @@ out_unlock_put:
 out_put_map:
        if (use_ptemod)
                map->vma = NULL;
-       gntdev_put_map(map);
+       gntdev_put_map(priv, map);
        return err;
 }
 
index 7038de53652b3cd8999ac7d642fd84441c79dcd5..157c0ccda3efdce0e8a3c4b9ab0937ade1d932ec 100644 (file)
 /* External tools reserve first few grant table entries. */
 #define NR_RESERVED_ENTRIES 8
 #define GNTTAB_LIST_END 0xffffffff
-#define GREFS_PER_GRANT_FRAME \
-(grant_table_version == 1 ?                      \
-(PAGE_SIZE / sizeof(struct grant_entry_v1)) :   \
-(PAGE_SIZE / sizeof(union grant_entry_v2)))
 
 static grant_ref_t **gnttab_list;
 static unsigned int nr_grant_frames;
@@ -154,6 +150,7 @@ static struct gnttab_ops *gnttab_interface;
 static grant_status_t *grstatus;
 
 static int grant_table_version;
+static int grefs_per_grant_frame;
 
 static struct gnttab_free_callback *gnttab_free_callback_list;
 
@@ -767,12 +764,14 @@ static int grow_gnttab_list(unsigned int more_frames)
        unsigned int new_nr_grant_frames, extra_entries, i;
        unsigned int nr_glist_frames, new_nr_glist_frames;
 
+       BUG_ON(grefs_per_grant_frame == 0);
+
        new_nr_grant_frames = nr_grant_frames + more_frames;
-       extra_entries       = more_frames * GREFS_PER_GRANT_FRAME;
+       extra_entries       = more_frames * grefs_per_grant_frame;
 
-       nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
+       nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
        new_nr_glist_frames =
-               (new_nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
+               (new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
        for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
                gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
                if (!gnttab_list[i])
@@ -780,12 +779,12 @@ static int grow_gnttab_list(unsigned int more_frames)
        }
 
 
-       for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames;
-            i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
+       for (i = grefs_per_grant_frame * nr_grant_frames;
+            i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++)
                gnttab_entry(i) = i + 1;
 
        gnttab_entry(i) = gnttab_free_head;
-       gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames;
+       gnttab_free_head = grefs_per_grant_frame * nr_grant_frames;
        gnttab_free_count += extra_entries;
 
        nr_grant_frames = new_nr_grant_frames;
@@ -957,7 +956,8 @@ EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
 
 static unsigned nr_status_frames(unsigned nr_grant_frames)
 {
-       return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP;
+       BUG_ON(grefs_per_grant_frame == 0);
+       return (nr_grant_frames * grefs_per_grant_frame + SPP - 1) / SPP;
 }
 
 static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
@@ -1115,6 +1115,7 @@ static void gnttab_request_version(void)
        rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
        if (rc == 0 && gsv.version == 2) {
                grant_table_version = 2;
+               grefs_per_grant_frame = PAGE_SIZE / sizeof(union grant_entry_v2);
                gnttab_interface = &gnttab_v2_ops;
        } else if (grant_table_version == 2) {
                /*
@@ -1127,17 +1128,17 @@ static void gnttab_request_version(void)
                panic("we need grant tables version 2, but only version 1 is available");
        } else {
                grant_table_version = 1;
+               grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1);
                gnttab_interface = &gnttab_v1_ops;
        }
        printk(KERN_INFO "Grant tables using version %d layout.\n",
                grant_table_version);
 }
 
-int gnttab_resume(void)
+static int gnttab_setup(void)
 {
        unsigned int max_nr_gframes;
 
-       gnttab_request_version();
        max_nr_gframes = gnttab_max_grant_frames();
        if (max_nr_gframes < nr_grant_frames)
                return -ENOSYS;
@@ -1160,6 +1161,12 @@ int gnttab_resume(void)
        return 0;
 }
 
+int gnttab_resume(void)
+{
+       gnttab_request_version();
+       return gnttab_setup();
+}
+
 int gnttab_suspend(void)
 {
        gnttab_interface->unmap_frames();
@@ -1171,9 +1178,10 @@ static int gnttab_expand(unsigned int req_entries)
        int rc;
        unsigned int cur, extra;
 
+       BUG_ON(grefs_per_grant_frame == 0);
        cur = nr_grant_frames;
-       extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) /
-                GREFS_PER_GRANT_FRAME);
+       extra = ((req_entries + (grefs_per_grant_frame-1)) /
+                grefs_per_grant_frame);
        if (cur + extra > gnttab_max_grant_frames())
                return -ENOSPC;
 
@@ -1191,21 +1199,23 @@ int gnttab_init(void)
        unsigned int nr_init_grefs;
        int ret;
 
+       gnttab_request_version();
        nr_grant_frames = 1;
        boot_max_nr_grant_frames = __max_nr_grant_frames();
 
        /* Determine the maximum number of frames required for the
         * grant reference free list on the current hypervisor.
         */
+       BUG_ON(grefs_per_grant_frame == 0);
        max_nr_glist_frames = (boot_max_nr_grant_frames *
-                              GREFS_PER_GRANT_FRAME / RPP);
+                              grefs_per_grant_frame / RPP);
 
        gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
                              GFP_KERNEL);
        if (gnttab_list == NULL)
                return -ENOMEM;
 
-       nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
+       nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
        for (i = 0; i < nr_glist_frames; i++) {
                gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
                if (gnttab_list[i] == NULL) {
@@ -1214,12 +1224,12 @@ int gnttab_init(void)
                }
        }
 
-       if (gnttab_resume() < 0) {
+       if (gnttab_setup() < 0) {
                ret = -ENODEV;
                goto ini_nomem;
        }
 
-       nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME;
+       nr_init_grefs = nr_grant_frames * grefs_per_grant_frame;
 
        for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
                gnttab_entry(i) = i + 1;
index 067fcfa1723e0b846b9522038edb5ef1dc96f4e0..5a27a4599a4a8dbb28aab05c305b799b7883d2ac 100644 (file)
@@ -278,8 +278,7 @@ static int sync_pcpu(uint32_t cpu, uint32_t *max_cpu)
         * Only those at cpu present map has its sys interface.
         */
        if (info->flags & XEN_PCPU_FLAGS_INVALID) {
-               if (pcpu)
-                       unregister_and_remove_pcpu(pcpu);
+               unregister_and_remove_pcpu(pcpu);
                return 0;
        }
 
index 0bbbccbb1f1296ea305414795b6915679b26d94d..ca2b00e9d558c8fea579f4667106eaccee0adb84 100644 (file)
@@ -199,9 +199,6 @@ static long privcmd_ioctl_mmap(void __user *udata)
        LIST_HEAD(pagelist);
        struct mmap_mfn_state state;
 
-       if (!xen_initial_domain())
-               return -EPERM;
-
        /* We only support privcmd_ioctl_mmap_batch for auto translated. */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -ENOSYS;
@@ -261,11 +258,12 @@ struct mmap_batch_state {
         *      -ENOENT if at least 1 -ENOENT has happened.
         */
        int global_error;
-       /* An array for individual errors */
-       int *err;
+       int version;
 
        /* User-space mfn array to store errors in the second pass for V1. */
        xen_pfn_t __user *user_mfn;
+       /* User-space int array to store errors in the second pass for V2. */
+       int __user *user_err;
 };
 
 /* auto translated dom0 note: if domU being created is PV, then mfn is
@@ -288,7 +286,19 @@ static int mmap_batch_fn(void *data, void *state)
                                         &cur_page);
 
        /* Store error code for second pass. */
-       *(st->err++) = ret;
+       if (st->version == 1) {
+               if (ret < 0) {
+                       /*
+                        * V1 encodes the error codes in the 32bit top nibble of the
+                        * mfn (with its known limitations vis-a-vis 64 bit callers).
+                        */
+                       *mfnp |= (ret == -ENOENT) ?
+                                               PRIVCMD_MMAPBATCH_PAGED_ERROR :
+                                               PRIVCMD_MMAPBATCH_MFN_ERROR;
+               }
+       } else { /* st->version == 2 */
+               *((int *) mfnp) = ret;
+       }
 
        /* And see if it affects the global_error. */
        if (ret < 0) {
@@ -305,20 +315,25 @@ static int mmap_batch_fn(void *data, void *state)
        return 0;
 }
 
-static int mmap_return_errors_v1(void *data, void *state)
+static int mmap_return_errors(void *data, void *state)
 {
-       xen_pfn_t *mfnp = data;
        struct mmap_batch_state *st = state;
-       int err = *(st->err++);
 
-       /*
-        * V1 encodes the error codes in the 32bit top nibble of the
-        * mfn (with its known limitations vis-a-vis 64 bit callers).
-        */
-       *mfnp |= (err == -ENOENT) ?
-                               PRIVCMD_MMAPBATCH_PAGED_ERROR :
-                               PRIVCMD_MMAPBATCH_MFN_ERROR;
-       return __put_user(*mfnp, st->user_mfn++);
+       if (st->version == 1) {
+               xen_pfn_t mfnp = *((xen_pfn_t *) data);
+               if (mfnp & PRIVCMD_MMAPBATCH_MFN_ERROR)
+                       return __put_user(mfnp, st->user_mfn++);
+               else
+                       st->user_mfn++;
+       } else { /* st->version == 2 */
+               int err = *((int *) data);
+               if (err)
+                       return __put_user(err, st->user_err++);
+               else
+                       st->user_err++;
+       }
+
+       return 0;
 }
 
 /* Allocate pfns that are then mapped with gmfns from foreign domid. Update
@@ -357,12 +372,8 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
        struct vm_area_struct *vma;
        unsigned long nr_pages;
        LIST_HEAD(pagelist);
-       int *err_array = NULL;
        struct mmap_batch_state state;
 
-       if (!xen_initial_domain())
-               return -EPERM;
-
        switch (version) {
        case 1:
                if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
@@ -396,10 +407,12 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
                goto out;
        }
 
-       err_array = kcalloc(m.num, sizeof(int), GFP_KERNEL);
-       if (err_array == NULL) {
-               ret = -ENOMEM;
-               goto out;
+       if (version == 2) {
+               /* Zero error array now to only copy back actual errors. */
+               if (clear_user(m.err, sizeof(int) * m.num)) {
+                       ret = -EFAULT;
+                       goto out;
+               }
        }
 
        down_write(&mm->mmap_sem);
@@ -427,7 +440,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
        state.va            = m.addr;
        state.index         = 0;
        state.global_error  = 0;
-       state.err           = err_array;
+       state.version       = version;
 
        /* mmap_batch_fn guarantees ret == 0 */
        BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t),
@@ -435,21 +448,14 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 
        up_write(&mm->mmap_sem);
 
-       if (version == 1) {
-               if (state.global_error) {
-                       /* Write back errors in second pass. */
-                       state.user_mfn = (xen_pfn_t *)m.arr;
-                       state.err      = err_array;
-                       ret = traverse_pages(m.num, sizeof(xen_pfn_t),
-                                            &pagelist, mmap_return_errors_v1, &state);
-               } else
-                       ret = 0;
-
-       } else if (version == 2) {
-               ret = __copy_to_user(m.err, err_array, m.num * sizeof(int));
-               if (ret)
-                       ret = -EFAULT;
-       }
+       if (state.global_error) {
+               /* Write back errors in second pass. */
+               state.user_mfn = (xen_pfn_t *)m.arr;
+               state.user_err = m.err;
+               ret = traverse_pages(m.num, sizeof(xen_pfn_t),
+                                                        &pagelist, mmap_return_errors, &state);
+       } else
+               ret = 0;
 
        /* If we have not had any EFAULT-like global errors then set the global
         * error to -ENOENT if necessary. */
@@ -457,7 +463,6 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
                ret = -ENOENT;
 
 out:
-       kfree(err_array);
        free_page_list(&pagelist);
 
        return ret;
index a7def010eba3116ac73375f7704c06165c0e0699..f72af87640e076a429af64d050cd23e397e5ff3c 100644 (file)
@@ -124,7 +124,7 @@ static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
 static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
                                             struct pci_dev *dev)
 {
-       if (xen_pcibk_backend && xen_pcibk_backend->free)
+       if (xen_pcibk_backend && xen_pcibk_backend->release)
                return xen_pcibk_backend->release(pdev, dev);
 }
 
index 97f5d264c31ee6f464b92e5b6a2656bff6f00b7f..37c1f825f513764c998e808acf482d9d20cda7e0 100644 (file)
@@ -135,7 +135,6 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
                         struct pci_dev *dev, struct xen_pci_op *op)
 {
        struct xen_pcibk_dev_data *dev_data;
-       int otherend = pdev->xdev->otherend_id;
        int status;
 
        if (unlikely(verbose_request))
@@ -144,8 +143,9 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
        status = pci_enable_msi(dev);
 
        if (status) {
-               printk(KERN_ERR "error enable msi for guest %x status %x\n",
-                       otherend, status);
+               pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI for guest %u: err %d\n",
+                                   pci_name(dev), pdev->xdev->otherend_id,
+                                   status);
                op->value = 0;
                return XEN_PCI_ERR_op_failed;
        }
@@ -223,10 +223,10 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
                                                pci_name(dev), i,
                                                op->msix_entries[i].vector);
                }
-       } else {
-               printk(KERN_WARNING DRV_NAME ": %s: failed to enable MSI-X: err %d!\n",
-                       pci_name(dev), result);
-       }
+       } else
+               pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI-X for guest %u: err %d!\n",
+                                   pci_name(dev), pdev->xdev->otherend_id,
+                                   result);
        kfree(entries);
 
        op->value = result;
index cfe512fd1caffd8829d0c3f6ce1314cc8e873714..780725a463b1b9001dd089ecb4fddce87b35c51a 100644 (file)
@@ -68,16 +68,6 @@ source "fs/quota/Kconfig"
 source "fs/autofs4/Kconfig"
 source "fs/fuse/Kconfig"
 
-config CUSE
-       tristate "Character device in Userspace support"
-       depends on FUSE_FS
-       help
-         This FUSE extension allows character devices to be
-         implemented in userspace.
-
-         If you want to develop or use userspace character device
-         based on CUSE, answer Y or M.
-
 config GENERIC_ACL
        bool
        select FS_POSIX_ACL
index 0c42cdbabecff0c4c8d9d0e597273ff4fce79507..49d0b43458b700d54a02bb1fc54a84d6cec10efa 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/elf.h>
 #include <linux/utsname.h>
 #include <linux/coredump.h>
+#include <linux/sched.h>
 #include <asm/uaccess.h>
 #include <asm/param.h>
 #include <asm/page.h>
@@ -1320,8 +1321,11 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
                cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
                cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
        } else {
-               cputime_to_timeval(p->utime, &prstatus->pr_utime);
-               cputime_to_timeval(p->stime, &prstatus->pr_stime);
+               cputime_t utime, stime;
+
+               task_cputime(p, &utime, &stime);
+               cputime_to_timeval(utime, &prstatus->pr_utime);
+               cputime_to_timeval(stime, &prstatus->pr_stime);
        }
        cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
        cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
index dc84732e554f679dee6de0d95e7eab2b5bcda5b9..cb240dd3b402bafa8bc7fdca8f8d84526b558e82 100644 (file)
@@ -1375,8 +1375,11 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
                cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
                cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
        } else {
-               cputime_to_timeval(p->utime, &prstatus->pr_utime);
-               cputime_to_timeval(p->stime, &prstatus->pr_stime);
+               cputime_t utime, stime;
+
+               task_cputime(p, &utime, &stime);
+               cputime_to_timeval(utime, &prstatus->pr_utime);
+               cputime_to_timeval(stime, &prstatus->pr_stime);
        }
        cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
        cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
index 521e9d4424f64f81d69d98709031a342d5ee808f..5a3327b8f90d557db144b360b3df4ec9bce9ff50 100644 (file)
@@ -3997,7 +3997,7 @@ again:
         * We make the other tasks wait for the flush only when we can flush
         * all things.
         */
-       if (ret && flush == BTRFS_RESERVE_FLUSH_ALL) {
+       if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
                flushing = true;
                space_info->flush = 1;
        }
@@ -4534,7 +4534,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        unsigned nr_extents = 0;
        int extra_reserve = 0;
        enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
-       int ret;
+       int ret = 0;
        bool delalloc_lock = true;
 
        /* If we are a free space inode we need to not flush since we will be in
@@ -4579,20 +4579,18 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        csum_bytes = BTRFS_I(inode)->csum_bytes;
        spin_unlock(&BTRFS_I(inode)->lock);
 
-       if (root->fs_info->quota_enabled) {
+       if (root->fs_info->quota_enabled)
                ret = btrfs_qgroup_reserve(root, num_bytes +
                                           nr_extents * root->leafsize);
-               if (ret) {
-                       spin_lock(&BTRFS_I(inode)->lock);
-                       calc_csum_metadata_size(inode, num_bytes, 0);
-                       spin_unlock(&BTRFS_I(inode)->lock);
-                       if (delalloc_lock)
-                               mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
-                       return ret;
-               }
-       }
 
-       ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
+       /*
+        * ret != 0 here means the qgroup reservation failed, so go straight to
+        * the shared error handling.
+        */
+       if (ret == 0)
+               ret = reserve_metadata_bytes(root, block_rsv,
+                                            to_reserve, flush);
+
        if (ret) {
                u64 to_free = 0;
                unsigned dropped;
@@ -5560,7 +5558,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
        int empty_cluster = 2 * 1024 * 1024;
        struct btrfs_space_info *space_info;
        int loop = 0;
-       int index = 0;
+       int index = __get_raid_index(data);
        int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
                RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
        bool found_uncached_bg = false;
@@ -6788,11 +6786,13 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
                                                       &wc->flags[level]);
                        if (ret < 0) {
                                btrfs_tree_unlock_rw(eb, path->locks[level]);
+                               path->locks[level] = 0;
                                return ret;
                        }
                        BUG_ON(wc->refs[level] == 0);
                        if (wc->refs[level] == 1) {
                                btrfs_tree_unlock_rw(eb, path->locks[level]);
+                               path->locks[level] = 0;
                                return 1;
                        }
                }
index f169d6b11d7f6a093158428352a44f35d4bae952..fdb7a8db3b5748911e5820f9b588c5a264a6a8bc 100644 (file)
@@ -171,6 +171,10 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next)
        if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
                return 0;
 
+       if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
+           test_bit(EXTENT_FLAG_LOGGING, &next->flags))
+               return 0;
+
        if (extent_map_end(prev) == next->start &&
            prev->flags == next->flags &&
            prev->bdev == next->bdev &&
@@ -255,7 +259,8 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
        if (!em)
                goto out;
 
-       list_move(&em->list, &tree->modified_extents);
+       if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
+               list_move(&em->list, &tree->modified_extents);
        em->generation = gen;
        clear_bit(EXTENT_FLAG_PINNED, &em->flags);
        em->mod_start = em->start;
@@ -280,6 +285,13 @@ out:
 
 }
 
+void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
+{
+       clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
+       if (em->in_tree)
+               try_merge_map(tree, em);
+}
+
 /**
  * add_extent_mapping - add new extent map to the extent tree
  * @tree:      tree to insert new map in
index 922943ce29e8caebe0a69feda2a7e481bed0d556..c6598c89cff8c48676dd6179f649ef6f0638acad 100644 (file)
@@ -69,6 +69,7 @@ void free_extent_map(struct extent_map *em);
 int __init extent_map_init(void);
 void extent_map_exit(void);
 int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, u64 gen);
+void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em);
 struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
                                         u64 start, u64 len);
 #endif
index bd38cef4235882425c08eb6a5d10306736a14237..94aa53b387213bbfc13181163368e5e168574d77 100644 (file)
@@ -460,8 +460,8 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
                if (!contig)
                        offset = page_offset(bvec->bv_page) + bvec->bv_offset;
 
-               if (!contig && (offset >= ordered->file_offset + ordered->len ||
-                   offset < ordered->file_offset)) {
+               if (offset >= ordered->file_offset + ordered->len ||
+                   offset < ordered->file_offset) {
                        unsigned long bytes_left;
                        sums->len = this_sum_bytes;
                        this_sum_bytes = 0;
index 77061bf43edbae995bf55ce73dcc89bf73012400..aeb84469d2c4c0621b002084617578f7ac5f49b1 100644 (file)
@@ -293,15 +293,24 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
        struct btrfs_key key;
        struct btrfs_ioctl_defrag_range_args range;
        int num_defrag;
+       int index;
+       int ret;
 
        /* get the inode */
        key.objectid = defrag->root;
        btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
        key.offset = (u64)-1;
+
+       index = srcu_read_lock(&fs_info->subvol_srcu);
+
        inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
        if (IS_ERR(inode_root)) {
-               kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
-               return PTR_ERR(inode_root);
+               ret = PTR_ERR(inode_root);
+               goto cleanup;
+       }
+       if (btrfs_root_refs(&inode_root->root_item) == 0) {
+               ret = -ENOENT;
+               goto cleanup;
        }
 
        key.objectid = defrag->ino;
@@ -309,9 +318,10 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
        key.offset = 0;
        inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
        if (IS_ERR(inode)) {
-               kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
-               return PTR_ERR(inode);
+               ret = PTR_ERR(inode);
+               goto cleanup;
        }
+       srcu_read_unlock(&fs_info->subvol_srcu, index);
 
        /* do a chunk of defrag */
        clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
@@ -346,6 +356,10 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
 
        iput(inode);
        return 0;
+cleanup:
+       srcu_read_unlock(&fs_info->subvol_srcu, index);
+       kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
+       return ret;
 }
 
 /*
@@ -1594,9 +1608,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
                if (err < 0 && num_written > 0)
                        num_written = err;
        }
-out:
+
        if (sync)
                atomic_dec(&BTRFS_I(inode)->sync_writers);
+out:
        sb_end_write(inode->i_sb);
        current->backing_dev_info = NULL;
        return num_written ? num_written : err;
@@ -2241,6 +2256,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
        if (lockend <= lockstart)
                lockend = lockstart + root->sectorsize;
 
+       lockend--;
        len = lockend - lockstart + 1;
 
        len = max_t(u64, len, root->sectorsize);
@@ -2307,9 +2323,12 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
                                        }
                                }
 
-                               *offset = start;
-                               free_extent_map(em);
-                               break;
+                               if (!test_bit(EXTENT_FLAG_PREALLOC,
+                                             &em->flags)) {
+                                       *offset = start;
+                                       free_extent_map(em);
+                                       break;
+                               }
                        }
                }
 
index 59ea2e4349c9cdbecb105a3a384376f52b0a5da6..0be7a8742a43bb502c9b9294661baaef6e5514e1 100644 (file)
@@ -1862,11 +1862,13 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *info;
-       int ret = 0;
+       int ret;
+       bool re_search = false;
 
        spin_lock(&ctl->tree_lock);
 
 again:
+       ret = 0;
        if (!bytes)
                goto out_lock;
 
@@ -1879,17 +1881,17 @@ again:
                info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
                                          1, 0);
                if (!info) {
-                       /* the tree logging code might be calling us before we
-                        * have fully loaded the free space rbtree for this
-                        * block group.  So it is possible the entry won't
-                        * be in the rbtree yet at all.  The caching code
-                        * will make sure not to put it in the rbtree if
-                        * the logging code has pinned it.
+                       /*
+                        * If we found a partial bit of our free space in a
+                        * bitmap but then couldn't find the other part, this
+                        * may be a problem, so WARN about it.
                         */
+                       WARN_ON(re_search);
                        goto out_lock;
                }
        }
 
+       re_search = false;
        if (!info->bitmap) {
                unlink_free_space(ctl, info);
                if (offset == info->offset) {
@@ -1935,8 +1937,10 @@ again:
        }
 
        ret = remove_from_bitmap(ctl, info, &offset, &bytes);
-       if (ret == -EAGAIN)
+       if (ret == -EAGAIN) {
+               re_search = true;
                goto again;
+       }
        BUG_ON(ret); /* logic error */
 out_lock:
        spin_unlock(&ctl->tree_lock);
index 16d9e8e191e6ac34d92da08fe90b46e2de42ebf8..cc93b23ca3520c9ad7bf2919e77f61cf400c2e4c 100644 (file)
@@ -88,7 +88,7 @@ static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
        [S_IFLNK >> S_SHIFT]    = BTRFS_FT_SYMLINK,
 };
 
-static int btrfs_setsize(struct inode *inode, loff_t newsize);
+static int btrfs_setsize(struct inode *inode, struct iattr *attr);
 static int btrfs_truncate(struct inode *inode);
 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
 static noinline int cow_file_range(struct inode *inode,
@@ -2478,6 +2478,18 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                                continue;
                        }
                        nr_truncate++;
+
+                       /* 1 for the orphan item deletion. */
+                       trans = btrfs_start_transaction(root, 1);
+                       if (IS_ERR(trans)) {
+                               ret = PTR_ERR(trans);
+                               goto out;
+                       }
+                       ret = btrfs_orphan_add(trans, inode);
+                       btrfs_end_transaction(trans, root);
+                       if (ret)
+                               goto out;
+
                        ret = btrfs_truncate(inode);
                } else {
                        nr_unlink++;
@@ -3665,6 +3677,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                                block_end - cur_offset, 0);
                if (IS_ERR(em)) {
                        err = PTR_ERR(em);
+                       em = NULL;
                        break;
                }
                last_byte = min(extent_map_end(em), block_end);
@@ -3748,16 +3761,27 @@ next:
        return err;
 }
 
-static int btrfs_setsize(struct inode *inode, loff_t newsize)
+static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        loff_t oldsize = i_size_read(inode);
+       loff_t newsize = attr->ia_size;
+       int mask = attr->ia_valid;
        int ret;
 
        if (newsize == oldsize)
                return 0;
 
+       /*
+        * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
+        * special case where we need to update the times despite not having
+        * these flags set.  For all other operations the VFS sets these flags
+        * explicitly if it wants a timestamp update.
+        */
+       if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME))))
+               inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);
+
        if (newsize > oldsize) {
                truncate_pagecache(inode, oldsize, newsize);
                ret = btrfs_cont_expand(inode, oldsize, newsize);
@@ -3783,9 +3807,34 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize)
                        set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
                                &BTRFS_I(inode)->runtime_flags);
 
+               /*
+                * 1 for the orphan item we're going to add
+                * 1 for the orphan item deletion.
+                */
+               trans = btrfs_start_transaction(root, 2);
+               if (IS_ERR(trans))
+                       return PTR_ERR(trans);
+
+               /*
+                * We need to do this in case we fail at _any_ point during the
+                * actual truncate.  Once we do the truncate_setsize we could
+                * invalidate pages which forces any outstanding ordered io to
+                * be instantly completed which will give us extents that need
+                * to be truncated.  If we fail to get an orphan inode down we
+                * could have left over extents that were never meant to live,
+                * so we need to guarantee from this point on that everything
+                * will be consistent.
+                */
+               ret = btrfs_orphan_add(trans, inode);
+               btrfs_end_transaction(trans, root);
+               if (ret)
+                       return ret;
+
                /* we don't support swapfiles, so vmtruncate shouldn't fail */
                truncate_setsize(inode, newsize);
                ret = btrfs_truncate(inode);
+               if (ret && inode->i_nlink)
+                       btrfs_orphan_del(NULL, inode);
        }
 
        return ret;
@@ -3805,7 +3854,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
                return err;
 
        if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
-               err = btrfs_setsize(inode, attr->ia_size);
+               err = btrfs_setsize(inode, attr);
                if (err)
                        return err;
        }
@@ -5572,10 +5621,13 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag
                return em;
        if (em) {
                /*
-                * if our em maps to a hole, there might
-                * actually be delalloc bytes behind it
+                * if our em maps to
+                * -  a hole or
+                * -  a pre-alloc extent,
+                * there might actually be delalloc bytes behind it.
                 */
-               if (em->block_start != EXTENT_MAP_HOLE)
+               if (em->block_start != EXTENT_MAP_HOLE &&
+                   !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
                        return em;
                else
                        hole_em = em;
@@ -5657,6 +5709,8 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag
                         */
                        em->block_start = hole_em->block_start;
                        em->block_len = hole_len;
+                       if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
+                               set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
                } else {
                        em->start = range_start;
                        em->len = found;
@@ -6915,11 +6969,9 @@ static int btrfs_truncate(struct inode *inode)
 
        /*
         * 1 for the truncate slack space
-        * 1 for the orphan item we're going to add
-        * 1 for the orphan item deletion
         * 1 for updating the inode.
         */
-       trans = btrfs_start_transaction(root, 4);
+       trans = btrfs_start_transaction(root, 2);
        if (IS_ERR(trans)) {
                err = PTR_ERR(trans);
                goto out;
@@ -6930,12 +6982,6 @@ static int btrfs_truncate(struct inode *inode)
                                      min_size);
        BUG_ON(ret);
 
-       ret = btrfs_orphan_add(trans, inode);
-       if (ret) {
-               btrfs_end_transaction(trans, root);
-               goto out;
-       }
-
        /*
         * setattr is responsible for setting the ordered_data_close flag,
         * but that is only tested during the last file release.  That
@@ -7004,12 +7050,6 @@ static int btrfs_truncate(struct inode *inode)
                ret = btrfs_orphan_del(trans, inode);
                if (ret)
                        err = ret;
-       } else if (ret && inode->i_nlink > 0) {
-               /*
-                * Failed to do the truncate, remove us from the in memory
-                * orphan list.
-                */
-               ret = btrfs_orphan_del(NULL, inode);
        }
 
        if (trans) {
@@ -7531,41 +7571,61 @@ void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
  */
 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
 {
-       struct list_head *head = &root->fs_info->delalloc_inodes;
        struct btrfs_inode *binode;
        struct inode *inode;
        struct btrfs_delalloc_work *work, *next;
        struct list_head works;
+       struct list_head splice;
        int ret = 0;
 
        if (root->fs_info->sb->s_flags & MS_RDONLY)
                return -EROFS;
 
        INIT_LIST_HEAD(&works);
-
+       INIT_LIST_HEAD(&splice);
+again:
        spin_lock(&root->fs_info->delalloc_lock);
-       while (!list_empty(head)) {
-               binode = list_entry(head->next, struct btrfs_inode,
+       list_splice_init(&root->fs_info->delalloc_inodes, &splice);
+       while (!list_empty(&splice)) {
+               binode = list_entry(splice.next, struct btrfs_inode,
                                    delalloc_inodes);
+
+               list_del_init(&binode->delalloc_inodes);
+
                inode = igrab(&binode->vfs_inode);
                if (!inode)
-                       list_del_init(&binode->delalloc_inodes);
+                       continue;
+
+               list_add_tail(&binode->delalloc_inodes,
+                             &root->fs_info->delalloc_inodes);
                spin_unlock(&root->fs_info->delalloc_lock);
-               if (inode) {
-                       work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
-                       if (!work) {
-                               ret = -ENOMEM;
-                               goto out;
-                       }
-                       list_add_tail(&work->list, &works);
-                       btrfs_queue_worker(&root->fs_info->flush_workers,
-                                          &work->work);
+
+               work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
+               if (unlikely(!work)) {
+                       ret = -ENOMEM;
+                       goto out;
                }
+               list_add_tail(&work->list, &works);
+               btrfs_queue_worker(&root->fs_info->flush_workers,
+                                  &work->work);
+
                cond_resched();
                spin_lock(&root->fs_info->delalloc_lock);
        }
        spin_unlock(&root->fs_info->delalloc_lock);
 
+       list_for_each_entry_safe(work, next, &works, list) {
+               list_del_init(&work->list);
+               btrfs_wait_and_free_delalloc_work(work);
+       }
+
+       spin_lock(&root->fs_info->delalloc_lock);
+       if (!list_empty(&root->fs_info->delalloc_inodes)) {
+               spin_unlock(&root->fs_info->delalloc_lock);
+               goto again;
+       }
+       spin_unlock(&root->fs_info->delalloc_lock);
+
        /* the filemap_flush will queue IO into the worker threads, but
         * we have to make sure the IO is actually started and that
         * ordered extents get created before we return
@@ -7578,11 +7638,18 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
                    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
        }
        atomic_dec(&root->fs_info->async_submit_draining);
+       return 0;
 out:
        list_for_each_entry_safe(work, next, &works, list) {
                list_del_init(&work->list);
                btrfs_wait_and_free_delalloc_work(work);
        }
+
+       if (!list_empty_careful(&splice)) {
+               spin_lock(&root->fs_info->delalloc_lock);
+               list_splice_tail(&splice, &root->fs_info->delalloc_inodes);
+               spin_unlock(&root->fs_info->delalloc_lock);
+       }
        return ret;
 }
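
The rewritten btrfs_start_delalloc_inodes() above splices the shared delalloc list onto a private list under the spinlock, works on the private copy with the lock dropped, and splices any unprocessed tail back if allocation fails. A user-space sketch of that splice-under-lock pattern; the list and lock here are illustrative stand-ins, not kernel code:

#include <pthread.h>
#include <stdlib.h>

struct node {
        struct node *next;
        int payload;
};

static struct node *shared_head;                /* protected by shared_lock */
static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;

static int process(struct node *n)
{
        return n->payload >= 0 ? 0 : -1;        /* pretend work */
}

static int drain_shared_list(void)
{
        struct node *splice, *n;
        int ret = 0;

        /* detach the whole shared list while holding the lock */
        pthread_mutex_lock(&shared_lock);
        splice = shared_head;
        shared_head = NULL;
        pthread_mutex_unlock(&shared_lock);

        /* work on the private copy without the lock held */
        while (splice) {
                n = splice;
                splice = n->next;
                ret = process(n);
                free(n);
                if (ret)
                        break;
        }

        /* on failure, splice the unprocessed tail back for other users */
        if (ret && splice) {
                struct node **pp;

                pthread_mutex_lock(&shared_lock);
                for (pp = &shared_head; *pp; pp = &(*pp)->next)
                        ;
                *pp = splice;
                pthread_mutex_unlock(&shared_lock);
        }
        return ret;
}

int main(void)
{
        int i;

        for (i = 0; i < 3; i++) {               /* build a tiny shared list */
                struct node *n = malloc(sizeof(*n));

                if (!n)
                        return 1;
                n->payload = i;
                n->next = shared_head;
                shared_head = n;
        }
        return drain_shared_list() ? 1 : 0;
}
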
 
index 4b4516770f055432964da1e82fe591e1e2128ef8..338f2597bf7f8da2215e0d87289b01c94f9cfe7c 100644 (file)
@@ -515,7 +515,6 @@ static noinline int create_subvol(struct btrfs_root *root,
 
        BUG_ON(ret);
 
-       d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));
 fail:
        if (async_transid) {
                *async_transid = trans->transid;
@@ -525,6 +524,10 @@ fail:
        }
        if (err && !ret)
                ret = err;
+
+       if (!ret)
+               d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));
+
        return ret;
 }
 
@@ -1339,7 +1342,8 @@ static noinline int btrfs_ioctl_resize(struct file *file,
        if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
                        1)) {
                pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
-               return -EINPROGRESS;
+               mnt_drop_write_file(file);
+               return -EINVAL;
        }
 
        mutex_lock(&root->fs_info->volume_mutex);
@@ -1362,6 +1366,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
                printk(KERN_INFO "btrfs: resizing devid %llu\n",
                       (unsigned long long)devid);
        }
+
        device = btrfs_find_device(root->fs_info, devid, NULL, NULL);
        if (!device) {
                printk(KERN_INFO "btrfs: resizer unable to find device %llu\n",
@@ -1369,9 +1374,10 @@ static noinline int btrfs_ioctl_resize(struct file *file,
                ret = -EINVAL;
                goto out_free;
        }
-       if (device->fs_devices && device->fs_devices->seeding) {
+
+       if (!device->writeable) {
                printk(KERN_INFO "btrfs: resizer unable to apply on "
-                      "seeding device %llu\n",
+                      "readonly device %llu\n",
                       (unsigned long long)devid);
                ret = -EINVAL;
                goto out_free;
@@ -1443,8 +1449,8 @@ out_free:
        kfree(vol_args);
 out:
        mutex_unlock(&root->fs_info->volume_mutex);
-       mnt_drop_write_file(file);
        atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
+       mnt_drop_write_file(file);
        return ret;
 }
 
@@ -2095,13 +2101,13 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
                err = inode_permission(inode, MAY_WRITE | MAY_EXEC);
                if (err)
                        goto out_dput;
-
-               /* check if subvolume may be deleted by a non-root user */
-               err = btrfs_may_delete(dir, dentry, 1);
-               if (err)
-                       goto out_dput;
        }
 
+       /* check if subvolume may be deleted by a user */
+       err = btrfs_may_delete(dir, dentry, 1);
+       if (err)
+               goto out_dput;
+
        if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
                err = -EINVAL;
                goto out_dput;
@@ -2183,19 +2189,20 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
        struct btrfs_ioctl_defrag_range_args *range;
        int ret;
 
-       if (btrfs_root_readonly(root))
-               return -EROFS;
+       ret = mnt_want_write_file(file);
+       if (ret)
+               return ret;
 
        if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
                        1)) {
                pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
-               return -EINPROGRESS;
+               mnt_drop_write_file(file);
+               return -EINVAL;
        }
-       ret = mnt_want_write_file(file);
-       if (ret) {
-               atomic_set(&root->fs_info->mutually_exclusive_operation_running,
-                          0);
-               return ret;
+
+       if (btrfs_root_readonly(root)) {
+               ret = -EROFS;
+               goto out;
        }
 
        switch (inode->i_mode & S_IFMT) {
@@ -2247,8 +2254,8 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
                ret = -EINVAL;
        }
 out:
-       mnt_drop_write_file(file);
        atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
+       mnt_drop_write_file(file);
        return ret;
 }
 
@@ -2263,7 +2270,7 @@ static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
        if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
                        1)) {
                pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
-               return -EINPROGRESS;
+               return -EINVAL;
        }
 
        mutex_lock(&root->fs_info->volume_mutex);
@@ -2300,7 +2307,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
                        1)) {
                pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
                mnt_drop_write_file(file);
-               return -EINPROGRESS;
+               return -EINVAL;
        }
 
        mutex_lock(&root->fs_info->volume_mutex);
@@ -2316,8 +2323,8 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
        kfree(vol_args);
 out:
        mutex_unlock(&root->fs_info->volume_mutex);
-       mnt_drop_write_file(file);
        atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
+       mnt_drop_write_file(file);
        return ret;
 }
 
@@ -3437,8 +3444,8 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_ioctl_balance_args *bargs;
        struct btrfs_balance_control *bctl;
+       bool need_unlock; /* for mut. excl. ops lock */
        int ret;
-       int need_to_clear_lock = 0;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
@@ -3447,14 +3454,61 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
        if (ret)
                return ret;
 
-       mutex_lock(&fs_info->volume_mutex);
+again:
+       if (!atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
+               mutex_lock(&fs_info->volume_mutex);
+               mutex_lock(&fs_info->balance_mutex);
+               need_unlock = true;
+               goto locked;
+       }
+
+       /*
+        * mut. excl. ops lock is locked.  Three possibilities:
+        *   (1) some other op is running
+        *   (2) balance is running
+        *   (3) balance is paused -- special case (think resume)
+        */
        mutex_lock(&fs_info->balance_mutex);
+       if (fs_info->balance_ctl) {
+               /* this is either (2) or (3) */
+               if (!atomic_read(&fs_info->balance_running)) {
+                       mutex_unlock(&fs_info->balance_mutex);
+                       if (!mutex_trylock(&fs_info->volume_mutex))
+                               goto again;
+                       mutex_lock(&fs_info->balance_mutex);
+
+                       if (fs_info->balance_ctl &&
+                           !atomic_read(&fs_info->balance_running)) {
+                               /* this is (3) */
+                               need_unlock = false;
+                               goto locked;
+                       }
+
+                       mutex_unlock(&fs_info->balance_mutex);
+                       mutex_unlock(&fs_info->volume_mutex);
+                       goto again;
+               } else {
+                       /* this is (2) */
+                       mutex_unlock(&fs_info->balance_mutex);
+                       ret = -EINPROGRESS;
+                       goto out;
+               }
+       } else {
+               /* this is (1) */
+               mutex_unlock(&fs_info->balance_mutex);
+               pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+locked:
+       BUG_ON(!atomic_read(&fs_info->mutually_exclusive_operation_running));
 
        if (arg) {
                bargs = memdup_user(arg, sizeof(*bargs));
                if (IS_ERR(bargs)) {
                        ret = PTR_ERR(bargs);
-                       goto out;
+                       goto out_unlock;
                }
 
                if (bargs->flags & BTRFS_BALANCE_RESUME) {
@@ -3474,13 +3528,10 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
                bargs = NULL;
        }
 
-       if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
-                       1)) {
-               pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
+       if (fs_info->balance_ctl) {
                ret = -EINPROGRESS;
                goto out_bargs;
        }
-       need_to_clear_lock = 1;
 
        bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
        if (!bctl) {
@@ -3501,11 +3552,17 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
        }
 
 do_balance:
-       ret = btrfs_balance(bctl, bargs);
        /*
-        * bctl is freed in __cancel_balance or in free_fs_info if
-        * restriper was paused all the way until unmount
+        * Ownership of bctl and mutually_exclusive_operation_running
+        * goes to btrfs_balance.  bctl is freed in __cancel_balance,
+        * or, if restriper was paused all the way until unmount, in
+        * free_fs_info.  mutually_exclusive_operation_running is
+        * cleared in __cancel_balance.
         */
+       need_unlock = false;
+
+       ret = btrfs_balance(bctl, bargs);
+
        if (arg) {
                if (copy_to_user(arg, bargs, sizeof(*bargs)))
                        ret = -EFAULT;
@@ -3513,12 +3570,12 @@ do_balance:
 
 out_bargs:
        kfree(bargs);
-out:
-       if (need_to_clear_lock)
-               atomic_set(&root->fs_info->mutually_exclusive_operation_running,
-                          0);
+out_unlock:
        mutex_unlock(&fs_info->balance_mutex);
        mutex_unlock(&fs_info->volume_mutex);
+       if (need_unlock)
+               atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
+out:
        mnt_drop_write_file(file);
        return ret;
 }
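
Throughout these ioctl hunks, fs_info->mutually_exclusive_operation_running acts as an atomic_xchg() try-lock: the first task to flip it 0 -> 1 proceeds with the exclusive operation, everyone else backs off, and whoever finishes the operation stores 0 again. A stand-alone C11 sketch of the same idiom; the names are illustrative, not from the kernel:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int exclusive_op_running;

static int try_start_exclusive_op(void)
{
        /* atomic_exchange returns the old value: 0 means we won the race */
        if (atomic_exchange(&exclusive_op_running, 1))
                return -1;
        return 0;
}

static void end_exclusive_op(void)
{
        atomic_store(&exclusive_op_running, 0);
}

int main(void)
{
        if (try_start_exclusive_op()) {
                fprintf(stderr, "exclusive operation already running\n");
                return 1;
        }
        /* ... perform the add/delete/balance/replace/resize work ... */
        end_exclusive_op();
        return 0;
}
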
@@ -3698,6 +3755,11 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
                goto drop_write;
        }
 
+       if (!sa->qgroupid) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
index f107312970405da1e3218118a8d0555894955c59..e5ed56729607a82246cac22a229d105efa562509 100644 (file)
@@ -836,9 +836,16 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
         * if the disk i_size is already at the inode->i_size, or
         * this ordered extent is inside the disk i_size, we're done
         */
-       if (disk_i_size == i_size || offset <= disk_i_size) {
+       if (disk_i_size == i_size)
+               goto out;
+
+       /*
+        * We still need to update disk_i_size if outstanding_isize is greater
+        * than disk_i_size.
+        */
+       if (offset <= disk_i_size &&
+           (!ordered || ordered->outstanding_isize <= disk_i_size))
                goto out;
-       }
 
        /*
         * walk backward from this ordered extent to disk_i_size.
@@ -870,7 +877,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                        break;
                if (test->file_offset >= i_size)
                        break;
-               if (test->file_offset >= disk_i_size) {
+               if (entry_end(test) > disk_i_size) {
                        /*
                         * we don't update disk_i_size now, so record this
                         * undealt i_size. Or we will not know the real
index fe9d02c45f8e521f87b44d6deff3e5f8d99aa3a9..a5c856234323241c22dde18e1ee072c8bed3ccaa 100644 (file)
@@ -379,6 +379,13 @@ next1:
 
                ret = add_relation_rb(fs_info, found_key.objectid,
                                      found_key.offset);
+               if (ret == -ENOENT) {
+                       printk(KERN_WARNING
+                               "btrfs: orphan qgroup relation 0x%llx->0x%llx\n",
+                               (unsigned long long)found_key.objectid,
+                               (unsigned long long)found_key.offset);
+                       ret = 0;        /* ignore the error */
+               }
                if (ret)
                        goto out;
 next2:
@@ -956,17 +963,28 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
                        struct btrfs_fs_info *fs_info, u64 qgroupid)
 {
        struct btrfs_root *quota_root;
+       struct btrfs_qgroup *qgroup;
        int ret = 0;
 
        quota_root = fs_info->quota_root;
        if (!quota_root)
                return -EINVAL;
 
+       /* check if there are no relations to this qgroup */
+       spin_lock(&fs_info->qgroup_lock);
+       qgroup = find_qgroup_rb(fs_info, qgroupid);
+       if (qgroup) {
+               if (!list_empty(&qgroup->groups) || !list_empty(&qgroup->members)) {
+                       spin_unlock(&fs_info->qgroup_lock);
+                       return -EBUSY;
+               }
+       }
+       spin_unlock(&fs_info->qgroup_lock);
+
        ret = del_qgroup_item(trans, quota_root, qgroupid);
 
        spin_lock(&fs_info->qgroup_lock);
        del_qgroup_rb(quota_root->fs_info, qgroupid);
-
        spin_unlock(&fs_info->qgroup_lock);
 
        return ret;
index bdbb94f245c9070802c65acb6eba392ba1a4c932..67783e03d1211bdcba1e84a20aa56a528916c370 100644 (file)
@@ -580,20 +580,29 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
        int corrected = 0;
        struct btrfs_key key;
        struct inode *inode = NULL;
+       struct btrfs_fs_info *fs_info;
        u64 end = offset + PAGE_SIZE - 1;
        struct btrfs_root *local_root;
+       int srcu_index;
 
        key.objectid = root;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;
-       local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key);
-       if (IS_ERR(local_root))
+
+       fs_info = fixup->root->fs_info;
+       srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
+
+       local_root = btrfs_read_fs_root_no_name(fs_info, &key);
+       if (IS_ERR(local_root)) {
+               srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
                return PTR_ERR(local_root);
+       }
 
        key.type = BTRFS_INODE_ITEM_KEY;
        key.objectid = inum;
        key.offset = 0;
-       inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL);
+       inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
+       srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
        if (IS_ERR(inode))
                return PTR_ERR(inode);
 
@@ -606,7 +615,6 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
        }
 
        if (PageUptodate(page)) {
-               struct btrfs_fs_info *fs_info;
                if (PageDirty(page)) {
                        /*
                         * we need to write the data to the defect sector. the
@@ -3180,18 +3188,25 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)
        u64 physical_for_dev_replace;
        u64 len;
        struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
+       int srcu_index;
 
        key.objectid = root;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;
+
+       srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
+
        local_root = btrfs_read_fs_root_no_name(fs_info, &key);
-       if (IS_ERR(local_root))
+       if (IS_ERR(local_root)) {
+               srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
                return PTR_ERR(local_root);
+       }
 
        key.type = BTRFS_INODE_ITEM_KEY;
        key.objectid = inum;
        key.offset = 0;
        inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
+       srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
        if (IS_ERR(inode))
                return PTR_ERR(inode);
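
Both scrub.c hunks wrap the subvolume root lookup in an SRCU read-side section and drop the read lock on every exit path, including the early error returns. A user-space sketch of that discipline, with a pthread rwlock standing in for SRCU purely for illustration and placeholder lookups in place of btrfs_read_fs_root_no_name()/btrfs_iget():

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

struct root { int dummy; };

static pthread_rwlock_t subvol_lock = PTHREAD_RWLOCK_INITIALIZER;

/* stand-ins for the real subvolume root and inode lookups */
static struct root *lookup_root(unsigned long long id)
{
        (void)id;
        return NULL;                    /* pretend the root was not found */
}

static int open_inode(struct root *r, unsigned long long ino)
{
        (void)r;
        (void)ino;
        return 0;
}

static int read_from_subvol(unsigned long long root_id, unsigned long long ino)
{
        struct root *r;
        int ret;

        pthread_rwlock_rdlock(&subvol_lock);

        r = lookup_root(root_id);
        if (!r) {
                /* every early return has to drop the read lock */
                pthread_rwlock_unlock(&subvol_lock);
                return -ENOENT;
        }

        ret = open_inode(r, ino);
        pthread_rwlock_unlock(&subvol_lock);    /* ...and so does success */
        return ret;
}

int main(void)
{
        return read_from_subvol(5, 256) == -ENOENT ? 0 : 1;
}
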
 
index 54454542ad4073352ddcabf27fd9e0bc528e2136..321b7fb4e4417573e9c404069c6b249afb8ab488 100644 (file)
@@ -1814,8 +1814,10 @@ static int name_cache_insert(struct send_ctx *sctx,
                        (unsigned long)nce->ino);
        if (!nce_head) {
                nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS);
-               if (!nce_head)
+               if (!nce_head) {
+                       kfree(nce);
                        return -ENOMEM;
+               }
                INIT_LIST_HEAD(nce_head);
 
                ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
index 99545df1b86c18071fed0cbf68dcbb8e745d5524..d8982e9601d37862e6405df95df15c1ba34eecb9 100644 (file)
@@ -267,7 +267,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
                             function, line, errstr);
                return;
        }
-       trans->transaction->aborted = errno;
+       ACCESS_ONCE(trans->transaction->aborted) = errno;
        __btrfs_std_error(root->fs_info, function, line, errno, NULL);
 }
 /*
index 87fac9a21ea56578625536ac1229678e854ec5f7..fc03aa60b68440862e4884f90cb88113d15610a7 100644 (file)
@@ -333,12 +333,14 @@ start_transaction(struct btrfs_root *root, u64 num_items, int type,
                                          &root->fs_info->trans_block_rsv,
                                          num_bytes, flush);
                if (ret)
-                       return ERR_PTR(ret);
+                       goto reserve_fail;
        }
 again:
        h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
-       if (!h)
-               return ERR_PTR(-ENOMEM);
+       if (!h) {
+               ret = -ENOMEM;
+               goto alloc_fail;
+       }
 
        /*
         * If we are JOIN_NOLOCK we're already committing a transaction and
@@ -365,11 +367,7 @@ again:
        if (ret < 0) {
                /* We must get the transaction if we are JOIN_NOLOCK. */
                BUG_ON(type == TRANS_JOIN_NOLOCK);
-
-               if (type < TRANS_JOIN_NOLOCK)
-                       sb_end_intwrite(root->fs_info->sb);
-               kmem_cache_free(btrfs_trans_handle_cachep, h);
-               return ERR_PTR(ret);
+               goto join_fail;
        }
 
        cur_trans = root->fs_info->running_transaction;
@@ -410,6 +408,19 @@ got_it:
        if (!current->journal_info && type != TRANS_USERSPACE)
                current->journal_info = h;
        return h;
+
+join_fail:
+       if (type < TRANS_JOIN_NOLOCK)
+               sb_end_intwrite(root->fs_info->sb);
+       kmem_cache_free(btrfs_trans_handle_cachep, h);
+alloc_fail:
+       if (num_bytes)
+               btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
+                                       num_bytes);
+reserve_fail:
+       if (qgroup_reserved)
+               btrfs_qgroup_free(root, qgroup_reserved);
+       return ERR_PTR(ret);
 }
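
The start_transaction() hunk replaces scattered early returns with a single goto-based unwind chain (join_fail -> alloc_fail -> reserve_fail), so each failure point releases exactly the resources acquired before it, in reverse order. A small user-space sketch of the same layered-cleanup idiom; the resources here are placeholders:

#include <stdlib.h>

struct handle {
        void *reservation;
        void *buffer;
};

static int setup(struct handle *h, size_t bytes)
{
        int ret;

        h->reservation = malloc(bytes);         /* step 1: reserve space */
        if (!h->reservation) {
                ret = -1;
                goto reserve_fail;
        }

        h->buffer = malloc(64);                 /* step 2: allocate the handle */
        if (!h->buffer) {
                ret = -1;
                goto alloc_fail;
        }

        return 0;                               /* success: caller owns both */

alloc_fail:
        free(h->reservation);                   /* undo step 1 only */
        h->reservation = NULL;
reserve_fail:
        return ret;
}

int main(void)
{
        struct handle h;

        if (setup(&h, 4096))
                return 1;
        free(h.buffer);
        free(h.reservation);
        return 0;
}
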
 
 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
@@ -1468,7 +1479,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                goto cleanup_transaction;
        }
 
-       if (cur_trans->aborted) {
+       /* Stop the commit early if ->aborted is set */
+       if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
                ret = cur_trans->aborted;
                goto cleanup_transaction;
        }
@@ -1574,6 +1586,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
        wait_event(cur_trans->writer_wait,
                   atomic_read(&cur_trans->num_writers) == 1);
 
+       /* ->aborted might be set after the previous check, so check it */
+       if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
+               ret = cur_trans->aborted;
+               goto cleanup_transaction;
+       }
        /*
         * the reloc mutex makes sure that we stop
         * the balancing code from coming in and moving
@@ -1657,6 +1674,17 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                goto cleanup_transaction;
        }
 
+       /*
+        * The tasks which save the space cache and inode cache may also
+        * update ->aborted, check it.
+        */
+       if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
+               ret = cur_trans->aborted;
+               mutex_unlock(&root->fs_info->tree_log_mutex);
+               mutex_unlock(&root->fs_info->reloc_mutex);
+               goto cleanup_transaction;
+       }
+
        btrfs_prepare_extent_commit(trans, root);
 
        cur_trans = root->fs_info->running_transaction;
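
The ->aborted flag is stored with ACCESS_ONCE() in __btrfs_abort_transaction() and re-checked at several points during commit; ACCESS_ONCE() forces the compiler to emit a fresh load or store each time instead of reusing a cached value. A rough user-space analogue of that re-check pattern, sketched with C11 relaxed atomics; the names are illustrative, not from the kernel:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int txn_aborted;          /* set by whichever task hits an error */

static void abort_transaction(int err)
{
        atomic_store_explicit(&txn_aborted, err, memory_order_relaxed);
}

static int commit_transaction(void)
{
        int err;

        /* early check: bail out before doing any commit work */
        err = atomic_load_explicit(&txn_aborted, memory_order_relaxed);
        if (err)
                return err;

        /* ... wait for writers, write the space and inode caches ... */

        /* the flag may have been set meanwhile, so load it again */
        err = atomic_load_explicit(&txn_aborted, memory_order_relaxed);
        if (err)
                return err;

        printf("commit ok\n");
        return 0;
}

int main(void)
{
        abort_transaction(0);           /* nothing went wrong in this run */
        return commit_transaction();
}
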
index 83186c7e45d40db89192abad9da56628874cf5d1..9027bb1e74660758328a3d133fe58b59ddc460e6 100644 (file)
@@ -3357,6 +3357,11 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
        if (skip_csum)
                return 0;
 
+       if (em->compress_type) {
+               csum_offset = 0;
+               csum_len = block_len;
+       }
+
        /* block start is already adjusted for the file extent offset. */
        ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
                                       em->block_start + csum_offset,
@@ -3410,13 +3415,13 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
                em = list_entry(extents.next, struct extent_map, list);
 
                list_del_init(&em->list);
-               clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
 
                /*
                 * If we had an error we just need to delete everybody from our
                 * private list.
                 */
                if (ret) {
+                       clear_em_logging(tree, em);
                        free_extent_map(em);
                        continue;
                }
@@ -3424,8 +3429,9 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
                write_unlock(&tree->lock);
 
                ret = log_one_extent(trans, inode, root, em, path);
-               free_extent_map(em);
                write_lock(&tree->lock);
+               clear_em_logging(tree, em);
+               free_extent_map(em);
        }
        WARN_ON(!list_empty(&extents));
        write_unlock(&tree->lock);
index 5cce6aa7401287322c31673cf039f0e32918c6c4..5cbb7f4b16720fc3c3442ffb09752983195836a5 100644 (file)
@@ -1431,7 +1431,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
                }
        } else {
                ret = btrfs_get_bdev_and_sb(device_path,
-                                           FMODE_READ | FMODE_EXCL,
+                                           FMODE_WRITE | FMODE_EXCL,
                                            root->fs_info->bdev_holder, 0,
                                            &bdev, &bh);
                if (ret)
@@ -1556,7 +1556,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
        ret = 0;
 
        /* Notify udev that device has changed */
-       btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
+       if (bdev)
+               btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
 
 error_brelse:
        brelse(bh);
@@ -2614,7 +2615,14 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
        cache = btrfs_lookup_block_group(fs_info, chunk_offset);
        chunk_used = btrfs_block_group_used(&cache->item);
 
-       user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
+       if (bargs->usage == 0)
+               user_thresh = 0;
+       else if (bargs->usage > 100)
+               user_thresh = cache->key.offset;
+       else
+               user_thresh = div_factor_fine(cache->key.offset,
+                                             bargs->usage);
+
        if (chunk_used < user_thresh)
                ret = 0;
 
@@ -2959,6 +2967,8 @@ static void __cancel_balance(struct btrfs_fs_info *fs_info)
        unset_balance_control(fs_info);
        ret = del_balance_item(fs_info->tree_root);
        BUG_ON(ret);
+
+       atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
 }
 
 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
@@ -3138,8 +3148,10 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 out:
        if (bctl->flags & BTRFS_BALANCE_RESUME)
                __cancel_balance(fs_info);
-       else
+       else {
                kfree(bctl);
+               atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
+       }
        return ret;
 }
 
@@ -3156,7 +3168,6 @@ static int balance_kthread(void *data)
                ret = btrfs_balance(fs_info->balance_ctl, NULL);
        }
 
-       atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
        mutex_unlock(&fs_info->balance_mutex);
        mutex_unlock(&fs_info->volume_mutex);
 
@@ -3179,7 +3190,6 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
                return 0;
        }
 
-       WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
        tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
        if (IS_ERR(tsk))
                return PTR_ERR(tsk);
@@ -3233,6 +3243,8 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
        btrfs_balance_sys(leaf, item, &disk_bargs);
        btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
 
+       WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
+
        mutex_lock(&fs_info->volume_mutex);
        mutex_lock(&fs_info->balance_mutex);
 
@@ -3496,7 +3508,7 @@ struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
        { 1, 1, 2, 2, 2, 2 /* raid1 */ },
        { 1, 2, 1, 1, 1, 2 /* dup */ },
        { 1, 1, 0, 2, 1, 1 /* raid0 */ },
-       { 1, 1, 0, 1, 1, 1 /* single */ },
+       { 1, 1, 1, 1, 1, 1 /* single */ },
 };
 
 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
index c017a2dfb9097e73837230d244ffb14f73dd0e1c..7a75c3e0fd5896b7fc59595fff859c41f23f65b1 100644 (file)
@@ -2935,6 +2935,7 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
                void *kaddr = kmap_atomic(bh->b_page);
                memset(kaddr + bh_offset(bh) + bytes, 0, bh->b_size - bytes);
                kunmap_atomic(kaddr);
+               flush_dcache_page(bh->b_page);
        }
 }
 
index ce5cbd717bfc54be99e746089efe1fd17a8d593c..210fce2df3087a3738291f714e9ac99c18f390f8 100644 (file)
@@ -226,6 +226,8 @@ compose_mount_options_out:
 compose_mount_options_err:
        kfree(mountdata);
        mountdata = ERR_PTR(rc);
+       kfree(*devname);
+       *devname = NULL;
        goto compose_mount_options_out;
 }
 
index 17c3643e595095597e90026ea6a337edb7bd8b0b..12b3da39733b2bc180cd1ac4a164d9f0d071e99a 100644 (file)
@@ -1917,7 +1917,7 @@ srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs)
        }
        case AF_INET6: {
                struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
-               struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)&rhs;
+               struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;
                return ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr);
        }
        default:
index 153bb1e42e631e07b41c5d87d82fd660388ed22e..a5f12b7e228d487029ea83ab72482cdfc8bb2a61 100644 (file)
@@ -176,7 +176,7 @@ static int debugfs_parse_options(char *data, struct debugfs_mount_opts *opts)
                        opts->uid = uid;
                        break;
                case Opt_gid:
-                       if (match_octal(&args[0], &option))
+                       if (match_int(&args[0], &option))
                                return -EINVAL;
                        gid = make_kgid(current_user_ns(), option);
                        if (!gid_valid(gid))
index 7ff49852b0cb75163ff9646dfb16195678971899..911649a47dd5ae5b000bfabb2b73b0c71a95ec52 100644 (file)
@@ -503,11 +503,11 @@ static ssize_t device_write(struct file *file, const char __user *buf,
 #endif
                return -EINVAL;
 
-#ifdef CONFIG_COMPAT
-       if (count > sizeof(struct dlm_write_request32) + DLM_RESNAME_MAXLEN)
-#else
+       /*
+        * can't compare against COMPAT/dlm_write_request32 because
+        * we don't yet know if is64bit is zero
+        */
        if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN)
-#endif
                return -EINVAL;
 
        kbuf = kzalloc(count + 1, GFP_NOFS);
index 18c45cac368fe3ec830c7f0c8433e5cd2db0fc2d..20df02c1cc70190b04802493d5a25b802e0adcba 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -434,8 +434,9 @@ static int count(struct user_arg_ptr argv, int max)
                        if (IS_ERR(p))
                                return -EFAULT;
 
-                       if (i++ >= max)
+                       if (i >= max)
                                return -E2BIG;
+                       ++i;
 
                        if (fatal_signal_pending(current))
                                return -ERESTARTNOHAND;
index e95b94945d5f4e8d93413c77c23d1db4d414b85b..137af4255da6dfab591e4d640821539663d01a7c 100644 (file)
@@ -191,15 +191,14 @@ struct posix_acl *f2fs_get_acl(struct inode *inode, int type)
                retval = f2fs_getxattr(inode, name_index, "", value, retval);
        }
 
-       if (retval < 0) {
-               if (retval == -ENODATA)
-                       acl = NULL;
-               else
-                       acl = ERR_PTR(retval);
-       } else {
+       if (retval > 0)
                acl = f2fs_acl_from_disk(value, retval);
-       }
+       else if (retval == -ENODATA)
+               acl = NULL;
+       else
+               acl = ERR_PTR(retval);
        kfree(value);
+
        if (!IS_ERR(acl))
                set_cached_acl(inode, type, acl);
 
index 6ef36c37e2be22395335b2467706c1255ed9ecac..ff3c8439af874e059e8474cb8025f5583dea3149 100644 (file)
@@ -214,7 +214,6 @@ retry:
                goto retry;
        }
        new->ino = ino;
-       INIT_LIST_HEAD(&new->list);
 
        /* add new_oentry into list which is sorted by inode number */
        if (orphan) {
@@ -772,7 +771,7 @@ void init_orphan_info(struct f2fs_sb_info *sbi)
        sbi->n_orphans = 0;
 }
 
-int create_checkpoint_caches(void)
+int __init create_checkpoint_caches(void)
 {
        orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry",
                        sizeof(struct orphan_inode_entry), NULL);
index 3aa5ce7cab83d30012e86942bfed4317ac0b2b99..7bd22a201125636397c0d79d15a2822e83c130f4 100644 (file)
@@ -547,6 +547,15 @@ redirty_out:
 
 #define MAX_DESIRED_PAGES_WP   4096
 
+static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
+                       void *data)
+{
+       struct address_space *mapping = data;
+       int ret = mapping->a_ops->writepage(page, wbc);
+       mapping_set_error(mapping, ret);
+       return ret;
+}
+
 static int f2fs_write_data_pages(struct address_space *mapping,
                            struct writeback_control *wbc)
 {
@@ -563,7 +572,7 @@ static int f2fs_write_data_pages(struct address_space *mapping,
 
        if (!S_ISDIR(inode->i_mode))
                mutex_lock(&sbi->writepages);
-       ret = generic_writepages(mapping, wbc);
+       ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
        if (!S_ISDIR(inode->i_mode))
                mutex_unlock(&sbi->writepages);
        f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));
@@ -689,6 +698,11 @@ static int f2fs_set_data_page_dirty(struct page *page)
        return 0;
 }
 
+static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
+{
+       return generic_block_bmap(mapping, block, get_data_block_ro);
+}
+
 const struct address_space_operations f2fs_dblock_aops = {
        .readpage       = f2fs_read_data_page,
        .readpages      = f2fs_read_data_pages,
@@ -700,4 +714,5 @@ const struct address_space_operations f2fs_dblock_aops = {
        .invalidatepage = f2fs_invalidate_data_page,
        .releasepage    = f2fs_release_data_page,
        .direct_IO      = f2fs_direct_IO,
+       .bmap           = f2fs_bmap,
 };
index 0e0380a588ad1502a32633343d2af26d8f075da3..c8c37307b326c5e1e5d25428f85e78cd3a62fa69 100644 (file)
@@ -26,6 +26,7 @@
 
 static LIST_HEAD(f2fs_stat_list);
 static struct dentry *debugfs_root;
+static DEFINE_MUTEX(f2fs_stat_mutex);
 
 static void update_general_status(struct f2fs_sb_info *sbi)
 {
@@ -180,18 +181,14 @@ static int stat_show(struct seq_file *s, void *v)
        int i = 0;
        int j;
 
+       mutex_lock(&f2fs_stat_mutex);
        list_for_each_entry_safe(si, next, &f2fs_stat_list, stat_list) {
 
-               mutex_lock(&si->stat_lock);
-               if (!si->sbi) {
-                       mutex_unlock(&si->stat_lock);
-                       continue;
-               }
                update_general_status(si->sbi);
 
                seq_printf(s, "\n=====[ partition info. #%d ]=====\n", i++);
-               seq_printf(s, "[SB: 1] [CP: 2] [NAT: %d] [SIT: %d] ",
-                          si->nat_area_segs, si->sit_area_segs);
+               seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ",
+                          si->sit_area_segs, si->nat_area_segs);
                seq_printf(s, "[SSA: %d] [MAIN: %d",
                           si->ssa_area_segs, si->main_area_segs);
                seq_printf(s, "(OverProv:%d Resv:%d)]\n\n",
@@ -286,8 +283,8 @@ static int stat_show(struct seq_file *s, void *v)
                seq_printf(s, "\nMemory: %u KB = static: %u + cached: %u\n",
                                (si->base_mem + si->cache_mem) >> 10,
                                si->base_mem >> 10, si->cache_mem >> 10);
-               mutex_unlock(&si->stat_lock);
        }
+       mutex_unlock(&f2fs_stat_mutex);
        return 0;
 }
 
@@ -303,7 +300,7 @@ static const struct file_operations stat_fops = {
        .release = single_release,
 };
 
-static int init_stats(struct f2fs_sb_info *sbi)
+int f2fs_build_stats(struct f2fs_sb_info *sbi)
 {
        struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
        struct f2fs_stat_info *si;
@@ -313,9 +310,6 @@ static int init_stats(struct f2fs_sb_info *sbi)
                return -ENOMEM;
 
        si = sbi->stat_info;
-       mutex_init(&si->stat_lock);
-       list_add_tail(&si->stat_list, &f2fs_stat_list);
-
        si->all_area_segs = le32_to_cpu(raw_super->segment_count);
        si->sit_area_segs = le32_to_cpu(raw_super->segment_count_sit);
        si->nat_area_segs = le32_to_cpu(raw_super->segment_count_nat);
@@ -325,21 +319,11 @@ static int init_stats(struct f2fs_sb_info *sbi)
        si->main_area_zones = si->main_area_sections /
                                le32_to_cpu(raw_super->secs_per_zone);
        si->sbi = sbi;
-       return 0;
-}
 
-int f2fs_build_stats(struct f2fs_sb_info *sbi)
-{
-       int retval;
-
-       retval = init_stats(sbi);
-       if (retval)
-               return retval;
-
-       if (!debugfs_root)
-               debugfs_root = debugfs_create_dir("f2fs", NULL);
+       mutex_lock(&f2fs_stat_mutex);
+       list_add_tail(&si->stat_list, &f2fs_stat_list);
+       mutex_unlock(&f2fs_stat_mutex);
 
-       debugfs_create_file("status", S_IRUGO, debugfs_root, NULL, &stat_fops);
        return 0;
 }
 
@@ -347,14 +331,22 @@ void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
 {
        struct f2fs_stat_info *si = sbi->stat_info;
 
+       mutex_lock(&f2fs_stat_mutex);
        list_del(&si->stat_list);
-       mutex_lock(&si->stat_lock);
-       si->sbi = NULL;
-       mutex_unlock(&si->stat_lock);
+       mutex_unlock(&f2fs_stat_mutex);
+
        kfree(sbi->stat_info);
 }
 
-void destroy_root_stats(void)
+void __init f2fs_create_root_stats(void)
+{
+       debugfs_root = debugfs_create_dir("f2fs", NULL);
+       if (debugfs_root)
+               debugfs_create_file("status", S_IRUGO, debugfs_root,
+                                        NULL, &stat_fops);
+}
+
+void f2fs_destroy_root_stats(void)
 {
        debugfs_remove_recursive(debugfs_root);
        debugfs_root = NULL;
index 951ed52748f6171a3bc1097a3d806f0df37c778c..989980e16d0b03da6174582518f9fe10f25ca811 100644 (file)
@@ -503,7 +503,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
        }
 
        if (inode) {
-               inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+               inode->i_ctime = CURRENT_TIME;
                drop_nlink(inode);
                if (S_ISDIR(inode->i_mode)) {
                        drop_nlink(inode);
index 13c6dfbb71833e202ffec05a21ee8f0dceb5c046..c8e2d751ef9cd53a614cb875c1f8a9fe8bcdb241 100644 (file)
@@ -211,11 +211,11 @@ struct dnode_of_data {
 static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
                struct page *ipage, struct page *npage, nid_t nid)
 {
+       memset(dn, 0, sizeof(*dn));
        dn->inode = inode;
        dn->inode_page = ipage;
        dn->node_page = npage;
        dn->nid = nid;
-       dn->inode_page_locked = 0;
 }
 
 /*
@@ -877,6 +877,8 @@ bool f2fs_empty_dir(struct inode *);
  * super.c
  */
 int f2fs_sync_fs(struct super_block *, int);
+extern __printf(3, 4)
+void f2fs_msg(struct super_block *, const char *, const char *, ...);
 
 /*
  * hash.c
@@ -912,7 +914,7 @@ int restore_node_summary(struct f2fs_sb_info *, unsigned int,
 void flush_nat_entries(struct f2fs_sb_info *);
 int build_node_manager(struct f2fs_sb_info *);
 void destroy_node_manager(struct f2fs_sb_info *);
-int create_node_manager_caches(void);
+int __init create_node_manager_caches(void);
 void destroy_node_manager_caches(void);
 
 /*
@@ -964,7 +966,7 @@ void sync_dirty_dir_inodes(struct f2fs_sb_info *);
 void block_operations(struct f2fs_sb_info *);
 void write_checkpoint(struct f2fs_sb_info *, bool, bool);
 void init_orphan_info(struct f2fs_sb_info *);
-int create_checkpoint_caches(void);
+int __init create_checkpoint_caches(void);
 void destroy_checkpoint_caches(void);
 
 /*
@@ -984,9 +986,9 @@ int do_write_data_page(struct page *);
 int start_gc_thread(struct f2fs_sb_info *);
 void stop_gc_thread(struct f2fs_sb_info *);
 block_t start_bidx_of_node(unsigned int);
-int f2fs_gc(struct f2fs_sb_info *, int);
+int f2fs_gc(struct f2fs_sb_info *);
 void build_gc_manager(struct f2fs_sb_info *);
-int create_gc_caches(void);
+int __init create_gc_caches(void);
 void destroy_gc_caches(void);
 
 /*
@@ -1058,7 +1060,8 @@ struct f2fs_stat_info {
 
 int f2fs_build_stats(struct f2fs_sb_info *);
 void f2fs_destroy_stats(struct f2fs_sb_info *);
-void destroy_root_stats(void);
+void __init f2fs_create_root_stats(void);
+void f2fs_destroy_root_stats(void);
 #else
 #define stat_inc_call_count(si)
 #define stat_inc_seg_count(si, type)
@@ -1068,7 +1071,8 @@ void destroy_root_stats(void);
 
 static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
 static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
-static inline void destroy_root_stats(void) { }
+static inline void __init f2fs_create_root_stats(void) { }
+static inline void f2fs_destroy_root_stats(void) { }
 #endif
 
 extern const struct file_operations f2fs_dir_operations;
index 7f9ea9271ebeaa4e1da9440758c5fe6c2cee62f3..3191b52aafb04cdce0ead1a5dbdb4a1ed6c7b082 100644 (file)
@@ -96,8 +96,9 @@ out:
 }
 
 static const struct vm_operations_struct f2fs_file_vm_ops = {
-       .fault        = filemap_fault,
-       .page_mkwrite = f2fs_vm_page_mkwrite,
+       .fault          = filemap_fault,
+       .page_mkwrite   = f2fs_vm_page_mkwrite,
+       .remap_pages    = generic_file_remap_pages,
 };
 
 static int need_to_sync_dir(struct f2fs_sb_info *sbi, struct inode *inode)
@@ -137,6 +138,9 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        if (ret)
                return ret;
 
+       /* guarantee free sections for fsync */
+       f2fs_balance_fs(sbi);
+
        mutex_lock(&inode->i_mutex);
 
        if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
@@ -407,6 +411,8 @@ int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
                struct dnode_of_data dn;
                struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 
+               f2fs_balance_fs(sbi);
+
                mutex_lock_op(sbi, DATA_TRUNC);
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = get_dnode_of_data(&dn, index, RDONLY_NODE);
@@ -534,7 +540,6 @@ static long f2fs_fallocate(struct file *file, int mode,
                                loff_t offset, loff_t len)
 {
        struct inode *inode = file->f_path.dentry->d_inode;
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        long ret;
 
        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
@@ -545,7 +550,10 @@ static long f2fs_fallocate(struct file *file, int mode,
        else
                ret = expand_inode_data(inode, offset, len, mode);
 
-       f2fs_balance_fs(sbi);
+       if (!ret) {
+               inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+               mark_inode_dirty(inode);
+       }
        return ret;
 }
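
The f2fs_fallocate() hunk above makes the inode's mtime/ctime update conditional on the operation succeeding. For reference, a minimal user-space sketch of the two fallocate(2) mode bits the function accepts, FALLOC_FL_KEEP_SIZE and FALLOC_FL_PUNCH_HOLE; the path below is a placeholder:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/f2fs/testfile", O_RDWR);    /* placeholder path */

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* punch a 1 MiB hole at offset 4 KiB without changing i_size */
        if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                      4096, 1 << 20))
                perror("fallocate");

        close(fd);
        return 0;
}
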
 
index b0ec721e984a324e3e239d4cdfe946cb0a5b8df0..c386910dacc53b5300f019eec517fa88a04461bc 100644 (file)
@@ -78,7 +78,7 @@ static int gc_thread_func(void *data)
 
                sbi->bg_gc++;
 
-               if (f2fs_gc(sbi, 1) == GC_NONE)
+               if (f2fs_gc(sbi) == GC_NONE)
                        wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
                else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME)
                        wait_ms = GC_THREAD_MAX_SLEEP_TIME;
@@ -424,7 +424,11 @@ next_step:
 }
 
 /*
- * Calculate start block index that this node page contains
+ * Calculate the start block index for the given node offset.
+ * Be careful: the caller must pass a node offset that refers only to a
+ * direct node block.  Passing an offset that points to any other type of
+ * node block, such as an indirect or double indirect node block, is a
+ * caller's bug.
  */
 block_t start_bidx_of_node(unsigned int node_ofs)
 {
@@ -651,62 +655,44 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
        return ret;
 }
 
-int f2fs_gc(struct f2fs_sb_info *sbi, int nGC)
+int f2fs_gc(struct f2fs_sb_info *sbi)
 {
-       unsigned int segno;
-       int old_free_secs, cur_free_secs;
-       int gc_status, nfree;
        struct list_head ilist;
+       unsigned int segno, i;
        int gc_type = BG_GC;
+       int gc_status = GC_NONE;
 
        INIT_LIST_HEAD(&ilist);
 gc_more:
-       nfree = 0;
-       gc_status = GC_NONE;
+       if (!(sbi->sb->s_flags & MS_ACTIVE))
+               goto stop;
 
        if (has_not_enough_free_secs(sbi))
-               old_free_secs = reserved_sections(sbi);
-       else
-               old_free_secs = free_sections(sbi);
-
-       while (sbi->sb->s_flags & MS_ACTIVE) {
-               int i;
-               if (has_not_enough_free_secs(sbi))
-                       gc_type = FG_GC;
+               gc_type = FG_GC;
 
-               cur_free_secs = free_sections(sbi) + nfree;
+       if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
+               goto stop;
 
-               /* We got free space successfully. */
-               if (nGC < cur_free_secs - old_free_secs)
-                       break;
-
-               if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
+       for (i = 0; i < sbi->segs_per_sec; i++) {
+               /*
+                * do_garbage_collect will give us three gc_status:
+                * GC_ERROR, GC_DONE, and GC_BLOCKED.
+                * If GC is finished uncleanly, we have to return
+                * the victim to the dirty segment list.
+                */
+               gc_status = do_garbage_collect(sbi, segno + i, &ilist, gc_type);
+               if (gc_status != GC_DONE)
                        break;
-
-               for (i = 0; i < sbi->segs_per_sec; i++) {
-                       /*
-                        * do_garbage_collect will give us three gc_status:
-                        * GC_ERROR, GC_DONE, and GC_BLOCKED.
-                        * If GC is finished uncleanly, we have to return
-                        * the victim to dirty segment list.
-                        */
-                       gc_status = do_garbage_collect(sbi, segno + i,
-                                       &ilist, gc_type);
-                       if (gc_status != GC_DONE)
-                               goto stop;
-                       nfree++;
-               }
        }
-stop:
-       if (has_not_enough_free_secs(sbi) || gc_status == GC_BLOCKED) {
+       if (has_not_enough_free_secs(sbi)) {
                write_checkpoint(sbi, (gc_status == GC_BLOCKED), false);
-               if (nfree)
+               if (has_not_enough_free_secs(sbi))
                        goto gc_more;
        }
+stop:
        mutex_unlock(&sbi->gc_mutex);
 
        put_gc_inode(&ilist);
-       BUG_ON(!list_empty(&ilist));
        return gc_status;
 }
 
@@ -715,7 +701,7 @@ void build_gc_manager(struct f2fs_sb_info *sbi)
        DIRTY_I(sbi)->v_ops = &default_v_ops;
 }
 
-int create_gc_caches(void)
+int __init create_gc_caches(void)
 {
        winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
                        sizeof(struct inode_entry), NULL);
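A hedged sketch of the calling convention the simplified f2fs_gc() above relies on, matching the f2fs_balance_fs() hunk further below: the caller acquires sbi->gc_mutex and f2fs_gc() releases it before returning. The wrapper name here is illustrative only, not part of the patch.

	static void f2fs_gc_call_sketch(struct f2fs_sb_info *sbi)
	{
		/* Trigger GC only when free sections run low. */
		if (has_not_enough_free_secs(sbi)) {
			mutex_lock(&sbi->gc_mutex);	/* taken by the caller */
			f2fs_gc(sbi);			/* drops gc_mutex internally */
		}
	}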
index bf20b4d03214f01c2a2fa3b278612dc52d73896a..79424177732209f207ce78aee69c5020d90ede31 100644 (file)
@@ -217,6 +217,9 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
                        inode->i_ino == F2FS_META_INO(sbi))
                return 0;
 
+       if (wbc)
+               f2fs_balance_fs(sbi);
+
        node_page = get_node_page(sbi, inode->i_ino);
        if (IS_ERR(node_page))
                return PTR_ERR(node_page);
index 5066bfd256c973657ec28795e2d87af557b13b89..9bda63c9c166a886c1585cc34de06dfef3f59cc2 100644 (file)
@@ -1124,6 +1124,12 @@ static int f2fs_write_node_page(struct page *page,
        return 0;
 }
 
+/*
+ * It is very important to gather dirty pages and write them at once, so that
+ * we can submit a big bio without interfering with other data writes.
+ * By default, 512 pages (2MB), one segment size, is quite reasonable.
+ */
+#define COLLECT_DIRTY_NODES    512
 static int f2fs_write_node_pages(struct address_space *mapping,
                            struct writeback_control *wbc)
 {
@@ -1131,17 +1137,16 @@ static int f2fs_write_node_pages(struct address_space *mapping,
        struct block_device *bdev = sbi->sb->s_bdev;
        long nr_to_write = wbc->nr_to_write;
 
-       if (wbc->for_kupdate)
-               return 0;
-
-       if (get_pages(sbi, F2FS_DIRTY_NODES) == 0)
-               return 0;
-
+       /* First, check whether cached NAT entries need balancing */
        if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
                write_checkpoint(sbi, false, false);
                return 0;
        }
 
+       /* collect a number of dirty node pages and write them together */
+       if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
+               return 0;
+
        /* if mounting is failed, skip writing node pages */
        wbc->nr_to_write = bio_get_nr_vecs(bdev);
        sync_node_pages(sbi, 0, wbc);
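A short worked figure for the threshold above, assuming the 4KB block size f2fs requires:

	/* COLLECT_DIRTY_NODES * PAGE_CACHE_SIZE = 512 * 4096 bytes = 2MB,
	 * i.e. one segment, so node writeback is deferred until at least a
	 * segment's worth of dirty node pages has accumulated. */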
@@ -1732,7 +1737,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
        kfree(nm_i);
 }
 
-int create_node_manager_caches(void)
+int __init create_node_manager_caches(void)
 {
        nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
                        sizeof(struct nat_entry), NULL);
index b571fee677d5837e4481c805f82f3f38f8975281..f42e4060b3991c140fd7e3e3de58d40d30dfcbc4 100644 (file)
@@ -67,7 +67,7 @@ static int recover_dentry(struct page *ipage, struct inode *inode)
                kunmap(page);
                f2fs_put_page(page, 0);
        } else {
-               f2fs_add_link(&dent, inode);
+               err = f2fs_add_link(&dent, inode);
        }
        iput(dir);
 out:
@@ -151,7 +151,6 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
                                goto out;
                        }
 
-                       INIT_LIST_HEAD(&entry->list);
                        list_add_tail(&entry->list, head);
                        entry->blkaddr = blkaddr;
                }
@@ -174,10 +173,9 @@ out:
 static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi,
                                        struct list_head *head)
 {
-       struct list_head *this;
-       struct fsync_inode_entry *entry;
-       list_for_each(this, head) {
-               entry = list_entry(this, struct fsync_inode_entry, list);
+       struct fsync_inode_entry *entry, *tmp;
+
+       list_for_each_entry_safe(entry, tmp, head, list) {
                iput(entry->inode);
                list_del(&entry->list);
                kmem_cache_free(fsync_entry_slab, entry);
index de6240922b0a8f14d4a17109013d09e432b0fb6e..4b009906658235d428f5455ca8da2f038e61997e 100644 (file)
@@ -31,7 +31,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi)
         */
        if (has_not_enough_free_secs(sbi)) {
                mutex_lock(&sbi->gc_mutex);
-               f2fs_gc(sbi, 1);
+               f2fs_gc(sbi);
        }
 }
 
index 08a94c814bdc9a9a5047e632863ac1d03bdfcd8d..37fad04c866907969a128876aaedd8b34c3209ef 100644 (file)
@@ -53,6 +53,18 @@ static match_table_t f2fs_tokens = {
        {Opt_err, NULL},
 };
 
+void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
+{
+       struct va_format vaf;
+       va_list args;
+
+       va_start(args, fmt);
+       vaf.fmt = fmt;
+       vaf.va = &args;
+       printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
+       va_end(args);
+}
+
 static void init_once(void *foo)
 {
        struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
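A hedged usage sketch for the new f2fs_msg() helper, mirroring the parse_options() call sites converted later in this file; the emitted line shape is inferred from the format string above, with <s_id> standing in for the actual device name.

	f2fs_msg(sb, KERN_INFO, "noacl options not supported");
	/* emits: F2FS-fs (<s_id>): noacl options not supported
	 * %pV expands the caller's fmt/args through struct va_format, and
	 * f2fs_msg() appends the trailing newline itself. */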
@@ -125,6 +137,8 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
 
        if (sync)
                write_checkpoint(sbi, false, false);
+       else
+               f2fs_balance_fs(sbi);
 
        return 0;
 }
@@ -247,7 +261,8 @@ static const struct export_operations f2fs_export_ops = {
        .get_parent = f2fs_get_parent,
 };
 
-static int parse_options(struct f2fs_sb_info *sbi, char *options)
+static int parse_options(struct super_block *sb, struct f2fs_sb_info *sbi,
+                               char *options)
 {
        substring_t args[MAX_OPT_ARGS];
        char *p;
@@ -286,7 +301,8 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options)
                        break;
 #else
                case Opt_nouser_xattr:
-                       pr_info("nouser_xattr options not supported\n");
+                       f2fs_msg(sb, KERN_INFO,
+                               "nouser_xattr options not supported");
                        break;
 #endif
 #ifdef CONFIG_F2FS_FS_POSIX_ACL
@@ -295,7 +311,7 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options)
                        break;
 #else
                case Opt_noacl:
-                       pr_info("noacl options not supported\n");
+                       f2fs_msg(sb, KERN_INFO, "noacl options not supported");
                        break;
 #endif
                case Opt_active_logs:
@@ -309,8 +325,9 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options)
                        set_opt(sbi, DISABLE_EXT_IDENTIFY);
                        break;
                default:
-                       pr_err("Unrecognized mount option \"%s\" or missing value\n",
-                                       p);
+                       f2fs_msg(sb, KERN_ERR,
+                               "Unrecognized mount option \"%s\" or missing value",
+                               p);
                        return -EINVAL;
                }
        }
@@ -337,23 +354,36 @@ static loff_t max_file_size(unsigned bits)
        return result;
 }
 
-static int sanity_check_raw_super(struct f2fs_super_block *raw_super)
+static int sanity_check_raw_super(struct super_block *sb,
+                       struct f2fs_super_block *raw_super)
 {
        unsigned int blocksize;
 
-       if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic))
+       if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
+               f2fs_msg(sb, KERN_INFO,
+                       "Magic Mismatch, valid(0x%x) - read(0x%x)",
+                       F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
                return 1;
+       }
 
        /* Currently, support only 4KB block size */
        blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
-       if (blocksize != PAGE_CACHE_SIZE)
+       if (blocksize != PAGE_CACHE_SIZE) {
+               f2fs_msg(sb, KERN_INFO,
+                       "Invalid blocksize (%u), supports only 4KB",
+                       blocksize);
                return 1;
+       }
        if (le32_to_cpu(raw_super->log_sectorsize) !=
-                                       F2FS_LOG_SECTOR_SIZE)
+                                       F2FS_LOG_SECTOR_SIZE) {
+               f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize");
                return 1;
+       }
        if (le32_to_cpu(raw_super->log_sectors_per_block) !=
-                                       F2FS_LOG_SECTORS_PER_BLOCK)
+                                       F2FS_LOG_SECTORS_PER_BLOCK) {
+               f2fs_msg(sb, KERN_INFO, "Invalid log sectors per block");
                return 1;
+       }
        return 0;
 }
 
@@ -413,14 +443,17 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
        if (!sbi)
                return -ENOMEM;
 
-       /* set a temporary block size */
-       if (!sb_set_blocksize(sb, F2FS_BLKSIZE))
+       /* set a block size */
+       if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) {
+               f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
                goto free_sbi;
+       }
 
        /* read f2fs raw super block */
        raw_super_buf = sb_bread(sb, 0);
        if (!raw_super_buf) {
                err = -EIO;
+               f2fs_msg(sb, KERN_ERR, "unable to read superblock");
                goto free_sbi;
        }
        raw_super = (struct f2fs_super_block *)
@@ -438,12 +471,14 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
        set_opt(sbi, POSIX_ACL);
 #endif
        /* parse mount options */
-       if (parse_options(sbi, (char *)data))
+       if (parse_options(sb, sbi, (char *)data))
                goto free_sb_buf;
 
        /* sanity checking of raw super */
-       if (sanity_check_raw_super(raw_super))
+       if (sanity_check_raw_super(sb, raw_super)) {
+               f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem");
                goto free_sb_buf;
+       }
 
        sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
        sb->s_max_links = F2FS_LINK_MAX;
@@ -477,18 +512,23 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
        /* get an inode for meta space */
        sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
        if (IS_ERR(sbi->meta_inode)) {
+               f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
                err = PTR_ERR(sbi->meta_inode);
                goto free_sb_buf;
        }
 
        err = get_valid_checkpoint(sbi);
-       if (err)
+       if (err) {
+               f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
                goto free_meta_inode;
+       }
 
        /* sanity checking of checkpoint */
        err = -EINVAL;
-       if (sanity_check_ckpt(raw_super, sbi->ckpt))
+       if (sanity_check_ckpt(raw_super, sbi->ckpt)) {
+               f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
                goto free_cp;
+       }
 
        sbi->total_valid_node_count =
                                le32_to_cpu(sbi->ckpt->valid_node_count);
@@ -502,25 +542,28 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
        INIT_LIST_HEAD(&sbi->dir_inode_list);
        spin_lock_init(&sbi->dir_inode_lock);
 
-       /* init super block */
-       if (!sb_set_blocksize(sb, sbi->blocksize))
-               goto free_cp;
-
        init_orphan_info(sbi);
 
        /* setup f2fs internal modules */
        err = build_segment_manager(sbi);
-       if (err)
+       if (err) {
+               f2fs_msg(sb, KERN_ERR,
+                       "Failed to initialize F2FS segment manager");
                goto free_sm;
+       }
        err = build_node_manager(sbi);
-       if (err)
+       if (err) {
+               f2fs_msg(sb, KERN_ERR,
+                       "Failed to initialize F2FS node manager");
                goto free_nm;
+       }
 
        build_gc_manager(sbi);
 
        /* get an inode for node space */
        sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
        if (IS_ERR(sbi->node_inode)) {
+               f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
                err = PTR_ERR(sbi->node_inode);
                goto free_nm;
        }
@@ -533,6 +576,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
        /* read root inode and dentry */
        root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
        if (IS_ERR(root)) {
+               f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
                err = PTR_ERR(root);
                goto free_node_inode;
        }
@@ -596,7 +640,7 @@ static struct file_system_type f2fs_fs_type = {
        .fs_flags       = FS_REQUIRES_DEV,
 };
 
-static int init_inodecache(void)
+static int __init init_inodecache(void)
 {
        f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache",
                        sizeof(struct f2fs_inode_info), NULL);
@@ -631,14 +675,17 @@ static int __init init_f2fs_fs(void)
        err = create_checkpoint_caches();
        if (err)
                goto fail;
-       return register_filesystem(&f2fs_fs_type);
+       err = register_filesystem(&f2fs_fs_type);
+       if (err)
+               goto fail;
+       f2fs_create_root_stats();
 fail:
        return err;
 }
 
 static void __exit exit_f2fs_fs(void)
 {
-       destroy_root_stats();
+       f2fs_destroy_root_stats();
        unregister_filesystem(&f2fs_fs_type);
        destroy_checkpoint_caches();
        destroy_gc_caches();
index 940136a3d3a61a05494f9163c5cbd9cf19874a37..8038c049650473bd260d50006ca75175ec90fa62 100644 (file)
@@ -318,6 +318,8 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
        if (name_len > 255 || value_len > MAX_VALUE_LEN)
                return -ERANGE;
 
+       f2fs_balance_fs(sbi);
+
        mutex_lock_op(sbi, NODE_NEW);
        if (!fi->i_xattr_nid) {
                /* Allocate new attribute block */
index 0cf160a94eda06afe6055d3c9e0aa806337121e3..1b2f6c2c3aaff697d2909cb88953e61b9d57b82b 100644 (file)
@@ -4,12 +4,24 @@ config FUSE_FS
          With FUSE it is possible to implement a fully functional filesystem
          in a userspace program.
 
-         There's also companion library: libfuse.  This library along with
-         utilities is available from the FUSE homepage:
+         There's also a companion library: libfuse2.  This library is available
+         from the FUSE homepage:
          <http://fuse.sourceforge.net/>
+         although chances are your distribution already has that library
+         installed if you've installed the "fuse" package itself.
 
          See <file:Documentation/filesystems/fuse.txt> for more information.
          See <file:Documentation/Changes> for needed library/utility version.
 
          If you want to develop a userspace FS, or if you want to use
          a filesystem based on FUSE, answer Y or M.
+
+config CUSE
+       tristate "Character device in Userspace support"
+       depends on FUSE_FS
+       help
+         This FUSE extension allows character devices to be
+         implemented in userspace.
+
+         If you want to develop or use a userspace character device
+         based on CUSE, answer Y or M.
index ee8d55042298272f6ac6c76982f4ecd5efb745ca..e397b675b029a6e9f2d6f7f59a0d73acec412fbe 100644 (file)
@@ -45,7 +45,6 @@
 #include <linux/miscdevice.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/spinlock.h>
 #include <linux/stat.h>
 #include <linux/module.h>
 
@@ -63,7 +62,7 @@ struct cuse_conn {
        bool                    unrestricted_ioctl;
 };
 
-static DEFINE_SPINLOCK(cuse_lock);             /* protects cuse_conntbl */
+static DEFINE_MUTEX(cuse_lock);                /* protects registration */
 static struct list_head cuse_conntbl[CUSE_CONNTBL_LEN];
 static struct class *cuse_class;
 
@@ -114,14 +113,14 @@ static int cuse_open(struct inode *inode, struct file *file)
        int rc;
 
        /* look up and get the connection */
-       spin_lock(&cuse_lock);
+       mutex_lock(&cuse_lock);
        list_for_each_entry(pos, cuse_conntbl_head(devt), list)
                if (pos->dev->devt == devt) {
                        fuse_conn_get(&pos->fc);
                        cc = pos;
                        break;
                }
-       spin_unlock(&cuse_lock);
+       mutex_unlock(&cuse_lock);
 
        /* dead? */
        if (!cc)
@@ -267,7 +266,7 @@ static int cuse_parse_one(char **pp, char *end, char **keyp, char **valp)
 static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo)
 {
        char *end = p + len;
-       char *key, *val;
+       char *uninitialized_var(key), *uninitialized_var(val);
        int rc;
 
        while (true) {
@@ -305,14 +304,14 @@ static void cuse_gendev_release(struct device *dev)
  */
 static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
 {
-       struct cuse_conn *cc = fc_to_cc(fc);
+       struct cuse_conn *cc = fc_to_cc(fc), *pos;
        struct cuse_init_out *arg = req->out.args[0].value;
        struct page *page = req->pages[0];
        struct cuse_devinfo devinfo = { };
        struct device *dev;
        struct cdev *cdev;
        dev_t devt;
-       int rc;
+       int rc, i;
 
        if (req->out.h.error ||
            arg->major != FUSE_KERNEL_VERSION || arg->minor < 11) {
@@ -356,15 +355,24 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
        dev_set_drvdata(dev, cc);
        dev_set_name(dev, "%s", devinfo.name);
 
+       mutex_lock(&cuse_lock);
+
+       /* make sure the device-name is unique */
+       for (i = 0; i < CUSE_CONNTBL_LEN; ++i) {
+               list_for_each_entry(pos, &cuse_conntbl[i], list)
+                       if (!strcmp(dev_name(pos->dev), dev_name(dev)))
+                               goto err_unlock;
+       }
+
        rc = device_add(dev);
        if (rc)
-               goto err_device;
+               goto err_unlock;
 
        /* register cdev */
        rc = -ENOMEM;
        cdev = cdev_alloc();
        if (!cdev)
-               goto err_device;
+               goto err_unlock;
 
        cdev->owner = THIS_MODULE;
        cdev->ops = &cuse_frontend_fops;
@@ -377,9 +385,8 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
        cc->cdev = cdev;
 
        /* make the device available */
-       spin_lock(&cuse_lock);
        list_add(&cc->list, cuse_conntbl_head(devt));
-       spin_unlock(&cuse_lock);
+       mutex_unlock(&cuse_lock);
 
        /* announce device availability */
        dev_set_uevent_suppress(dev, 0);
@@ -391,7 +398,8 @@ out:
 
 err_cdev:
        cdev_del(cdev);
-err_device:
+err_unlock:
+       mutex_unlock(&cuse_lock);
        put_device(dev);
 err_region:
        unregister_chrdev_region(devt, 1);
@@ -520,9 +528,9 @@ static int cuse_channel_release(struct inode *inode, struct file *file)
        int rc;
 
        /* remove from the conntbl, no more access from this point on */
-       spin_lock(&cuse_lock);
+       mutex_lock(&cuse_lock);
        list_del_init(&cc->list);
-       spin_unlock(&cuse_lock);
+       mutex_unlock(&cuse_lock);
 
        /* remove device */
        if (cc->dev)
index c16335315e5da8843a41dc9de6ea5f27cae6df12..e83351aa5baddedc2d8000c3ac82936a25da28b7 100644 (file)
@@ -692,8 +692,6 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
        struct page *oldpage = *pagep;
        struct page *newpage;
        struct pipe_buffer *buf = cs->pipebufs;
-       struct address_space *mapping;
-       pgoff_t index;
 
        unlock_request(cs->fc, cs->req);
        fuse_copy_finish(cs);
@@ -724,9 +722,6 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
        if (fuse_check_page(newpage) != 0)
                goto out_fallback_unlock;
 
-       mapping = oldpage->mapping;
-       index = oldpage->index;
-
        /*
         * This is a new and locked page, it shouldn't be mapped or
         * have any special flags on it
index e21d4d8f87e36e45daf71f044712710f12b23322..f3ab824fa302bcae5909adf5c676c4bbb9417743 100644 (file)
@@ -2177,8 +2177,8 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        return ret;
 }
 
-long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
-                           loff_t length)
+static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
+                               loff_t length)
 {
        struct fuse_file *ff = file->private_data;
        struct fuse_conn *fc = ff->fc;
@@ -2213,7 +2213,6 @@ long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
 
        return err;
 }
-EXPORT_SYMBOL_GPL(fuse_file_fallocate);
 
 static const struct file_operations fuse_file_operations = {
        .llseek         = fuse_file_llseek,
index b906ed17a8391a99ffbc15d8ea62c6861be90689..9802de0f85e61fe061ea150d15177c39b4128ca3 100644 (file)
@@ -281,6 +281,7 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
 {
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+       int lvb_needs_unlock = 0;
        int error;
 
        if (gl->gl_lksb.sb_lkid == 0) {
@@ -294,8 +295,12 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
        gfs2_update_request_times(gl);
 
        /* don't want to skip dlm_unlock writing the lvb when lock is ex */
+
+       if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE))
+               lvb_needs_unlock = 1;
+
        if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
-           gl->gl_lksb.sb_lvbptr && (gl->gl_state != LM_ST_EXCLUSIVE)) {
+           !lvb_needs_unlock) {
                gfs2_glock_free(gl);
                return;
        }
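For readability, the skip test above can be read as a single predicate; a hedged sketch (the local variable is illustrative, not part of the patch):

	/* dlm_unlock is skipped only when SDF_SKIP_DLM_UNLOCK is set and the
	 * glock does not hold an EX lock whose lvb still needs writing back. */
	bool skip_unlock = test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
			   !(gl->gl_lksb.sb_lvbptr &&
			     gl->gl_state == LM_ST_EXCLUSIVE);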
index a2862339323b2a5f1e08dd7e25a779e1b683b828..81cc7eaff86321c2788c0aa83c236778e6fb4b70 100644 (file)
@@ -446,7 +446,8 @@ int __log_start_commit(journal_t *journal, tid_t target)
         * currently running transaction (if it exists).  Otherwise,
         * the target tid must be an old one.
         */
-       if (journal->j_running_transaction &&
+       if (journal->j_commit_request != target &&
+           journal->j_running_transaction &&
            journal->j_running_transaction->t_tid == target) {
                /*
                 * We want a new commit: OK, mark the request and wakeup the
index dd057bc6b65b28d3822229e0c0d5d0777b7955b9..fc8dc20fdeb9c90274d5acb8ef2bb831af297425 100644 (file)
@@ -177,11 +177,31 @@ out_nofree:
        return mnt;
 }
 
+static int
+nfs_namespace_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+{
+       if (NFS_FH(dentry->d_inode)->size != 0)
+               return nfs_getattr(mnt, dentry, stat);
+       generic_fillattr(dentry->d_inode, stat);
+       return 0;
+}
+
+static int
+nfs_namespace_setattr(struct dentry *dentry, struct iattr *attr)
+{
+       if (NFS_FH(dentry->d_inode)->size != 0)
+               return nfs_setattr(dentry, attr);
+       return -EACCES;
+}
+
 const struct inode_operations nfs_mountpoint_inode_operations = {
        .getattr        = nfs_getattr,
+       .setattr        = nfs_setattr,
 };
 
 const struct inode_operations nfs_referral_inode_operations = {
+       .getattr        = nfs_namespace_getattr,
+       .setattr        = nfs_namespace_setattr,
 };
 
 static void nfs_expire_automounts(struct work_struct *work)
index acc3472681244d004f743a198e802709a38927b4..2e9779b58b7af6f3d143e60af3fba6abc8902a11 100644 (file)
@@ -236,11 +236,10 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
        error = nfs4_discover_server_trunking(clp, &old);
        if (error < 0)
                goto error;
+       nfs_put_client(clp);
        if (clp != old) {
                clp->cl_preserve_clid = true;
-               nfs_put_client(clp);
                clp = old;
-               atomic_inc(&clp->cl_count);
        }
 
        return clp;
@@ -306,7 +305,7 @@ int nfs40_walk_client_list(struct nfs_client *new,
                .clientid       = new->cl_clientid,
                .confirm        = new->cl_confirm,
        };
-       int status;
+       int status = -NFS4ERR_STALE_CLIENTID;
 
        spin_lock(&nn->nfs_client_lock);
        list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) {
@@ -332,40 +331,33 @@ int nfs40_walk_client_list(struct nfs_client *new,
 
                if (prev)
                        nfs_put_client(prev);
+               prev = pos;
 
                status = nfs4_proc_setclientid_confirm(pos, &clid, cred);
-               if (status == 0) {
+               switch (status) {
+               case -NFS4ERR_STALE_CLIENTID:
+                       break;
+               case 0:
                        nfs4_swap_callback_idents(pos, new);
 
-                       nfs_put_client(pos);
+                       prev = NULL;
                        *result = pos;
                        dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",
                                __func__, pos, atomic_read(&pos->cl_count));
-                       return 0;
-               }
-               if (status != -NFS4ERR_STALE_CLIENTID) {
-                       nfs_put_client(pos);
-                       dprintk("NFS: <-- %s status = %d, no result\n",
-                               __func__, status);
-                       return status;
+               default:
+                       goto out;
                }
 
                spin_lock(&nn->nfs_client_lock);
-               prev = pos;
        }
+       spin_unlock(&nn->nfs_client_lock);
 
-       /*
-        * No matching nfs_client found.  This should be impossible,
-        * because the new nfs_client has already been added to
-        * nfs_client_list by nfs_get_client().
-        *
-        * Don't BUG(), since the caller is holding a mutex.
-        */
+       /* No match found. The server lost our clientid */
+out:
        if (prev)
                nfs_put_client(prev);
-       spin_unlock(&nn->nfs_client_lock);
-       pr_err("NFS: %s Error: no matching nfs_client found\n", __func__);
-       return -NFS4ERR_STALE_CLIENTID;
+       dprintk("NFS: <-- %s status = %d\n", __func__, status);
+       return status;
 }
 
 #ifdef CONFIG_NFS_V4_1
@@ -432,7 +424,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
 {
        struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);
        struct nfs_client *pos, *n, *prev = NULL;
-       int error;
+       int status = -NFS4ERR_STALE_CLIENTID;
 
        spin_lock(&nn->nfs_client_lock);
        list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) {
@@ -448,14 +440,17 @@ int nfs41_walk_client_list(struct nfs_client *new,
                                nfs_put_client(prev);
                        prev = pos;
 
-                       error = nfs_wait_client_init_complete(pos);
-                       if (error < 0) {
+                       nfs4_schedule_lease_recovery(pos);
+                       status = nfs_wait_client_init_complete(pos);
+                       if (status < 0) {
                                nfs_put_client(pos);
                                spin_lock(&nn->nfs_client_lock);
                                continue;
                        }
-
+                       status = pos->cl_cons_state;
                        spin_lock(&nn->nfs_client_lock);
+                       if (status < 0)
+                               continue;
                }
 
                if (pos->rpc_ops != new->rpc_ops)
@@ -473,6 +468,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
                if (!nfs4_match_serverowners(pos, new))
                        continue;
 
+               atomic_inc(&pos->cl_count);
                spin_unlock(&nn->nfs_client_lock);
                dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n",
                        __func__, pos, atomic_read(&pos->cl_count));
@@ -481,16 +477,10 @@ int nfs41_walk_client_list(struct nfs_client *new,
                return 0;
        }
 
-       /*
-        * No matching nfs_client found.  This should be impossible,
-        * because the new nfs_client has already been added to
-        * nfs_client_list by nfs_get_client().
-        *
-        * Don't BUG(), since the caller is holding a mutex.
-        */
+       /* No matching nfs_client found. */
        spin_unlock(&nn->nfs_client_lock);
-       pr_err("NFS: %s Error: no matching nfs_client found\n", __func__);
-       return -NFS4ERR_STALE_CLIENTID;
+       dprintk("NFS: <-- %s status = %d\n", __func__, status);
+       return status;
 }
 #endif /* CONFIG_NFS_V4_1 */
 
index 9448c579d41a40068897f99cbc5db09bccf3e063..e61f68d5ef218dd64cdfec220bab02069cf1d72e 100644 (file)
@@ -136,16 +136,11 @@ int nfs40_discover_server_trunking(struct nfs_client *clp,
        clp->cl_confirm = clid.confirm;
 
        status = nfs40_walk_client_list(clp, result, cred);
-       switch (status) {
-       case -NFS4ERR_STALE_CLIENTID:
-               set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
-       case 0:
+       if (status == 0) {
                /* Sustain the lease, even if it's empty.  If the clientid4
                 * goes stale it's of no use for trunking discovery. */
                nfs4_schedule_state_renewal(*result);
-               break;
        }
-
 out:
        return status;
 }
@@ -1863,6 +1858,7 @@ again:
        case -ETIMEDOUT:
        case -EAGAIN:
                ssleep(1);
+       case -NFS4ERR_STALE_CLIENTID:
                dprintk("NFS: %s after status %d, retrying\n",
                        __func__, status);
                goto again;
@@ -2022,8 +2018,18 @@ static int nfs4_reset_session(struct nfs_client *clp)
        nfs4_begin_drain_session(clp);
        cred = nfs4_get_exchange_id_cred(clp);
        status = nfs4_proc_destroy_session(clp->cl_session, cred);
-       if (status && status != -NFS4ERR_BADSESSION &&
-           status != -NFS4ERR_DEADSESSION) {
+       switch (status) {
+       case 0:
+       case -NFS4ERR_BADSESSION:
+       case -NFS4ERR_DEADSESSION:
+               break;
+       case -NFS4ERR_BACK_CHAN_BUSY:
+       case -NFS4ERR_DELAY:
+               set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+               status = 0;
+               ssleep(1);
+               goto out;
+       default:
                status = nfs4_recovery_handle_error(clp, status);
                goto out;
        }
index 2e7e8c878e5d157418e91a3ce742b8810168c5d3..b056b1628722218bbbadce33e86255e6953c8994 100644 (file)
@@ -2589,27 +2589,23 @@ nfs_xdev_mount(struct file_system_type *fs_type, int flags,
        struct nfs_server *server;
        struct dentry *mntroot = ERR_PTR(-ENOMEM);
        struct nfs_subversion *nfs_mod = NFS_SB(data->sb)->nfs_client->cl_nfs_mod;
-       int error;
 
-       dprintk("--> nfs_xdev_mount_common()\n");
+       dprintk("--> nfs_xdev_mount()\n");
 
        mount_info.mntfh = mount_info.cloned->fh;
 
        /* create a new volume representation */
        server = nfs_mod->rpc_ops->clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
-       if (IS_ERR(server)) {
-               error = PTR_ERR(server);
-               goto out_err;
-       }
 
-       mntroot = nfs_fs_mount_common(server, flags, dev_name, &mount_info, nfs_mod);
-       dprintk("<-- nfs_xdev_mount_common() = 0\n");
-out:
-       return mntroot;
+       if (IS_ERR(server))
+               mntroot = ERR_CAST(server);
+       else
+               mntroot = nfs_fs_mount_common(server, flags,
+                               dev_name, &mount_info, nfs_mod);
 
-out_err:
-       dprintk("<-- nfs_xdev_mount_common() = %d [error]\n", error);
-       goto out;
+       dprintk("<-- nfs_xdev_mount() = %ld\n",
+                       IS_ERR(mntroot) ? PTR_ERR(mntroot) : 0L);
+       return mntroot;
 }
 
 #if IS_ENABLED(CONFIG_NFS_V4)
index fdb180769485f95c78a0a5b5f33d5752baf0590f..f3859354e41a97afcdb1c4f8fdec4d9e46f09666 100644 (file)
@@ -664,8 +664,11 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
        if (ret < 0)
                printk(KERN_ERR "NILFS: GC failed during preparation: "
                        "cannot read source blocks: err=%d\n", ret);
-       else
+       else {
+               if (nilfs_sb_need_update(nilfs))
+                       set_nilfs_discontinued(nilfs);
                ret = nilfs_clean_segments(inode->i_sb, argv, kbufs);
+       }
 
        nilfs_remove_all_gcinodes(nilfs);
        clear_nilfs_gc_running(nilfs);
index 6a91e6ffbcbded857c4513cee0182ec61f899b74..f7ed9ee46eb9d3818d2210c100731e5eda6ee35e 100644 (file)
@@ -449,7 +449,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                        do {
                                min_flt += t->min_flt;
                                maj_flt += t->maj_flt;
-                               gtime += t->gtime;
+                               gtime += task_gtime(t);
                                t = next_thread(t);
                        } while (t != task);
 
@@ -472,7 +472,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                min_flt = task->min_flt;
                maj_flt = task->maj_flt;
                task_cputime_adjusted(task, &utime, &stime);
-               gtime = task->gtime;
+               gtime = task_gtime(task);
        }
 
        /* scale priority and nice values from timeslices to -20..20 */
index 7003e5266f25748b6099d47ba8a2a2910c83a454..288f068740f6f6e2eb02dea57ed9a5ab1a8b47b8 100644 (file)
@@ -167,12 +167,16 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
 static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz)
 {
        char *hdr;
-       struct timeval timestamp;
+       struct timespec timestamp;
        size_t len;
 
-       do_gettimeofday(&timestamp);
+       /* Report zeroed timestamp if called before timekeeping has resumed. */
+       if (__getnstimeofday(&timestamp)) {
+               timestamp.tv_sec = 0;
+               timestamp.tv_nsec = 0;
+       }
        hdr = kasprintf(GFP_ATOMIC, RAMOOPS_KERNMSG_HDR "%lu.%lu\n",
-               (long)timestamp.tv_sec, (long)timestamp.tv_usec);
+               (long)timestamp.tv_sec, (long)(timestamp.tv_nsec / 1000));
        WARN_ON_ONCE(!hdr);
        len = hdr ? strlen(hdr) : 0;
        persistent_ram_write(prz, hdr, len);
index 2ef72d9650365c71586592a9c456785b7094a59b..8c1c96c27062a504bfc151f33d335fcd8f6779b5 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/fs.h>
 #include <linux/rcupdate.h>
 #include <linux/hrtimer.h>
+#include <linux/sched/rt.h>
 
 #include <asm/uaccess.h>
 
index 9d863fb501f92ab9a9207fcce7d8b2087c637c0e..f2bc3dfd0b883585e1a2afe6734de2db6be77004 100644 (file)
@@ -296,7 +296,7 @@ EXPORT_SYMBOL(seq_read);
  *     seq_lseek -     ->llseek() method for sequential files.
  *     @file: the file in question
  *     @offset: new position
- *     @origin: 0 for absolute, 1 for relative position
+ *     @whence: 0 for absolute, 1 for relative position
  *
  *     Ready-made ->f_op->llseek()
  */
index d44fb568abe1a945c014a03c88648cd847906cc0..e9be396a558d944fde7086cce64d22c672cdc5b7 100644 (file)
@@ -307,7 +307,8 @@ static void udf_sb_free_partitions(struct super_block *sb)
 {
        struct udf_sb_info *sbi = UDF_SB(sb);
        int i;
-
+       if (sbi->s_partmaps == NULL)
+               return;
        for (i = 0; i < sbi->s_partitions; i++)
                udf_free_partition(&sbi->s_partmaps[i]);
        kfree(sbi->s_partmaps);
index 4111a40ebe1a17dde31f89e5154c0d257c6e5d6c..5f707e5371717a331131f72f5babff24670d9bef 100644 (file)
@@ -86,11 +86,11 @@ xfs_destroy_ioend(
        }
 
        if (ioend->io_iocb) {
+               inode_dio_done(ioend->io_inode);
                if (ioend->io_isasync) {
                        aio_complete(ioend->io_iocb, ioend->io_error ?
                                        ioend->io_error : ioend->io_result, 0);
                }
-               inode_dio_done(ioend->io_inode);
        }
 
        mempool_free(ioend, xfs_ioend_pool);
index 0e92d12765d2670146b0325cd1215b449fb60dfe..cdb2d33485837bcd314f380f6d0c6737424626ec 100644 (file)
@@ -4680,9 +4680,6 @@ __xfs_bmapi_allocate(
                        return error;
        }
 
-       if (bma->flags & XFS_BMAPI_STACK_SWITCH)
-               bma->stack_switch = 1;
-
        error = xfs_bmap_alloc(bma);
        if (error)
                return error;
@@ -4956,6 +4953,9 @@ xfs_bmapi_write(
        bma.flist = flist;
        bma.firstblock = firstblock;
 
+       if (flags & XFS_BMAPI_STACK_SWITCH)
+               bma.stack_switch = 1;
+
        while (bno < end && n < *nmap) {
                inhole = eof || bma.got.br_startoff > bno;
                wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
index 26673a0b20e7249a149b22357c0961f86a6ac903..fbbb9eb92e32a7b38ec32c954313cffdb39a8508 100644 (file)
@@ -175,7 +175,7 @@ xfs_buf_get_maps(
        bp->b_map_count = map_count;
 
        if (map_count == 1) {
-               bp->b_maps = &bp->b_map;
+               bp->b_maps = &bp->__b_map;
                return 0;
        }
 
@@ -193,7 +193,7 @@ static void
 xfs_buf_free_maps(
        struct xfs_buf  *bp)
 {
-       if (bp->b_maps != &bp->b_map) {
+       if (bp->b_maps != &bp->__b_map) {
                kmem_free(bp->b_maps);
                bp->b_maps = NULL;
        }
@@ -377,8 +377,8 @@ xfs_buf_allocate_memory(
        }
 
 use_alloc_page:
-       start = BBTOB(bp->b_map.bm_bn) >> PAGE_SHIFT;
-       end = (BBTOB(bp->b_map.bm_bn + bp->b_length) + PAGE_SIZE - 1)
+       start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
+       end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
                                                                >> PAGE_SHIFT;
        page_count = end - start;
        error = _xfs_buf_get_pages(bp, page_count, flags);
@@ -487,6 +487,7 @@ _xfs_buf_find(
        struct rb_node          *parent;
        xfs_buf_t               *bp;
        xfs_daddr_t             blkno = map[0].bm_bn;
+       xfs_daddr_t             eofs;
        int                     numblks = 0;
        int                     i;
 
@@ -498,6 +499,23 @@ _xfs_buf_find(
        ASSERT(!(numbytes < (1 << btp->bt_sshift)));
        ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask));
 
+       /*
+        * Corrupted block numbers can get through to here, unfortunately, so we
+        * have to check that the buffer falls within the filesystem bounds.
+        */
+       eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
+       if (blkno >= eofs) {
+               /*
+                * XXX (dgc): we should really be returning EFSCORRUPTED here,
+                * but none of the higher level infrastructure supports
+                * returning a specific error on buffer lookup failures.
+                */
+               xfs_alert(btp->bt_mount,
+                         "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
+                         __func__, blkno, eofs);
+               return NULL;
+       }
+
        /* get tree root */
        pag = xfs_perag_get(btp->bt_mount,
                                xfs_daddr_to_agno(btp->bt_mount, blkno));
@@ -640,7 +658,7 @@ _xfs_buf_read(
        xfs_buf_flags_t         flags)
 {
        ASSERT(!(flags & XBF_WRITE));
-       ASSERT(bp->b_map.bm_bn != XFS_BUF_DADDR_NULL);
+       ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
 
        bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
        bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
@@ -1487,6 +1505,8 @@ restart:
        while (!list_empty(&btp->bt_lru)) {
                bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
                if (atomic_read(&bp->b_hold) > 1) {
+                       trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
+                       list_move_tail(&bp->b_lru, &btp->bt_lru);
                        spin_unlock(&btp->bt_lru_lock);
                        delay(100);
                        goto restart;
@@ -1709,7 +1729,7 @@ xfs_buf_cmp(
        struct xfs_buf  *bp = container_of(b, struct xfs_buf, b_list);
        xfs_daddr_t             diff;
 
-       diff = ap->b_map.bm_bn - bp->b_map.bm_bn;
+       diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
        if (diff < 0)
                return -1;
        if (diff > 0)
index 23f5642480bb1ea5f0f8a42423c607e7d61e9b83..433a12ed7b179e44ef784e8a144346af4faa9852 100644 (file)
@@ -151,7 +151,7 @@ typedef struct xfs_buf {
        struct page             **b_pages;      /* array of page pointers */
        struct page             *b_page_array[XB_PAGES]; /* inline pages */
        struct xfs_buf_map      *b_maps;        /* compound buffer map */
-       struct xfs_buf_map      b_map;          /* inline compound buffer map */
+       struct xfs_buf_map      __b_map;        /* inline compound buffer map */
        int                     b_map_count;
        int                     b_io_length;    /* IO size in BBs */
        atomic_t                b_pin_count;    /* pin count */
@@ -330,8 +330,8 @@ void xfs_buf_stale(struct xfs_buf *bp);
  * In future, uncached buffers will pass the block number directly to the io
  * request function and hence these macros will go away at that point.
  */
-#define XFS_BUF_ADDR(bp)               ((bp)->b_map.bm_bn)
-#define XFS_BUF_SET_ADDR(bp, bno)      ((bp)->b_map.bm_bn = (xfs_daddr_t)(bno))
+#define XFS_BUF_ADDR(bp)               ((bp)->b_maps[0].bm_bn)
+#define XFS_BUF_SET_ADDR(bp, bno)      ((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))
 
 static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
 {
index becf4a97efc65c95240e45c1b05b751919d51317..3f9949fee391b11cdfd73c12799cccba4ce1b56b 100644 (file)
@@ -71,7 +71,7 @@ xfs_buf_item_log_debug(
                chunk_num = byte >> XFS_BLF_SHIFT;
                word_num = chunk_num >> BIT_TO_WORD_SHIFT;
                bit_num = chunk_num & (NBWORD - 1);
-               wordp = &(bip->bli_format.blf_data_map[word_num]);
+               wordp = &(bip->__bli_format.blf_data_map[word_num]);
                bit_set = *wordp & (1 << bit_num);
                ASSERT(bit_set);
                byte++;
@@ -237,7 +237,7 @@ xfs_buf_item_size(
                 * cancel flag in it.
                 */
                trace_xfs_buf_item_size_stale(bip);
-               ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
+               ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
                return bip->bli_format_count;
        }
 
@@ -278,7 +278,7 @@ xfs_buf_item_format_segment(
        uint            buffer_offset;
 
        /* copy the flags across from the base format item */
-       blfp->blf_flags = bip->bli_format.blf_flags;
+       blfp->blf_flags = bip->__bli_format.blf_flags;
 
        /*
         * Base size is the actual size of the ondisk structure - it reflects
@@ -287,6 +287,17 @@ xfs_buf_item_format_segment(
         */
        base_size = offsetof(struct xfs_buf_log_format, blf_data_map) +
                        (blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
+
+       nvecs = 0;
+       first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
+       if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
+               /*
+                * If the map is not dirty in the transaction, mark
+                * the size as zero and do not advance the vector pointer.
+                */
+               goto out;
+       }
+
        vecp->i_addr = blfp;
        vecp->i_len = base_size;
        vecp->i_type = XLOG_REG_TYPE_BFORMAT;
@@ -301,15 +312,13 @@ xfs_buf_item_format_segment(
                 */
                trace_xfs_buf_item_format_stale(bip);
                ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
-               blfp->blf_size = nvecs;
-               return vecp;
+               goto out;
        }
 
        /*
         * Fill in an iovec for each set of contiguous chunks.
         */
-       first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
-       ASSERT(first_bit != -1);
+
        last_bit = first_bit;
        nbits = 1;
        for (;;) {
@@ -371,7 +380,8 @@ xfs_buf_item_format_segment(
                        nbits++;
                }
        }
-       bip->bli_format.blf_size = nvecs;
+out:
+       blfp->blf_size = nvecs;
        return vecp;
 }
 
@@ -405,7 +415,7 @@ xfs_buf_item_format(
        if (bip->bli_flags & XFS_BLI_INODE_BUF) {
                if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
                      xfs_log_item_in_current_chkpt(lip)))
-                       bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF;
+                       bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
                bip->bli_flags &= ~XFS_BLI_INODE_BUF;
        }
 
@@ -485,7 +495,7 @@ xfs_buf_item_unpin(
                ASSERT(bip->bli_flags & XFS_BLI_STALE);
                ASSERT(xfs_buf_islocked(bp));
                ASSERT(XFS_BUF_ISSTALE(bp));
-               ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
+               ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
 
                trace_xfs_buf_item_unpin_stale(bip);
 
@@ -601,7 +611,7 @@ xfs_buf_item_unlock(
 {
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        struct xfs_buf          *bp = bip->bli_buf;
-       int                     aborted;
+       int                     aborted, clean, i;
        uint                    hold;
 
        /* Clear the buffer's association with this transaction. */
@@ -631,7 +641,7 @@ xfs_buf_item_unlock(
         */
        if (bip->bli_flags & XFS_BLI_STALE) {
                trace_xfs_buf_item_unlock_stale(bip);
-               ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
+               ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
                if (!aborted) {
                        atomic_dec(&bip->bli_refcount);
                        return;
@@ -642,12 +652,27 @@ xfs_buf_item_unlock(
 
        /*
         * If the buf item isn't tracking any data, free it, otherwise drop the
-        * reference we hold to it.
+        * reference we hold to it. If we are aborting the transaction, this
+        * may be the only reference to the buf item, so we free it regardless
+        * of whether it is dirty or not, since a dirty abort implies a
+        * shutdown anyway.
         */
-       if (xfs_bitmap_empty(bip->bli_format.blf_data_map,
-                            bip->bli_format.blf_map_size))
+       clean = 1;
+       for (i = 0; i < bip->bli_format_count; i++) {
+               if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
+                            bip->bli_formats[i].blf_map_size)) {
+                       clean = 0;
+                       break;
+               }
+       }
+       if (clean)
                xfs_buf_item_relse(bp);
-       else
+       else if (aborted) {
+               if (atomic_dec_and_test(&bip->bli_refcount)) {
+                       ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
+                       xfs_buf_item_relse(bp);
+               }
+       } else
                atomic_dec(&bip->bli_refcount);
 
        if (!hold)
@@ -716,7 +741,7 @@ xfs_buf_item_get_format(
        bip->bli_format_count = count;
 
        if (count == 1) {
-               bip->bli_formats = &bip->bli_format;
+               bip->bli_formats = &bip->__bli_format;
                return 0;
        }
 
@@ -731,7 +756,7 @@ STATIC void
 xfs_buf_item_free_format(
        struct xfs_buf_log_item *bip)
 {
-       if (bip->bli_formats != &bip->bli_format) {
+       if (bip->bli_formats != &bip->__bli_format) {
                kmem_free(bip->bli_formats);
                bip->bli_formats = NULL;
        }
index 6850f49f4af3f60c0a5a8ef9508f7bac9af00e4c..16def435944ac79063cd3f48bcf8067cc2d270c5 100644 (file)
@@ -104,7 +104,7 @@ typedef struct xfs_buf_log_item {
 #endif
        int                     bli_format_count;       /* count of headers */
        struct xfs_buf_log_format *bli_formats; /* array of in-log header ptrs */
-       struct xfs_buf_log_format bli_format;   /* embedded in-log header */
+       struct xfs_buf_log_format __bli_format; /* embedded in-log header */
 } xfs_buf_log_item_t;
 
 void   xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
index d0e9c74d3d96a75c18d40f175efd51eec85aab18..a8bd26b82ecb00dd14063da4c62c1a64011f0caa 100644 (file)
@@ -246,10 +246,10 @@ xfs_swap_extents(
                goto out_unlock;
        }
 
-       error = -filemap_write_and_wait(VFS_I(ip)->i_mapping);
+       error = -filemap_write_and_wait(VFS_I(tip)->i_mapping);
        if (error)
                goto out_unlock;
-       truncate_pagecache_range(VFS_I(ip), 0, -1);
+       truncate_pagecache_range(VFS_I(tip), 0, -1);
 
        /* Verify O_DIRECT for ftmp */
        if (VN_CACHED(VFS_I(tip)) != 0) {
index 7536faaa61e7852175f7b7c20f1e4b0c566c24db..12afe07a91d71ddc003a763674d7d1ee45a992d3 100644 (file)
@@ -355,10 +355,12 @@ xfs_dir2_block_addname(
        /*
         * If need to compact the leaf entries, do it now.
         */
-       if (compact)
+       if (compact) {
                xfs_dir2_block_compact(tp, bp, hdr, btp, blp, &needlog,
                                      &lfloghigh, &lfloglow);
-       else if (btp->stale) {
+               /* recalculate blp post-compaction */
+               blp = xfs_dir2_block_leaf_p(btp);
+       } else if (btp->stale) {
                /*
                 * Set leaf logging boundaries to impossible state.
                 * For the no-stale case they're set explicitly.
index add06b4e9a635511afc3e2716836e210ff794c46..364818eef40e55720a6a4509afbde63a1b48cd05 100644 (file)
@@ -351,6 +351,15 @@ xfs_iomap_prealloc_size(
                }
                if (shift)
                        alloc_blocks >>= shift;
+
+               /*
+                * If we are still trying to allocate more space than is
+                * available, squash the prealloc hard. This can happen if we
+                * have a large file on a small filesystem and the above
+                * lowspace thresholds are smaller than MAXEXTLEN.
+                */
+               while (alloc_blocks >= freesp)
+                       alloc_blocks >>= 4;
        }
 
        if (alloc_blocks < mp->m_writeio_blocks)
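A worked pass through the squash loop above, with illustrative numbers:

	/* freesp = 1000 free blocks, alloc_blocks = 65536:
	 *   65536 >> 4 = 4096   (still >= 1000, shift again)
	 *    4096 >> 4 =  256   (< 1000, stop)
	 * leaving the preallocation well below the remaining free space. */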
index da508463ff1006b7b4b5371d5f2c19495cb5d44b..7d6df7c00c36fb22e8f864285c6d2352b44a7cdd 100644 (file)
@@ -658,7 +658,7 @@ xfs_sb_quiet_read_verify(
                return;
        }
        /* quietly fail */
-       xfs_buf_ioerror(bp, EFSCORRUPTED);
+       xfs_buf_ioerror(bp, EWRONGFS);
 }
 
 static void
index 5f53e75409b8f45ad919aae17b24cf92c5218a06..8a59f8546552e9577b3a93c44b8cfb7251e3561a 100644 (file)
@@ -784,11 +784,11 @@ xfs_qm_scall_getquota(
             (XFS_IS_OQUOTA_ENFORCED(mp) &&
                        (dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) &&
            dst->d_id != 0) {
-               if (((int) dst->d_bcount > (int) dst->d_blk_softlimit) &&
+               if ((dst->d_bcount > dst->d_blk_softlimit) &&
                    (dst->d_blk_softlimit > 0)) {
                        ASSERT(dst->d_btimer != 0);
                }
-               if (((int) dst->d_icount > (int) dst->d_ino_softlimit) &&
+               if ((dst->d_icount > dst->d_ino_softlimit) &&
                    (dst->d_ino_softlimit > 0)) {
                        ASSERT(dst->d_itimer != 0);
                }
index 2e137d4a85ae66bc3a9172a4d93f734ba7aabb99..16a812977eab3d8fcf001e0175e6cfd773dcfe67 100644 (file)
@@ -341,6 +341,7 @@ DEFINE_BUF_EVENT(xfs_buf_item_relse);
 DEFINE_BUF_EVENT(xfs_buf_item_iodone);
 DEFINE_BUF_EVENT(xfs_buf_item_iodone_async);
 DEFINE_BUF_EVENT(xfs_buf_error_relse);
+DEFINE_BUF_EVENT(xfs_buf_wait_buftarg);
 DEFINE_BUF_EVENT(xfs_trans_read_buf_io);
 DEFINE_BUF_EVENT(xfs_trans_read_buf_shut);
 
index 4fc17d479d42301d13cc86bfd2a8aec194fecea0..3edf5dbee001c60239a11254dbd697c7602e9b06 100644 (file)
@@ -93,7 +93,7 @@ _xfs_trans_bjoin(
        xfs_buf_item_init(bp, tp->t_mountp);
        bip = bp->b_fspriv;
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
-       ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
+       ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
        if (reset_recur)
                bip->bli_recur = 0;
@@ -432,7 +432,7 @@ xfs_trans_brelse(xfs_trans_t        *tp,
        bip = bp->b_fspriv;
        ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
-       ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
+       ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
 
        trace_xfs_trans_brelse(bip);
@@ -519,7 +519,7 @@ xfs_trans_bhold(xfs_trans_t *tp,
        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
-       ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
+       ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
 
        bip->bli_flags |= XFS_BLI_HOLD;
@@ -539,7 +539,7 @@ xfs_trans_bhold_release(xfs_trans_t *tp,
        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
-       ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
+       ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT(bip->bli_flags & XFS_BLI_HOLD);
 
@@ -598,7 +598,7 @@ xfs_trans_log_buf(xfs_trans_t       *tp,
                bip->bli_flags &= ~XFS_BLI_STALE;
                ASSERT(XFS_BUF_ISSTALE(bp));
                XFS_BUF_UNSTALE(bp);
-               bip->bli_format.blf_flags &= ~XFS_BLF_CANCEL;
+               bip->__bli_format.blf_flags &= ~XFS_BLF_CANCEL;
        }
 
        tp->t_flags |= XFS_TRANS_DIRTY;
@@ -643,6 +643,7 @@ xfs_trans_binval(
        xfs_buf_t       *bp)
 {
        xfs_buf_log_item_t      *bip = bp->b_fspriv;
+       int                     i;
 
        ASSERT(bp->b_transp == tp);
        ASSERT(bip != NULL);
@@ -657,8 +658,8 @@ xfs_trans_binval(
                 */
                ASSERT(XFS_BUF_ISSTALE(bp));
                ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
-               ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_INODE_BUF));
-               ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
+               ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_INODE_BUF));
+               ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
                ASSERT(bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY);
                ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
                return;
@@ -668,10 +669,12 @@ xfs_trans_binval(
 
        bip->bli_flags |= XFS_BLI_STALE;
        bip->bli_flags &= ~(XFS_BLI_INODE_BUF | XFS_BLI_LOGGED | XFS_BLI_DIRTY);
-       bip->bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
-       bip->bli_format.blf_flags |= XFS_BLF_CANCEL;
-       memset((char *)(bip->bli_format.blf_data_map), 0,
-             (bip->bli_format.blf_map_size * sizeof(uint)));
+       bip->__bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
+       bip->__bli_format.blf_flags |= XFS_BLF_CANCEL;
+       for (i = 0; i < bip->bli_format_count; i++) {
+               memset(bip->bli_formats[i].blf_data_map, 0,
+                      (bip->bli_formats[i].blf_map_size * sizeof(uint)));
+       }
        bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
        tp->t_flags |= XFS_TRANS_DIRTY;
 }
@@ -775,5 +778,5 @@ xfs_trans_dquot_buf(
               type == XFS_BLF_GDQUOT_BUF);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
 
-       bip->bli_format.blf_flags |= type;
+       bip->__bli_format.blf_flags |= type;
 }
index 9a62937c56ca342812177e1022910194ca8fba7a..51969436b8b83b21730d25a5cef434ac6db3ef06 100644 (file)
@@ -4,66 +4,12 @@
 #include <linux/time.h>
 #include <linux/jiffies.h>
 
-typedef unsigned long __nocast cputime_t;
-
-#define cputime_one_jiffy              jiffies_to_cputime(1)
-#define cputime_to_jiffies(__ct)       (__force unsigned long)(__ct)
-#define cputime_to_scaled(__ct)                (__ct)
-#define jiffies_to_cputime(__hz)       (__force cputime_t)(__hz)
-
-typedef u64 __nocast cputime64_t;
-
-#define cputime64_to_jiffies64(__ct)   (__force u64)(__ct)
-#define jiffies64_to_cputime64(__jif)  (__force cputime64_t)(__jif)
-
-#define nsecs_to_cputime64(__ct)       \
-       jiffies64_to_cputime64(nsecs_to_jiffies64(__ct))
-
-
-/*
- * Convert cputime to microseconds and back.
- */
-#define cputime_to_usecs(__ct)         \
-       jiffies_to_usecs(cputime_to_jiffies(__ct))
-#define usecs_to_cputime(__usec)       \
-       jiffies_to_cputime(usecs_to_jiffies(__usec))
-#define usecs_to_cputime64(__usec)     \
-       jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000))
-
-/*
- * Convert cputime to seconds and back.
- */
-#define cputime_to_secs(jif)           (cputime_to_jiffies(jif) / HZ)
-#define secs_to_cputime(sec)           jiffies_to_cputime((sec) * HZ)
-
-/*
- * Convert cputime to timespec and back.
- */
-#define timespec_to_cputime(__val)     \
-       jiffies_to_cputime(timespec_to_jiffies(__val))
-#define cputime_to_timespec(__ct,__val)        \
-       jiffies_to_timespec(cputime_to_jiffies(__ct),__val)
-
-/*
- * Convert cputime to timeval and back.
- */
-#define timeval_to_cputime(__val)      \
-       jiffies_to_cputime(timeval_to_jiffies(__val))
-#define cputime_to_timeval(__ct,__val) \
-       jiffies_to_timeval(cputime_to_jiffies(__ct),__val)
-
-/*
- * Convert cputime to clock and back.
- */
-#define cputime_to_clock_t(__ct)       \
-       jiffies_to_clock_t(cputime_to_jiffies(__ct))
-#define clock_t_to_cputime(__x)                \
-       jiffies_to_cputime(clock_t_to_jiffies(__x))
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+# include <asm-generic/cputime_jiffies.h>
+#endif
 
-/*
- * Convert cputime64 to clock.
- */
-#define cputime64_to_clock_t(__ct)     \
-       jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct))
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+# include <asm-generic/cputime_nsecs.h>
+#endif
 
 #endif
diff --git a/include/asm-generic/cputime_jiffies.h b/include/asm-generic/cputime_jiffies.h
new file mode 100644 (file)
index 0000000..272ecba
--- /dev/null
@@ -0,0 +1,72 @@
+#ifndef _ASM_GENERIC_CPUTIME_JIFFIES_H
+#define _ASM_GENERIC_CPUTIME_JIFFIES_H
+
+typedef unsigned long __nocast cputime_t;
+
+#define cputime_one_jiffy              jiffies_to_cputime(1)
+#define cputime_to_jiffies(__ct)       (__force unsigned long)(__ct)
+#define cputime_to_scaled(__ct)                (__ct)
+#define jiffies_to_cputime(__hz)       (__force cputime_t)(__hz)
+
+typedef u64 __nocast cputime64_t;
+
+#define cputime64_to_jiffies64(__ct)   (__force u64)(__ct)
+#define jiffies64_to_cputime64(__jif)  (__force cputime64_t)(__jif)
+
+
+/*
+ * Convert nanoseconds to cputime
+ */
+#define nsecs_to_cputime64(__nsec)     \
+       jiffies64_to_cputime64(nsecs_to_jiffies64(__nsec))
+#define nsecs_to_cputime(__nsec)       \
+       jiffies_to_cputime(nsecs_to_jiffies(__nsec))
+
+
+/*
+ * Convert cputime to microseconds and back.
+ */
+#define cputime_to_usecs(__ct)         \
+       jiffies_to_usecs(cputime_to_jiffies(__ct))
+#define usecs_to_cputime(__usec)       \
+       jiffies_to_cputime(usecs_to_jiffies(__usec))
+#define usecs_to_cputime64(__usec)     \
+       jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000))
+
+/*
+ * Convert cputime to seconds and back.
+ */
+#define cputime_to_secs(jif)           (cputime_to_jiffies(jif) / HZ)
+#define secs_to_cputime(sec)           jiffies_to_cputime((sec) * HZ)
+
+/*
+ * Convert cputime to timespec and back.
+ */
+#define timespec_to_cputime(__val)     \
+       jiffies_to_cputime(timespec_to_jiffies(__val))
+#define cputime_to_timespec(__ct,__val)        \
+       jiffies_to_timespec(cputime_to_jiffies(__ct),__val)
+
+/*
+ * Convert cputime to timeval and back.
+ */
+#define timeval_to_cputime(__val)      \
+       jiffies_to_cputime(timeval_to_jiffies(__val))
+#define cputime_to_timeval(__ct,__val) \
+       jiffies_to_timeval(cputime_to_jiffies(__ct),__val)
+
+/*
+ * Convert cputime to clock and back.
+ */
+#define cputime_to_clock_t(__ct)       \
+       jiffies_to_clock_t(cputime_to_jiffies(__ct))
+#define clock_t_to_cputime(__x)                \
+       jiffies_to_cputime(clock_t_to_jiffies(__x))
+
+/*
+ * Convert cputime64 to clock.
+ */
+#define cputime64_to_clock_t(__ct)     \
+       jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct))
+
+#endif
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
new file mode 100644 (file)
index 0000000..b6485ca
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * Definitions for measuring cputime in nsecs resolution.
+ *
+ * Based on <arch/ia64/include/asm/cputime.h>
+ *
+ * Copyright (C) 2007 FUJITSU LIMITED
+ * Copyright (C) 2007 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#ifndef _ASM_GENERIC_CPUTIME_NSECS_H
+#define _ASM_GENERIC_CPUTIME_NSECS_H
+
+typedef u64 __nocast cputime_t;
+typedef u64 __nocast cputime64_t;
+
+#define cputime_one_jiffy              jiffies_to_cputime(1)
+
+/*
+ * Convert cputime <-> jiffies (HZ)
+ */
+#define cputime_to_jiffies(__ct)       \
+       ((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
+#define cputime_to_scaled(__ct)                (__ct)
+#define jiffies_to_cputime(__jif)      \
+       (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
+#define cputime64_to_jiffies64(__ct)   \
+       ((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies64_to_cputime64(__jif)  \
+       (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ))
+
+
+/*
+ * Convert cputime <-> nanoseconds
+ */
+#define nsecs_to_cputime(__nsecs)      ((__force u64)(__nsecs))
+
+
+/*
+ * Convert cputime <-> microseconds
+ */
+#define cputime_to_usecs(__ct)         \
+       ((__force u64)(__ct) / NSEC_PER_USEC)
+#define usecs_to_cputime(__usecs)      \
+       (__force cputime_t)((__usecs) * NSEC_PER_USEC)
+#define usecs_to_cputime64(__usecs)    \
+       (__force cputime64_t)((__usecs) * NSEC_PER_USEC)
+
+/*
+ * Convert cputime <-> seconds
+ */
+#define cputime_to_secs(__ct)          \
+       ((__force u64)(__ct) / NSEC_PER_SEC)
+#define secs_to_cputime(__secs)                \
+       (__force cputime_t)((__secs) * NSEC_PER_SEC)
+
+/*
+ * Convert cputime <-> timespec (nsec)
+ */
+static inline cputime_t timespec_to_cputime(const struct timespec *val)
+{
+       u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
+       return (__force cputime_t) ret;
+}
+static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
+{
+       val->tv_sec  = (__force u64) ct / NSEC_PER_SEC;
+       val->tv_nsec = (__force u64) ct % NSEC_PER_SEC;
+}
+
+/*
+ * Convert cputime <-> timeval (msec)
+ */
+static inline cputime_t timeval_to_cputime(struct timeval *val)
+{
+       u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
+       return (__force cputime_t) ret;
+}
+static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
+{
+       val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
+       val->tv_usec = ((__force u64) ct % NSEC_PER_SEC) / NSEC_PER_USEC;
+}
+
+/*
+ * Convert cputime <-> clock (USER_HZ)
+ */
+#define cputime_to_clock_t(__ct)       \
+       ((__force u64)(__ct) / (NSEC_PER_SEC / USER_HZ))
+#define clock_t_to_cputime(__x)                \
+       (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ))
+
+/*
+ * Convert cputime64 to clock.
+ */
+#define cputime64_to_clock_t(__ct)     \
+       cputime_to_clock_t((__force cputime_t)__ct)
+
+#endif
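
A minimal userspace sketch of the nsec-resolution arithmetic defined above, for reference only; it assumes HZ is 100 and is not part of this patch:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC  1000000000ULL
#define NSEC_PER_USEC 1000ULL
#define HZ            100ULL	/* assumed tick rate for this example */

int main(void)
{
	uint64_t ct = 2500000000ULL;	/* 2.5 seconds of cputime, in nsecs */

	/* same arithmetic as cputime_to_jiffies()/cputime_to_usecs()/cputime_to_secs() */
	printf("jiffies: %llu\n", (unsigned long long)(ct / (NSEC_PER_SEC / HZ)));
	printf("usecs:   %llu\n", (unsigned long long)(ct / NSEC_PER_USEC));
	printf("secs:    %llu\n", (unsigned long long)(ct / NSEC_PER_SEC));
	return 0;
}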
index ccf7b4f34a3c4e070fb44cd7456c859ee5c3c645..6c32af918c2f1dc257fea3613bcf5bca8137d9ec 100644 (file)
@@ -16,6 +16,22 @@ extern void
 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
                    dma_addr_t dma_handle);
 
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+                                   dma_addr_t *dma_handle, gfp_t flag,
+                                   struct dma_attrs *attrs)
+{
+       /* attrs is not supported and ignored */
+       return dma_alloc_coherent(dev, size, dma_handle, flag);
+}
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+                                 void *cpu_addr, dma_addr_t dma_handle,
+                                 struct dma_attrs *attrs)
+{
+       /* attrs is not supported and ignored */
+       dma_free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
index 701beab27aab71c94031151d9daaf6090f570b99..5cf680a98f9bcb7494a2c0a4bc557fca00b84296 100644 (file)
@@ -461,10 +461,8 @@ static inline int is_zero_pfn(unsigned long pfn)
        return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
 }
 
-static inline unsigned long my_zero_pfn(unsigned long addr)
-{
-       return page_to_pfn(ZERO_PAGE(addr));
-}
+#define my_zero_pfn(addr)      page_to_pfn(ZERO_PAGE(addr))
+
 #else
 static inline int is_zero_pfn(unsigned long pfn)
 {
index 58f466ff00d363701155427cb7968515ef60c935..1db51b8524e9fb86fcc71e797f95a6518c259bfc 100644 (file)
@@ -21,10 +21,12 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
                        unsigned long fd, off_t pgoff);
 #endif
 
+#ifndef CONFIG_GENERIC_SIGALTSTACK
 #ifndef sys_sigaltstack
 asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *,
                        struct pt_regs *);
 #endif
+#endif
 
 #ifndef sys_rt_sigreturn
 asmlinkage long sys_rt_sigreturn(struct pt_regs *regs);
index 0f4a366f6fa6e3a872a995035c867d482368178c..3527fb3f75bbe82c408e8614f5cfbb8c1a88dff9 100644 (file)
@@ -70,7 +70,7 @@ struct drm_mm {
        unsigned long scan_color;
        unsigned long scan_size;
        unsigned long scan_hit_start;
-       unsigned scan_hit_size;
+       unsigned long scan_hit_end;
        unsigned scanned_blocks;
        unsigned long scan_start;
        unsigned long scan_end;
index 544abdb2238ccdffda329816578b41bb954fc617..ec10e1b24c1cce50d50581d58c5000cf7b9e5565 100644 (file)
@@ -49,8 +49,8 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
 }
 #endif
 
-extern void cper_print_aer(const char *prefix, int cper_severity,
-                          struct aer_capability_regs *aer);
+extern void cper_print_aer(const char *prefix, struct pci_dev *dev,
+                          int cper_severity, struct aer_capability_regs *aer);
 extern int cper_severity_to_aer(int cper_severity);
 extern void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
                              int severity);
index 7a24fe9b44b4bab04edf6b49583a7dfa8bd5a530..a2e3f18b2ad6953fde84a344f3a5ce9dfa35489f 100644 (file)
@@ -19,8 +19,7 @@ typedef u64 async_cookie_t;
 typedef void (async_func_ptr) (void *data, async_cookie_t cookie);
 struct async_domain {
        struct list_head node;
-       struct list_head domain;
-       int count;
+       struct list_head pending;
        unsigned registered:1;
 };
 
@@ -29,8 +28,7 @@ struct async_domain {
  */
 #define ASYNC_DOMAIN(_name) \
        struct async_domain _name = { .node = LIST_HEAD_INIT(_name.node), \
-                                     .domain = LIST_HEAD_INIT(_name.domain), \
-                                     .count = 0, \
+                                     .pending = LIST_HEAD_INIT(_name.pending), \
                                      .registered = 1 }
 
 /*
@@ -39,8 +37,7 @@ struct async_domain {
  */
 #define ASYNC_DOMAIN_EXCLUSIVE(_name) \
        struct async_domain _name = { .node = LIST_HEAD_INIT(_name.node), \
-                                     .domain = LIST_HEAD_INIT(_name.domain), \
-                                     .count = 0, \
+                                     .pending = LIST_HEAD_INIT(_name.pending), \
                                      .registered = 0 }
 
 extern async_cookie_t async_schedule(async_func_ptr *ptr, void *data);
@@ -52,4 +49,5 @@ extern void async_synchronize_full_domain(struct async_domain *domain);
 extern void async_synchronize_cookie(async_cookie_t cookie);
 extern void async_synchronize_cookie_domain(async_cookie_t cookie,
                                            struct async_domain *domain);
+extern bool current_is_async(void);
 #endif
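
A hedged usage sketch of the reworked async domain API above; probe_one(), probe_all() and the device pointer are illustrative names, and async_schedule_domain() is assumed from the rest of this header rather than shown in the hunk:

#include <linux/async.h>

static ASYNC_DOMAIN(my_probe_domain);	/* domain now only tracks a 'pending' list */

static void probe_one(void *data, async_cookie_t cookie)
{
	/* slow, independent initialisation for the device in 'data' */
}

static void probe_all(void *dev)
{
	async_schedule_domain(probe_one, dev, &my_probe_domain);
	/* wait only for work queued on this particular domain */
	async_synchronize_full_domain(&my_probe_domain);
}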
index 408da9502177bd18a3ba88e2831701e916e800aa..8f7a3d68371a37314ab8263c078674fd79a8c5b5 100644 (file)
@@ -297,10 +297,12 @@ enum {
        ATA_LOG_SATA_NCQ        = 0x10,
        ATA_LOG_SATA_ID_DEV_DATA  = 0x30,
        ATA_LOG_SATA_SETTINGS     = 0x08,
-       ATA_LOG_DEVSLP_MDAT       = 0x30,
+       ATA_LOG_DEVSLP_OFFSET     = 0x30,
+       ATA_LOG_DEVSLP_SIZE       = 0x08,
+       ATA_LOG_DEVSLP_MDAT       = 0x00,
        ATA_LOG_DEVSLP_MDAT_MASK  = 0x1F,
-       ATA_LOG_DEVSLP_DETO       = 0x31,
-       ATA_LOG_DEVSLP_VALID      = 0x37,
+       ATA_LOG_DEVSLP_DETO       = 0x01,
+       ATA_LOG_DEVSLP_VALID      = 0x07,
        ATA_LOG_DEVSLP_VALID_MASK = 0x80,
 
        /* READ/WRITE LONG (obsolete) */
index bce729afbcf9a5e184e70edc1bebf9f65a0176a3..5a6d718adf34825eb11bc4dd20cbcb8f1dced0ad 100644 (file)
@@ -24,6 +24,7 @@
 #define _LINUX_AUDIT_H_
 
 #include <linux/sched.h>
+#include <linux/ptrace.h>
 #include <uapi/linux/audit.h>
 
 struct audit_sig_info {
@@ -157,7 +158,8 @@ void audit_core_dumps(long signr);
 
 static inline void audit_seccomp(unsigned long syscall, long signr, int code)
 {
-       if (unlikely(!audit_dummy_context()))
+       /* Force a record to be reported if a signal was delivered. */
+       if (signr || unlikely(!audit_dummy_context()))
                __audit_seccomp(syscall, signr, code);
 }
 
index 7911fda23bb439975d7e9b0b0941b94a2ea3c910..97ade7cdc8708840897de448ccbba138dfcabd99 100644 (file)
 
 #define BMA150_DRIVER          "bma150"
 
+#define BMA150_RANGE_2G                0
+#define BMA150_RANGE_4G                1
+#define BMA150_RANGE_8G                2
+
+#define BMA150_BW_25HZ         0
+#define BMA150_BW_50HZ         1
+#define BMA150_BW_100HZ                2
+#define BMA150_BW_190HZ                3
+#define BMA150_BW_375HZ                4
+#define BMA150_BW_750HZ                5
+#define BMA150_BW_1500HZ       6
+
 struct bma150_cfg {
        bool any_motion_int;            /* Set to enable any-motion interrupt */
        bool hg_int;                    /* Set to enable high-G interrupt */
@@ -34,8 +46,8 @@ struct bma150_cfg {
        unsigned char lg_hyst;          /* Low-G hysteresis */
        unsigned char lg_dur;           /* Low-G duration */
        unsigned char lg_thres;         /* Low-G threshold */
-       unsigned char range;            /* BMA0150_RANGE_xxx (in G) */
-       unsigned char bandwidth;        /* BMA0150_BW_xxx (in Hz) */
+       unsigned char range;            /* one of BMA150_RANGE_xxx */
+       unsigned char bandwidth;        /* one of BMA150_BW_xxx */
 };
 
 struct bma150_platform_data {
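
A hedged sketch of board code filling the configuration above with the new range and bandwidth constants; the header path and the chosen values are illustrative:

#include <linux/bma150.h>

static const struct bma150_cfg bma150_default_cfg = {
	.any_motion_int = true,			/* enable any-motion interrupt */
	.range          = BMA150_RANGE_2G,	/* +/-2g full scale */
	.bandwidth      = BMA150_BW_25HZ,	/* 25 Hz filter bandwidth */
};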
index 7d73905dcba2166f91fd029518389ab0bbe66d1f..900af5964f5543f6dd1b5308419e93c70cb0ca07 100644 (file)
@@ -203,6 +203,7 @@ struct cgroup {
 
        /* For RCU-protected deletion */
        struct rcu_head rcu_head;
+       struct work_struct free_work;
 
        /* List of events which userspace want to receive */
        struct list_head event_list;
@@ -558,6 +559,7 @@ static inline struct cgroup* task_cgroup(struct task_struct *task,
 
 struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
                                          struct cgroup *cgroup);
+struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
 
 /**
  * cgroup_for_each_descendant_pre - pre-order walk of a cgroup's descendants
@@ -706,7 +708,6 @@ struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id);
 static inline int cgroup_init_early(void) { return 0; }
 static inline int cgroup_init(void) { return 0; }
 static inline void cgroup_fork(struct task_struct *p) {}
-static inline void cgroup_fork_callbacks(struct task_struct *p) {}
 static inline void cgroup_post_fork(struct task_struct *p) {}
 static inline void cgroup_exit(struct task_struct *p, int callbacks) {}
 
index 8a7096fcb01ee1354e1d46c67d5d1ff9919bc742..66346521cb6561641045297cb74c8d15f7b71f74 100644 (file)
@@ -161,6 +161,15 @@ clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec)
 extern void clockevents_suspend(void);
 extern void clockevents_resume(void);
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+#ifdef CONFIG_ARCH_HAS_TICK_BROADCAST
+extern void tick_broadcast(const struct cpumask *mask);
+#else
+#define tick_broadcast NULL
+#endif
+extern int tick_receive_broadcast(void);
+#endif
+
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 extern void clockevents_notify(unsigned long reason, void *arg);
 #else
index 6ecb6dc2f3033fc50ba1a8f14eb4b2b12b8ec044..cc7bddeaf553b334e6aa1250fbf74924358e5de4 100644 (file)
@@ -22,7 +22,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
 extern int fragmentation_index(struct zone *zone, unsigned int order);
 extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
                        int order, gfp_t gfp_mask, nodemask_t *mask,
-                       bool sync, bool *contended, struct page **page);
+                       bool sync, bool *contended);
 extern int compact_pgdat(pg_data_t *pgdat, int order);
 extern void reset_isolation_suitable(pg_data_t *pgdat);
 extern unsigned long compaction_suitable(struct zone *zone, int order);
@@ -75,7 +75,7 @@ static inline bool compaction_restarting(struct zone *zone, int order)
 #else
 static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
                        int order, gfp_t gfp_mask, nodemask_t *nodemask,
-                       bool sync, bool *contended, struct page **page)
+                       bool sync, bool *contended)
 {
        return COMPACT_CONTINUE;
 }
index e24339ccb7f04a066dd52b7a88cd8a7d06c265bc..b28d161c1091908b9395df4e3ad73e6d88518966 100644 (file)
@@ -3,12 +3,40 @@
 
 #ifdef CONFIG_CONTEXT_TRACKING
 #include <linux/sched.h>
+#include <linux/percpu.h>
+
+struct context_tracking {
+       /*
+        * When active is false, probes are unset in order
+        * to minimize overhead: TIF flags are cleared
+        * and calls to user_enter/exit are ignored. This
+        * may be further optimized using static keys.
+        */
+       bool active;
+       enum {
+               IN_KERNEL = 0,
+               IN_USER,
+       } state;
+};
+
+DECLARE_PER_CPU(struct context_tracking, context_tracking);
+
+static inline bool context_tracking_in_user(void)
+{
+       return __this_cpu_read(context_tracking.state) == IN_USER;
+}
+
+static inline bool context_tracking_active(void)
+{
+       return __this_cpu_read(context_tracking.active);
+}
 
 extern void user_enter(void);
 extern void user_exit(void);
 extern void context_tracking_task_switch(struct task_struct *prev,
                                         struct task_struct *next);
 #else
+static inline bool context_tracking_in_user(void) { return false; }
 static inline void user_enter(void) { }
 static inline void user_exit(void) { }
 static inline void context_tracking_task_switch(struct task_struct *prev,
index ac3bbb5b95029caeea6546372ad2b6f35c0b83ad..1739510d89943df23f193d5fb04332f47cdaf958 100644 (file)
 #include <linux/cpumask.h>
 #include <linux/gfp.h>
 #include <linux/slab.h>
+#include <linux/kref.h>
 
 /**
  * struct cpu_rmap - CPU affinity reverse-map
+ * @refcount: kref for object
  * @size: Number of objects to be reverse-mapped
  * @used: Number of objects added
  * @obj: Pointer to array of object pointers
@@ -23,6 +25,7 @@
  *      based on affinity masks
  */
 struct cpu_rmap {
+       struct kref     refcount;
        u16             size, used;
        void            **obj;
        struct {
@@ -33,15 +36,7 @@ struct cpu_rmap {
 #define CPU_RMAP_DIST_INF 0xffff
 
 extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags);
-
-/**
- * free_cpu_rmap - free CPU affinity reverse-map
- * @rmap: Reverse-map allocated with alloc_cpu_rmap(), or %NULL
- */
-static inline void free_cpu_rmap(struct cpu_rmap *rmap)
-{
-       kfree(rmap);
-}
+extern int cpu_rmap_put(struct cpu_rmap *rmap);
 
 extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj);
 extern int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
index 3711b34dc4f9fd8cafab9691bce55e617326dbb6..24cd1037b6d6f487d93ce79dfe0844887f949b9f 100644 (file)
@@ -126,9 +126,9 @@ struct cpuidle_driver {
        struct module           *owner;
        int                     refcnt;
 
-       unsigned int            power_specified:1;
        /* set to 1 to use the core cpuidle time keeping (for all states). */
        unsigned int            en_core_tk_irqen:1;
+       /* states array must be ordered in decreasing power consumption */
        struct cpuidle_state    states[CPUIDLE_STATE_MAX];
        int                     state_count;
        int                     safe_state_index;
index 43dcda937ddf82fcb8661372a68899953209e3f2..001f6637aa476eacb7cd75b36008ecce35d3bd53 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <linux/mutex.h>
+#include <linux/pinctrl/devinfo.h>
 #include <linux/pm.h>
 #include <linux/atomic.h>
 #include <linux/ratelimit.h>
@@ -620,6 +621,8 @@ struct acpi_dev_node {
  * @pm_domain: Provide callbacks that are executed during system suspend,
  *             hibernation, system resume and during runtime PM transitions
  *             along with subsystem-level and driver-level callbacks.
+ * @pins:      For device pin management.
+ *             See Documentation/pinctrl.txt for details.
  * @numa_node: NUMA node this device is close to.
  * @dma_mask:  Dma mask (if dma'ble device).
  * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
@@ -672,6 +675,10 @@ struct device {
        struct dev_pm_info      power;
        struct dev_pm_domain    *pm_domain;
 
+#ifdef CONFIG_PINCTRL
+       struct dev_pin_info     *pins;
+#endif
+
 #ifdef CONFIG_NUMA
        int             numa_node;      /* NUMA node this device is close to */
 #endif
index 8b84916dc6719ce46430fa791aa27c0bf8b16835..7a9498ab3c2d10dedf8af5c5dae3b2fd56753684 100644 (file)
@@ -618,18 +618,30 @@ extern int __init efi_setup_pcdp_console(char *);
 #endif
 
 /*
- * We play games with efi_enabled so that the compiler will, if possible, remove
- * EFI-related code altogether.
+ * We play games with efi_enabled so that the compiler will, if
+ * possible, remove EFI-related code altogether.
  */
+#define EFI_BOOT               0       /* Were we booted from EFI? */
+#define EFI_SYSTEM_TABLES      1       /* Can we use EFI system tables? */
+#define EFI_CONFIG_TABLES      2       /* Can we use EFI config tables? */
+#define EFI_RUNTIME_SERVICES   3       /* Can we use runtime services? */
+#define EFI_MEMMAP             4       /* Can we use EFI memory map? */
+#define EFI_64BIT              5       /* Is the firmware 64-bit? */
+
 #ifdef CONFIG_EFI
 # ifdef CONFIG_X86
-   extern int efi_enabled;
-   extern bool efi_64bit;
+extern int efi_enabled(int facility);
 # else
-#  define efi_enabled 1
+static inline int efi_enabled(int facility)
+{
+       return 1;
+}
 # endif
 #else
-# define efi_enabled 0
+static inline int efi_enabled(int facility)
+{
+       return 0;
+}
 #endif
 
 /*
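
A short sketch of a caller testing one facility through the new efi_enabled() interface instead of the old global flag; the function and message are illustrative:

#include <linux/efi.h>
#include <linux/printk.h>

static void report_efi_runtime(void)
{
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		pr_info("EFI runtime services available\n");
	else
		pr_info("EFI runtime services not available\n");
}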
index c03af7687bb4fdd916d5bf3c835e801376132392..18662063175044e4641610799d48422e3cd583f0 100644 (file)
@@ -138,6 +138,7 @@ extern void elv_drain_elevator(struct request_queue *);
 /*
  * io scheduler registration
  */
+extern void __init load_default_elevator_module(void);
 extern int elv_register(struct elevator_type *);
 extern void elv_unregister(struct elevator_type *);
 
@@ -206,5 +207,9 @@ enum {
        INIT_LIST_HEAD(&(rq)->csd.list);        \
        } while (0)
 
+#else /* CONFIG_BLOCK */
+
+static inline void load_default_elevator_module(void) { }
+
 #endif /* CONFIG_BLOCK */
 #endif
index 92691d85c32061f4d0acbe45ecccfcd78267240d..e5ca8ef50e9bf674e46fa58d7fff5f0424ab5bad 100644 (file)
@@ -74,7 +74,7 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  * SAVE_REGS - The ftrace_ops wants regs saved at each function called
  *            and passed to the callback. If this flag is set, but the
  *            architecture does not support passing regs
- *            (ARCH_SUPPORTS_FTRACE_SAVE_REGS is not defined), then the
+ *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
  *            ftrace_ops will fail to register, unless the next flag
  *            is set.
  * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
@@ -418,7 +418,7 @@ void ftrace_modify_all_code(int command);
 #endif
 
 #ifndef FTRACE_REGS_ADDR
-#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
 #else
 # define FTRACE_REGS_ADDR FTRACE_ADDR
@@ -480,7 +480,7 @@ extern int ftrace_make_nop(struct module *mod,
  */
 extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
 
-#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 /**
  * ftrace_modify_call - convert from one addr to another (no nop)
  * @rec: the mcount call site record
index a3d489531d83a7c396aaa6a24d09961eeae47929..13a54d0bdfa8afbe2cc29ec6bf8ce016c1b2306c 100644 (file)
@@ -49,7 +49,6 @@ struct trace_entry {
        unsigned char           flags;
        unsigned char           preempt_count;
        int                     pid;
-       int                     padding;
 };
 
 #define FTRACE_MAX_EVENT                                               \
@@ -84,6 +83,9 @@ struct trace_iterator {
        long                    idx;
 
        cpumask_var_t           started;
+
+       /* true when the currently open file is a snapshot */
+       bool                    snapshot;
 };
 
 enum trace_iter_flags {
@@ -272,7 +274,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
 extern int trace_add_event_call(struct ftrace_event_call *call);
 extern void trace_remove_event_call(struct ftrace_event_call *call);
 
-#define is_signed_type(type)   (((type)(-1)) < 0)
+#define is_signed_type(type)   (((type)(-1)) < (type)0)
 
 int trace_set_clr_event(const char *system, const char *event, int set);
 
index 624ef3f45c8efe51667a6d2d9f558e3d6351736f..29eb805ea4a6b2c98236bbd0f24cc00a45ec53df 100644 (file)
@@ -153,7 +153,7 @@ extern void rcu_nmi_exit(void);
  */
 #define __irq_enter()                                  \
        do {                                            \
-               vtime_account_irq_enter(current);       \
+               account_irq_enter_time(current);        \
                add_preempt_count(HARDIRQ_OFFSET);      \
                trace_hardirq_enter();                  \
        } while (0)
@@ -169,7 +169,7 @@ extern void irq_enter(void);
 #define __irq_exit()                                   \
        do {                                            \
                trace_hardirq_exit();                   \
-               vtime_account_irq_exit(current);        \
+               account_irq_exit_time(current);         \
                sub_preempt_count(HARDIRQ_OFFSET);      \
        } while (0)
 
@@ -180,10 +180,10 @@ extern void irq_exit(void);
 
 #define nmi_enter()                                            \
        do {                                                    \
+               lockdep_off();                                  \
                ftrace_nmi_enter();                             \
                BUG_ON(in_nmi());                               \
                add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
-               lockdep_off();                                  \
                rcu_nmi_enter();                                \
                trace_hardirq_enter();                          \
        } while (0)
@@ -192,10 +192,10 @@ extern void irq_exit(void);
        do {                                                    \
                trace_hardirq_exit();                           \
                rcu_nmi_exit();                                 \
-               lockdep_on();                                   \
                BUG_ON(!in_nmi());                              \
                sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
                ftrace_nmi_exit();                              \
+               lockdep_on();                                   \
        } while (0)
 
 #endif /* LINUX_HARDIRQ_H */
index 82b29ae6ebb0d38235a364f447263a4183c66534..b2514f70d591d799197cdb2c5050bd131641f925 100644 (file)
@@ -20,16 +20,4 @@ struct device *hwmon_device_register(struct device *dev);
 
 void hwmon_device_unregister(struct device *dev);
 
-/* Scale user input to sensible values */
-static inline int SENSORS_LIMIT(long value, long low, long high)
-{
-       if (value < low)
-               return low;
-       else if (value > high)
-               return high;
-       else
-               return value;
-}
-
 #endif
-
index a799273714accff40c0bf54850389b095746ff83..861814710d523c97a7e9cf98f7145d49630d088a 100644 (file)
 
 #define __exit          __section(.exit.text) __exitused __cold notrace
 
-/* Used for HOTPLUG, but that is always enabled now, so just make them noops */
-#define __devinit
-#define __devinitdata
-#define __devinitconst
-#define __devexit
-#define __devexitdata
-#define __devexitconst
-
 /* Used for HOTPLUG_CPU */
 #define __cpuinit        __section(.cpuinit.text) __cold notrace
 #define __cpuinitdata    __section(.cpuinit.data)
@@ -161,6 +153,7 @@ extern unsigned int reset_devices;
 /* used by init/main.c */
 void setup_arch(char **);
 void prepare_namespace(void);
+void __init load_default_modules(void);
 
 extern void (*late_time_init)(void);
 
@@ -337,18 +330,6 @@ void __init parse_early_options(char *cmdline);
 #define __INITRODATA_OR_MODULE __INITRODATA
 #endif /*CONFIG_MODULES*/
 
-/* Functions marked as __devexit may be discarded at kernel link time, depending
-   on config options.  Newer versions of binutils detect references from
-   retained sections to discarded sections and flag an error.  Pointers to
-   __devexit functions must use __devexit_p(function_name), the wrapper will
-   insert either the function_name or NULL, depending on the config options.
- */
-#if defined(MODULE) || defined(CONFIG_HOTPLUG)
-#define __devexit_p(x) x
-#else
-#define __devexit_p(x) NULL
-#endif
-
 #ifdef MODULE
 #define __exit_p(x) x
 #else
index 6d087c5f57f79e5a22ffa9a440061b5079838f53..5cd0f09499271283795bb49a7b18b8ed3f1930cd 100644 (file)
@@ -10,7 +10,9 @@
 #include <linux/pid_namespace.h>
 #include <linux/user_namespace.h>
 #include <linux/securebits.h>
+#include <linux/seqlock.h>
 #include <net/net_namespace.h>
+#include <linux/sched/rt.h>
 
 #ifdef CONFIG_SMP
 # define INIT_PUSHABLE_TASKS(tsk)                                      \
@@ -141,6 +143,15 @@ extern struct task_group root_task_group;
 # define INIT_PERF_EVENTS(tsk)
 #endif
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+# define INIT_VTIME(tsk)                                               \
+       .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \
+       .vtime_snap = 0,                                \
+       .vtime_snap_whence = VTIME_SYS,
+#else
+# define INIT_VTIME(tsk)
+#endif
+
 #define INIT_TASK_COMM "swapper"
 
 /*
@@ -210,6 +221,7 @@ extern struct task_group root_task_group;
        INIT_TRACE_RECURSION                                            \
        INIT_TASK_RCU_PREEMPT(tsk)                                      \
        INIT_CPUSET_SEQ                                                 \
+       INIT_VTIME(tsk)                                                 \
 }
 
 
index 57e01a7cb0060eb5e02f76ca72d18ccf707d2731..010d98175efa78de44e641671cf7a3545aa959d8 100644 (file)
@@ -13,6 +13,8 @@
 #ifndef __LINUX_INPUT_ADXL34X_H__
 #define __LINUX_INPUT_ADXL34X_H__
 
+#include <linux/input.h>
+
 struct adxl34x_platform_data {
 
        /*
diff --git a/include/linux/input/tegra_kbc.h b/include/linux/input/tegra_kbc.h
deleted file mode 100644 (file)
index a130256..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Platform definitions for tegra-kbc keyboard input driver
- *
- * Copyright (c) 2010-2011, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- */
-
-#ifndef ASMARM_ARCH_TEGRA_KBC_H
-#define ASMARM_ARCH_TEGRA_KBC_H
-
-#include <linux/types.h>
-#include <linux/input/matrix_keypad.h>
-
-#define KBC_MAX_GPIO   24
-#define KBC_MAX_KPENT  8
-
-#define KBC_MAX_ROW    16
-#define KBC_MAX_COL    8
-#define KBC_MAX_KEY    (KBC_MAX_ROW * KBC_MAX_COL)
-
-enum tegra_pin_type {
-       PIN_CFG_IGNORE,
-       PIN_CFG_COL,
-       PIN_CFG_ROW,
-};
-
-struct tegra_kbc_pin_cfg {
-       enum tegra_pin_type type;
-       unsigned char num;
-};
-
-struct tegra_kbc_wake_key {
-       u8 row:4;
-       u8 col:4;
-};
-
-struct tegra_kbc_platform_data {
-       unsigned int debounce_cnt;
-       unsigned int repeat_cnt;
-
-       struct tegra_kbc_pin_cfg pin_cfg[KBC_MAX_GPIO];
-       const struct matrix_keymap_data *keymap_data;
-
-       u32 wakeup_key;
-       bool wakeup;
-       bool use_fn_map;
-       bool use_ghost_filter;
-};
-#endif
index 5e4e6170f43a5ef672ddd0d28694849669e310cb..5fa5afeeb7599d6c0f05d7fb59796cf153a614a9 100644 (file)
@@ -268,11 +268,6 @@ struct irq_affinity_notify {
 extern int
 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
 
-static inline void irq_run_affinity_notifiers(void)
-{
-       flush_scheduled_work();
-}
-
 #else /* CONFIG_SMP */
 
 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
index fdf2c4a238cc605390b63bcdefb1f41533ae426b..bc4e06611958ce476c749b79b63b747a97390ea1 100644 (file)
@@ -509,8 +509,11 @@ static inline void irq_set_percpu_devid_flags(unsigned int irq)
 
 /* Handle dynamic irq creation and destruction */
 extern unsigned int create_irq_nr(unsigned int irq_want, int node);
+extern unsigned int __create_irqs(unsigned int from, unsigned int count,
+                                 int node);
 extern int create_irq(void);
 extern void destroy_irq(unsigned int irq);
+extern void destroy_irqs(unsigned int irq, unsigned int count);
 
 /*
  * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and
@@ -528,6 +531,8 @@ extern int irq_set_handler_data(unsigned int irq, void *data);
 extern int irq_set_chip_data(unsigned int irq, void *data);
 extern int irq_set_irq_type(unsigned int irq, unsigned int type);
 extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
+extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
+                               struct msi_desc *entry);
 extern struct irq_data *irq_get_irq_data(unsigned int irq);
 
 static inline struct irq_chip *irq_get_chip(unsigned int irq)
@@ -590,6 +595,9 @@ int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
 #define irq_alloc_desc_from(from, node)                \
        irq_alloc_descs(-1, from, 1, node)
 
+#define irq_alloc_descs_from(from, cnt, node)  \
+       irq_alloc_descs(-1, from, cnt, node)
+
 void irq_free_descs(unsigned int irq, unsigned int cnt);
 int irq_reserve_irqs(unsigned int from, unsigned int cnt);
 
index 6a9e8f5399e2ab137b3c251eb49349fdc6b333ac..f5dbce50466e6546fc72f4084ed0d0c91d91fe1e 100644 (file)
@@ -3,6 +3,20 @@
 
 #include <linux/llist.h>
 
+/*
+ * An entry can be in one of four states:
+ *
+ * free             NULL, 0 -> {claimed}       : free to be used
+ * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
+ * pending   next, 3 -> {busy}          : queued, pending callback
+ * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
+ */
+
+#define IRQ_WORK_PENDING       1UL
+#define IRQ_WORK_BUSY          2UL
+#define IRQ_WORK_FLAGS         3UL
+#define IRQ_WORK_LAZY          4UL /* Doesn't want IPI, wait for tick */
+
 struct irq_work {
        unsigned long flags;
        struct llist_node llnode;
@@ -16,8 +30,14 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
        work->func = func;
 }
 
-bool irq_work_queue(struct irq_work *work);
+void irq_work_queue(struct irq_work *work);
 void irq_work_run(void);
 void irq_work_sync(struct irq_work *work);
 
+#ifdef CONFIG_IRQ_WORK
+bool irq_work_needs_cpu(void);
+#else
+static bool irq_work_needs_cpu(void) { return false; }
+#endif
+
 #endif /* _LINUX_IRQ_WORK_H */
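
A hedged usage sketch of the irq_work interface above; the callback and trigger names are illustrative:

#include <linux/irq_work.h>

static void my_irq_work_fn(struct irq_work *work)
{
	/* deferred work, run from a safe interrupt context after queueing */
}

static struct irq_work my_work;

static void my_trigger(void)
{
	init_irq_work(&my_work, my_irq_work_fn);
	irq_work_queue(&my_work);	/* returns void after this change */
}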
index 66b70780e910dfb846241bc3b70ce1a97aeeee47..ed5f6ed6eb772797ea1c7eb0e98e46f55dbcf027 100644 (file)
@@ -127,7 +127,7 @@ extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t)
 extern void account_steal_time(cputime_t);
 extern void account_idle_time(cputime_t);
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 static inline void account_process_tick(struct task_struct *tsk, int user)
 {
        vtime_account_user(tsk);
index 23755ba42abc456a06ffd663d0b3817570c1e7cb..4b6ef4d33cc26b65e242fb8f086cd3754672d962 100644 (file)
 #define KPROBE_REENTER         0x00000004
 #define KPROBE_HIT_SSDONE      0x00000008
 
-/*
- * If function tracer is enabled and the arch supports full
- * passing of pt_regs to function tracing, then kprobes can
- * optimize on top of function tracing.
- */
-#if defined(CONFIG_FUNCTION_TRACER) && defined(ARCH_SUPPORTS_FTRACE_SAVE_REGS) \
-       && defined(ARCH_SUPPORTS_KPROBES_ON_FTRACE)
-# define KPROBES_CAN_USE_FTRACE
-#endif
-
 /* Attach to insert probes on any functions which should be ignored*/
 #define __kprobes      __attribute__((__section__(".kprobes.text")))
 
@@ -316,7 +306,7 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
 #endif
 
 #endif /* CONFIG_OPTPROBES */
-#ifdef KPROBES_CAN_USE_FTRACE
+#ifdef CONFIG_KPROBES_ON_FTRACE
 extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                                  struct ftrace_ops *ops, struct pt_regs *regs);
 extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
index 2c497ab0d03d41f7c8e85531a46776cba47f8d14..b7996a768eb2c656417fdb082f62871a89fc6f1d 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/rcupdate.h>
 #include <linux/ratelimit.h>
 #include <linux/err.h>
+#include <linux/irqflags.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -740,15 +741,52 @@ static inline int kvm_deassign_device(struct kvm *kvm,
 }
 #endif /* CONFIG_IOMMU_API */
 
-static inline void kvm_guest_enter(void)
+static inline void __guest_enter(void)
 {
-       BUG_ON(preemptible());
        /*
         * This is running in ioctl context so we can avoid
         * the call to vtime_account() with its unnecessary idle check.
         */
-       vtime_account_system_irqsafe(current);
+       vtime_account_system(current);
        current->flags |= PF_VCPU;
+}
+
+static inline void __guest_exit(void)
+{
+       /*
+        * This is running in ioctl context so we can avoid
+        * the call to vtime_account() with its unnecessary idle check.
+        */
+       vtime_account_system(current);
+       current->flags &= ~PF_VCPU;
+}
+
+#ifdef CONFIG_CONTEXT_TRACKING
+extern void guest_enter(void);
+extern void guest_exit(void);
+
+#else /* !CONFIG_CONTEXT_TRACKING */
+static inline void guest_enter(void)
+{
+       __guest_enter();
+}
+
+static inline void guest_exit(void)
+{
+       __guest_exit();
+}
+#endif /* !CONFIG_CONTEXT_TRACKING */
+
+static inline void kvm_guest_enter(void)
+{
+       unsigned long flags;
+
+       BUG_ON(preemptible());
+
+       local_irq_save(flags);
+       guest_enter();
+       local_irq_restore(flags);
+
        /* KVM does not hold any references to rcu protected data when it
         * switches CPU into a guest mode. In fact switching to a guest mode
         * is very similar to exiting to userspase from rcu point of view. In
@@ -761,12 +799,11 @@ static inline void kvm_guest_enter(void)
 
 static inline void kvm_guest_exit(void)
 {
-       /*
-        * This is running in ioctl context so we can avoid
-        * the call to vtime_account() with its unnecessary idle check.
-        */
-       vtime_account_system_irqsafe(current);
-       current->flags &= ~PF_VCPU;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       guest_exit();
+       local_irq_restore(flags);
 }
 
 /*
index 83ba0ab2c915c1bdb66fc7c922343b3f027ad120..649e5f86b5f00928b94e9f8515bf18f286e6b6e4 100644 (file)
@@ -652,8 +652,8 @@ struct ata_device {
                u32             gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
        };
 
-       /* Identify Device Data Log (30h), SATA Settings (page 08h) */
-       u8                      sata_settings[ATA_SECT_SIZE];
+       /* DEVSLP Timing Variables from Identify Device Data Log */
+       u8                      devslp_timing[ATA_LOG_DEVSLP_SIZE];
 
        /* error history */
        int                     spdn_cnt;
index 79603a6c356fb5dd28f2bddf27bceafbf263c7d2..4ad06e824f76f797250b7cef01ac5932ffa7452b 100644 (file)
@@ -36,7 +36,7 @@ struct ps2dev {
        wait_queue_head_t wait;
 
        unsigned long flags;
-       unsigned char cmdbuf[6];
+       unsigned char cmdbuf[8];
        unsigned char cmdcnt;
        unsigned char nak;
 };
index a5199f6d0e82592dde0e4ba7908b1d1b70323483..d0ab98f73d380c67f2572a0b9536719e17325ca9 100644 (file)
@@ -124,6 +124,31 @@ static inline void init_llist_head(struct llist_head *list)
             &(pos)->member != NULL;                                    \
             (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
 
+/**
+ * llist_for_each_entry_safe - iterate safely against remove over some entries
+ * of lock-less list of given type.
+ * @pos:       the type * to use as a loop cursor.
+ * @n:         another type * to use as a temporary storage.
+ * @node:      the first entry of the deleted list entries.
+ * @member:    the name of the llist_node within the struct.
+ *
+ * In general, some entries of the lock-less list can be traversed
+ * safely only after being removed from list, so start with an entry
+ * instead of list head. This variant allows removal of entries
+ * as we iterate.
+ *
+ * If being used on entries deleted from lock-less list directly, the
+ * traverse order is from the newest to the oldest added entry.  If
+ * you want to traverse from the oldest to the newest, you must
+ * reverse the order by yourself before traversing.
+ */
+#define llist_for_each_entry_safe(pos, n, node, member)                \
+       for ((pos) = llist_entry((node), typeof(*(pos)), member),       \
+            (n) = (pos)->member.next;                                  \
+            &(pos)->member != NULL;                                    \
+            (pos) = llist_entry(n, typeof(*(pos)), member),            \
+            (n) = (&(pos)->member != NULL) ? (pos)->member.next : NULL)
+
 /**
  * llist_empty - tests whether a lock-less list is empty
  * @head:      the list to test
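
A hedged sketch of draining a lock-less list with the new safe iterator; struct my_item, my_list and drain_my_list() are illustrative, and note that with the macro as written above @n carries the raw struct llist_node pointer of the next entry:

#include <linux/llist.h>
#include <linux/slab.h>

struct my_item {
	int value;
	struct llist_node node;
};

static LLIST_HEAD(my_list);

static void drain_my_list(void)
{
	struct llist_node *first = llist_del_all(&my_list);
	struct llist_node *n;
	struct my_item *pos;

	if (!first)		/* skip the iterator entirely for an empty list */
		return;
	llist_for_each_entry_safe(pos, n, first, node)
		kfree(pos);	/* safe: the next pointer was saved in n beforehand */
}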
index 00e46376e28f73d33bdfbf56480932dfa12eee9f..2bca44b0893c787bdb463a8f42e7306e02edf87f 100644 (file)
@@ -524,14 +524,17 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
 #  define rwsem_acquire(l, s, t, i)            lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define rwsem_acquire_nest(l, s, t, n, i)    lock_acquire(l, s, t, 0, 2, n, i)
 #  define rwsem_acquire_read(l, s, t, i)       lock_acquire(l, s, t, 1, 2, NULL, i)
 # else
 #  define rwsem_acquire(l, s, t, i)            lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define rwsem_acquire_nest(l, s, t, n, i)    lock_acquire(l, s, t, 0, 1, n, i)
 #  define rwsem_acquire_read(l, s, t, i)       lock_acquire(l, s, t, 1, 1, NULL, i)
 # endif
 # define rwsem_release(l, n, i)                        lock_release(l, n, i)
 #else
 # define rwsem_acquire(l, s, t, i)             do { } while (0)
+# define rwsem_acquire_nest(l, s, t, n, i)     do { } while (0)
 # define rwsem_acquire_read(l, s, t, i)                do { } while (0)
 # define rwsem_release(l, n, i)                        do { } while (0)
 #endif
index 0108a56f814ed7559b2f1fcee41a903890ed919d..28bd5fa2ff2eb1c0550891cfa5af551cc03b6c5c 100644 (file)
@@ -429,7 +429,7 @@ extern int memcg_limited_groups_array_size;
  * the slab_mutex must be held when looping through those caches
  */
 #define for_each_memcg_cache_index(_idx)       \
-       for ((_idx) = 0; i < memcg_limited_groups_array_size; (_idx)++)
+       for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)
 
 static inline bool memcg_kmem_enabled(void)
 {
index 2138bd33021a629f59b868d5a3938aefa4f87449..80e3b8683a84f1c51fb793e8439bc436cb36117f 100644 (file)
@@ -131,7 +131,7 @@ struct abx500_maxim_parameters {
  * @nominal_voltage:           Nominal voltage of the battery in mV
 * @termination_vol:           max voltage up to which the battery can be charged
  * @termination_curr           battery charging termination current in mA
- * @recharge_vol               battery voltage limit that will trigger a new
+ * @recharge_cap               battery capacity limit that will trigger a new
 *                             full charging cycle in the case where
 *                             maintenance charging has been disabled
  * @normal_cur_lvl:            charger current in normal state in mA
@@ -160,7 +160,7 @@ struct abx500_battery_type {
        int nominal_voltage;
        int termination_vol;
        int termination_curr;
-       int recharge_vol;
+       int recharge_cap;
        int normal_cur_lvl;
        int normal_vol_lvl;
        int maint_a_cur_lvl;
@@ -224,6 +224,7 @@ struct abx500_bm_charger_parameters {
  * @bkup_bat_v         voltage which we charge the backup battery with
  * @bkup_bat_i         current which we charge the backup battery with
  * @no_maintenance     indicates that maintenance charging is disabled
+ * @capacity_scaling    indicates whether capacity scaling is to be used
  * @abx500_adc_therm   placement of thermistor, batctrl or battemp adc
  * @chg_unknown_bat    flag to enable charging of unknown batteries
  * @enable_overshoot   flag to enable VBAT overshoot control
@@ -253,7 +254,11 @@ struct abx500_bm_data {
        int usb_safety_tmr_h;
        int bkup_bat_v;
        int bkup_bat_i;
+       bool autopower_cfg;
+       bool ac_enabled;
+       bool usb_enabled;
        bool no_maintenance;
+       bool capacity_scaling;
        bool chg_unknown_bat;
        bool enable_overshoot;
        bool auto_trig;
@@ -272,16 +277,14 @@ struct abx500_bm_data {
        const struct abx500_fg_parameters *fg_params;
 };
 
-extern struct abx500_bm_data ab8500_bm_data;
-
 enum {
        NTC_EXTERNAL = 0,
        NTC_INTERNAL,
 };
 
-int bmdevs_of_probe(struct device *dev,
-               struct device_node *np,
-               struct abx500_bm_data **battery);
+int ab8500_bm_of_probe(struct device *dev,
+                      struct device_node *np,
+                      struct abx500_bm_data *bm);
 
 int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
        u8 value);
index 44310c98ee6eb59f38ca3024622966e6b528bf1d..8d35bfe164c87f46f7c84e01ede1034322fa2870 100644 (file)
@@ -23,6 +23,7 @@
  * Bank : 0x5
  */
 #define AB8500_USB_LINE_STAT_REG       0x80
+#define AB8500_USB_LINK1_STAT_REG      0x94
 
 /*
 * Charger / status register offsets
 /* BatCtrl Current Source Constants */
 #define BAT_CTRL_7U_ENA                        0x01
 #define BAT_CTRL_20U_ENA               0x02
+#define BAT_CTRL_18U_ENA               0x01
+#define BAT_CTRL_16U_ENA               0x02
 #define BAT_CTRL_CMP_ENA               0x04
 #define FORCE_BAT_CTRL_CMP_HIGH                0x08
 #define BAT_CTRL_PULL_UP_ENA           0x10
@@ -355,6 +358,7 @@ struct ab8500_bm_charger_parameters {
  * @bkup_bat_v         voltage which we charge the backup battery with
  * @bkup_bat_i         current which we charge the backup battery with
  * @no_maintenance     indicates that maintenance charging is disabled
+ * @capacity_scaling    indicates whether capacity scaling is to be used
  * @adc_therm          placement of thermistor, batctrl or battemp adc
  * @chg_unknown_bat    flag to enable charging of unknown batteries
  * @enable_overshoot   flag to enable VBAT overshoot control
@@ -383,6 +387,7 @@ struct ab8500_bm_data {
        int bkup_bat_v;
        int bkup_bat_i;
        bool no_maintenance;
+       bool capacity_scaling;
        bool chg_unknown_bat;
        bool enable_overshoot;
        enum abx500_adc_therm adc_therm;
@@ -399,30 +404,13 @@ struct ab8500_bm_data {
        const struct ab8500_fg_parameters *fg_params;
 };
 
-struct ab8500_charger_platform_data {
-       char **supplied_to;
-       size_t num_supplicants;
-       bool autopower_cfg;
-};
-
-struct ab8500_btemp_platform_data {
-       char **supplied_to;
-       size_t num_supplicants;
-};
-
-struct ab8500_fg_platform_data {
-       char **supplied_to;
-       size_t num_supplicants;
-};
-
-struct ab8500_chargalg_platform_data {
-       char **supplied_to;
-       size_t num_supplicants;
-};
 struct ab8500_btemp;
 struct ab8500_gpadc;
 struct ab8500_fg;
+
 #ifdef CONFIG_AB8500_BM
+extern struct abx500_bm_data ab8500_bm_data;
+
 void ab8500_fg_reinit(void);
 void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA);
 struct ab8500_btemp *ab8500_btemp_get(void);
@@ -431,44 +419,10 @@ struct ab8500_fg *ab8500_fg_get(void);
 int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev);
 int ab8500_fg_inst_curr_start(struct ab8500_fg *di);
 int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res);
+int ab8500_fg_inst_curr_started(struct ab8500_fg *di);
 int ab8500_fg_inst_curr_done(struct ab8500_fg *di);
 
 #else
-int ab8500_fg_inst_curr_done(struct ab8500_fg *di)
-{
-}
-static void ab8500_fg_reinit(void)
-{
-}
-static void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA)
-{
-}
-static struct ab8500_btemp *ab8500_btemp_get(void)
-{
-       return NULL;
-}
-static int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp)
-{
-       return 0;
-}
-struct ab8500_fg *ab8500_fg_get(void)
-{
-       return NULL;
-}
-static int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev)
-{
-       return -ENODEV;
-}
-
-static inline int ab8500_fg_inst_curr_start(struct ab8500_fg *di)
-{
-       return -ENODEV;
-}
-
-static inline int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res)
-{
-       return -ENODEV;
-}
-
+static struct abx500_bm_data ab8500_bm_data;
 #endif
 #endif /* _AB8500_BM_H */
index 2387c207ea86d5cc2cf5df7e8aa20e90c3ae2301..172b2f201ae0f345271ebb3486b9beb12142af16 100644 (file)
  * registers.
  */
 
-struct ab8500_gpio_platform_data {
+struct abx500_gpio_platform_data {
        int gpio_base;
-       u32 irq_base;
-       u8  config_reg[8];
+};
+
+enum abx500_gpio_pull_updown {
+       ABX500_GPIO_PULL_DOWN = 0x0,
+       ABX500_GPIO_PULL_NONE = 0x1,
+       ABX500_GPIO_PULL_UP = 0x3,
+};
+
+enum abx500_gpio_vinsel {
+       ABX500_GPIO_VINSEL_VBAT = 0x0,
+       ABX500_GPIO_VINSEL_VIN_1V8 = 0x1,
+       ABX500_GPIO_VINSEL_VDD_BIF = 0x2,
 };
 
 #endif /* _AB8500_GPIO_H */
index 1cb5698b4d76a6bfe0b0a36d1c90a2f10101f9f6..fc0534483c726daaefb0cd5e50116b142a17a4dc 100644 (file)
@@ -24,7 +24,7 @@ enum ab8500_version {
        AB8500_VERSION_AB8500 = 0x0,
        AB8500_VERSION_AB8505 = 0x1,
        AB8500_VERSION_AB9540 = 0x2,
-       AB8500_VERSION_AB8540 = 0x3,
+       AB8500_VERSION_AB8540 = 0x4,
        AB8500_VERSION_UNDEFINED,
 };
 
@@ -32,6 +32,7 @@ enum ab8500_version {
 #define AB8500_CUTEARLY        0x00
 #define AB8500_CUT1P0  0x10
 #define AB8500_CUT1P1  0x11
+#define AB8500_CUT1P2  0x12 /* Only valid for AB8540 */
 #define AB8500_CUT2P0  0x20
 #define AB8500_CUT3P0  0x30
 #define AB8500_CUT3P3  0x33
@@ -39,6 +40,7 @@ enum ab8500_version {
 /*
  * AB8500 bank addresses
  */
+#define AB8500_M_FSM_RANK      0x0
 #define AB8500_SYS_CTRL1_BLOCK 0x1
 #define AB8500_SYS_CTRL2_BLOCK 0x2
 #define AB8500_REGU_CTRL1      0x3
@@ -58,6 +60,7 @@ enum ab8500_version {
 #define AB8500_DEVELOPMENT     0x11
 #define AB8500_DEBUG           0x12
 #define AB8500_PROD_TEST       0x13
+#define AB8500_STE_TEST                0x14
 #define AB8500_OTP_EMUL                0x15
 
 /*
@@ -65,11 +68,11 @@ enum ab8500_version {
  * Values used to index into array ab8500_irq_regoffset[] defined in
  * drivers/mdf/ab8500-core.c
  */
-/* Definitions for AB8500 and AB9540 */
+/* Definitions for AB8500, AB9540 and AB8540 */
 /* ab8500_irq_regoffset[0] -> IT[Source|Latch|Mask]1 */
 #define AB8500_INT_MAIN_EXT_CH_NOT_OK  0 /* not 8505/9540 */
-#define AB8500_INT_UN_PLUG_TV_DET      1 /* not 8505/9540 */
-#define AB8500_INT_PLUG_TV_DET         2 /* not 8505/9540 */
+#define AB8500_INT_UN_PLUG_TV_DET      1 /* not 8505/9540/8540 */
+#define AB8500_INT_PLUG_TV_DET         2 /* not 8505/9540/8540 */
 #define AB8500_INT_TEMP_WARM           3
 #define AB8500_INT_PON_KEY2DB_F                4
 #define AB8500_INT_PON_KEY2DB_R                5
@@ -77,18 +80,19 @@ enum ab8500_version {
 #define AB8500_INT_PON_KEY1DB_R                7
 /* ab8500_irq_regoffset[1] -> IT[Source|Latch|Mask]2 */
 #define AB8500_INT_BATT_OVV            8
-#define AB8500_INT_MAIN_CH_UNPLUG_DET  10 /* not 8505 */
-#define AB8500_INT_MAIN_CH_PLUG_DET    11 /* not 8505 */
+#define AB8500_INT_MAIN_CH_UNPLUG_DET  10 /* not 8505/8540 */
+#define AB8500_INT_MAIN_CH_PLUG_DET    11 /* not 8505/8540 */
 #define AB8500_INT_VBUS_DET_F          14
 #define AB8500_INT_VBUS_DET_R          15
 /* ab8500_irq_regoffset[2] -> IT[Source|Latch|Mask]3 */
 #define AB8500_INT_VBUS_CH_DROP_END    16
 #define AB8500_INT_RTC_60S             17
 #define AB8500_INT_RTC_ALARM           18
+#define AB8540_INT_BIF_INT             19
 #define AB8500_INT_BAT_CTRL_INDB       20
 #define AB8500_INT_CH_WD_EXP           21
 #define AB8500_INT_VBUS_OVV            22
-#define AB8500_INT_MAIN_CH_DROP_END    23 /* not 8505/9540 */
+#define AB8500_INT_MAIN_CH_DROP_END    23 /* not 8505/9540/8540 */
 /* ab8500_irq_regoffset[3] -> IT[Source|Latch|Mask]4 */
 #define AB8500_INT_CCN_CONV_ACC                24
 #define AB8500_INT_INT_AUD             25
@@ -99,7 +103,7 @@ enum ab8500_version {
 #define AB8500_INT_BUP_CHG_NOT_OK      30
 #define AB8500_INT_BUP_CHG_OK          31
 /* ab8500_irq_regoffset[4] -> IT[Source|Latch|Mask]5 */
-#define AB8500_INT_GP_HW_ADC_CONV_END  32 /* not 8505 */
+#define AB8500_INT_GP_HW_ADC_CONV_END  32 /* not 8505/8540 */
 #define AB8500_INT_ACC_DETECT_1DB_F    33
 #define AB8500_INT_ACC_DETECT_1DB_R    34
 #define AB8500_INT_ACC_DETECT_22DB_F   35
@@ -108,23 +112,23 @@ enum ab8500_version {
 #define AB8500_INT_ACC_DETECT_21DB_R   38
 #define AB8500_INT_GP_SW_ADC_CONV_END  39
 /* ab8500_irq_regoffset[5] -> IT[Source|Latch|Mask]7 */
-#define AB8500_INT_GPIO6R              40 /* not 8505/9540 */
-#define AB8500_INT_GPIO7R              41 /* not 8505/9540 */
-#define AB8500_INT_GPIO8R              42 /* not 8505/9540 */
-#define AB8500_INT_GPIO9R              43 /* not 8505/9540 */
-#define AB8500_INT_GPIO10R             44
-#define AB8500_INT_GPIO11R             45
-#define AB8500_INT_GPIO12R             46 /* not 8505 */
-#define AB8500_INT_GPIO13R             47
+#define AB8500_INT_GPIO6R              40 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO7R              41 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO8R              42 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO9R              43 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO10R             44 /* not 8540 */
+#define AB8500_INT_GPIO11R             45 /* not 8540 */
+#define AB8500_INT_GPIO12R             46 /* not 8505/8540 */
+#define AB8500_INT_GPIO13R             47 /* not 8540 */
 /* ab8500_irq_regoffset[6] -> IT[Source|Latch|Mask]8 */
-#define AB8500_INT_GPIO24R             48 /* not 8505 */
-#define AB8500_INT_GPIO25R             49 /* not 8505 */
-#define AB8500_INT_GPIO36R             50 /* not 8505/9540 */
-#define AB8500_INT_GPIO37R             51 /* not 8505/9540 */
-#define AB8500_INT_GPIO38R             52 /* not 8505/9540 */
-#define AB8500_INT_GPIO39R             53 /* not 8505/9540 */
-#define AB8500_INT_GPIO40R             54
-#define AB8500_INT_GPIO41R             55
+#define AB8500_INT_GPIO24R             48 /* not 8505/8540 */
+#define AB8500_INT_GPIO25R             49 /* not 8505/8540 */
+#define AB8500_INT_GPIO36R             50 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO37R             51 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO38R             52 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO39R             53 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO40R             54 /* not 8540 */
+#define AB8500_INT_GPIO41R             55 /* not 8540 */
 /* ab8500_irq_regoffset[7] -> IT[Source|Latch|Mask]9 */
 #define AB8500_INT_GPIO6F              56 /* not 8505/9540 */
 #define AB8500_INT_GPIO7F              57 /* not 8505/9540 */
@@ -135,14 +139,14 @@ enum ab8500_version {
 #define AB8500_INT_GPIO12F             62 /* not 8505 */
 #define AB8500_INT_GPIO13F             63
 /* ab8500_irq_regoffset[8] -> IT[Source|Latch|Mask]10 */
-#define AB8500_INT_GPIO24F             64 /* not 8505 */
-#define AB8500_INT_GPIO25F             65 /* not 8505 */
-#define AB8500_INT_GPIO36F             66 /* not 8505/9540 */
-#define AB8500_INT_GPIO37F             67 /* not 8505/9540 */
-#define AB8500_INT_GPIO38F             68 /* not 8505/9540 */
-#define AB8500_INT_GPIO39F             69 /* not 8505/9540 */
-#define AB8500_INT_GPIO40F             70
-#define AB8500_INT_GPIO41F             71
+#define AB8500_INT_GPIO24F             64 /* not 8505/8540 */
+#define AB8500_INT_GPIO25F             65 /* not 8505/8540 */
+#define AB8500_INT_GPIO36F             66 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO37F             67 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO38F             68 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO39F             69 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO40F             70 /* not 8540 */
+#define AB8500_INT_GPIO41F             71 /* not 8540 */
 /* ab8500_irq_regoffset[9] -> IT[Source|Latch|Mask]12 */
 #define AB8500_INT_ADP_SOURCE_ERROR    72
 #define AB8500_INT_ADP_SINK_ERROR      73
@@ -160,42 +164,44 @@ enum ab8500_version {
 #define AB8500_INT_SRP_DETECT          88
 #define AB8500_INT_USB_CHARGER_NOT_OKR 89
 #define AB8500_INT_ID_WAKEUP_R         90
+#define AB8500_INT_ID_DET_PLUGR         91 /* 8505/9540 cut2.0 */
 #define AB8500_INT_ID_DET_R1R          92
 #define AB8500_INT_ID_DET_R2R          93
 #define AB8500_INT_ID_DET_R3R          94
 #define AB8500_INT_ID_DET_R4R          95
 /* ab8500_irq_regoffset[12] -> IT[Source|Latch|Mask]21 */
-#define AB8500_INT_ID_WAKEUP_F         96
-#define AB8500_INT_ID_DET_R1F          98
-#define AB8500_INT_ID_DET_R2F          99
-#define AB8500_INT_ID_DET_R3F          100
-#define AB8500_INT_ID_DET_R4F          101
-#define AB8500_INT_CHAUTORESTARTAFTSEC  102
+#define AB8500_INT_ID_WAKEUP_F         96 /* not 8505/9540 */
+#define AB8500_INT_ID_DET_PLUGF        97 /* 8505/9540 cut2.0 */
+#define AB8500_INT_ID_DET_R1F          98 /* not 8505/9540 */
+#define AB8500_INT_ID_DET_R2F          99 /* not 8505/9540 */
+#define AB8500_INT_ID_DET_R3F          100 /* not 8505/9540 */
+#define AB8500_INT_ID_DET_R4F          101 /* not 8505/9540 */
+#define AB8500_INT_CHAUTORESTARTAFTSEC 102 /* not 8505/9540 */
 #define AB8500_INT_CHSTOPBYSEC         103
 /* ab8500_irq_regoffset[13] -> IT[Source|Latch|Mask]22 */
 #define AB8500_INT_USB_CH_TH_PROT_F    104
-#define AB8500_INT_USB_CH_TH_PROT_R    105
+#define AB8500_INT_USB_CH_TH_PROT_R    105
 #define AB8500_INT_MAIN_CH_TH_PROT_F   106 /* not 8505/9540 */
 #define AB8500_INT_MAIN_CH_TH_PROT_R   107 /* not 8505/9540 */
 #define AB8500_INT_CHCURLIMNOHSCHIRP   109
 #define AB8500_INT_CHCURLIMHSCHIRP     110
 #define AB8500_INT_XTAL32K_KO          111
 
-/* Definitions for AB9540 */
+/* Definitions for AB9540 / AB8505 */
 /* ab8500_irq_regoffset[14] -> IT[Source|Latch|Mask]13 */
-#define AB9540_INT_GPIO50R             113
-#define AB9540_INT_GPIO51R             114 /* not 8505 */
-#define AB9540_INT_GPIO52R             115
-#define AB9540_INT_GPIO53R             116
-#define AB9540_INT_GPIO54R             117 /* not 8505 */
+#define AB9540_INT_GPIO50R             113 /* not 8540 */
+#define AB9540_INT_GPIO51R             114 /* not 8505/8540 */
+#define AB9540_INT_GPIO52R             115 /* not 8540 */
+#define AB9540_INT_GPIO53R             116 /* not 8540 */
+#define AB9540_INT_GPIO54R             117 /* not 8505/8540 */
 #define AB9540_INT_IEXT_CH_RF_BFN_R    118
-#define AB9540_INT_IEXT_CH_RF_BFN_F    119
 /* ab8500_irq_regoffset[15] -> IT[Source|Latch|Mask]14 */
-#define AB9540_INT_GPIO50F             121
-#define AB9540_INT_GPIO51F             122 /* not 8505 */
-#define AB9540_INT_GPIO52F             123
-#define AB9540_INT_GPIO53F             124
-#define AB9540_INT_GPIO54F             125 /* not 8505 */
+#define AB9540_INT_GPIO50F             121 /* not 8540 */
+#define AB9540_INT_GPIO51F             122 /* not 8505/8540 */
+#define AB9540_INT_GPIO52F             123 /* not 8540 */
+#define AB9540_INT_GPIO53F             124 /* not 8540 */
+#define AB9540_INT_GPIO54F             125 /* not 8505/8540 */
+#define AB9540_INT_IEXT_CH_RF_BFN_F    126
 /* ab8500_irq_regoffset[16] -> IT[Source|Latch|Mask]25 */
 #define AB8505_INT_KEYSTUCK            128
 #define AB8505_INT_IKR                 129
@@ -204,6 +210,87 @@ enum ab8500_version {
 #define AB8505_INT_KEYDEGLITCH         132
 #define AB8505_INT_MODPWRSTATUSF       134
 #define AB8505_INT_MODPWRSTATUSR       135
+/* ab8500_irq_regoffset[17] -> IT[Source|Latch|Mask]6 */
+#define AB8500_INT_HOOK_DET_NEG_F      138
+#define AB8500_INT_HOOK_DET_NEG_R      139
+#define AB8500_INT_HOOK_DET_POS_F      140
+#define AB8500_INT_HOOK_DET_POS_R      141
+#define AB8500_INT_PLUG_DET_COMP_F     142
+#define AB8500_INT_PLUG_DET_COMP_R     143
+/* ab8500_irq_regoffset[18] -> IT[Source|Latch|Mask]23 */
+#define AB8505_INT_COLL                        144
+#define AB8505_INT_RESERR              145
+#define AB8505_INT_FRAERR              146
+#define AB8505_INT_COMERR              147
+#define AB8505_INT_SPDSET              148
+#define AB8505_INT_DSENT               149
+#define AB8505_INT_DREC                        150
+#define AB8505_INT_ACC_INT             151
+/* ab8500_irq_regoffset[19] -> IT[Source|Latch|Mask]24 */
+#define AB8505_INT_NOPINT              152
+/* ab8540_irq_regoffset[20] -> IT[Source|Latch|Mask]26 */
+#define AB8540_INT_IDPLUGDETCOMPF      160
+#define AB8540_INT_IDPLUGDETCOMPR      161
+#define AB8540_INT_FMDETCOMPLOF                162
+#define AB8540_INT_FMDETCOMPLOR                163
+#define AB8540_INT_FMDETCOMPHIF                164
+#define AB8540_INT_FMDETCOMPHIR                165
+#define AB8540_INT_ID5VDETCOMPF                166
+#define AB8540_INT_ID5VDETCOMPR                167
+/* ab8540_irq_regoffset[21] -> IT[Source|Latch|Mask]27 */
+#define AB8540_INT_GPIO43F             168
+#define AB8540_INT_GPIO43R             169
+#define AB8540_INT_GPIO44F             170
+#define AB8540_INT_GPIO44R             171
+#define AB8540_INT_KEYPOSDETCOMPF      172
+#define AB8540_INT_KEYPOSDETCOMPR      173
+#define AB8540_INT_KEYNEGDETCOMPF      174
+#define AB8540_INT_KEYNEGDETCOMPR      175
+/* ab8540_irq_regoffset[22] -> IT[Source|Latch|Mask]28 */
+#define AB8540_INT_GPIO1VBATF          176
+#define AB8540_INT_GPIO1VBATR          177
+#define AB8540_INT_GPIO2VBATF          178
+#define AB8540_INT_GPIO2VBATR          179
+#define AB8540_INT_GPIO3VBATF          180
+#define AB8540_INT_GPIO3VBATR          181
+#define AB8540_INT_GPIO4VBATF          182
+#define AB8540_INT_GPIO4VBATR          183
+/* ab8540_irq_regoffset[23] -> IT[Source|Latch|Mask]29 */
+#define AB8540_INT_SYSCLKREQ2F         184
+#define AB8540_INT_SYSCLKREQ2R         185
+#define AB8540_INT_SYSCLKREQ3F         186
+#define AB8540_INT_SYSCLKREQ3R         187
+#define AB8540_INT_SYSCLKREQ4F         188
+#define AB8540_INT_SYSCLKREQ4R         189
+#define AB8540_INT_SYSCLKREQ5F         190
+#define AB8540_INT_SYSCLKREQ5R         191
+/* ab8540_irq_regoffset[24] -> IT[Source|Latch|Mask]30 */
+#define AB8540_INT_PWMOUT1F            192
+#define AB8540_INT_PWMOUT1R            193
+#define AB8540_INT_PWMCTRL0F           194
+#define AB8540_INT_PWMCTRL0R           195
+#define AB8540_INT_PWMCTRL1F           196
+#define AB8540_INT_PWMCTRL1R           197
+#define AB8540_INT_SYSCLKREQ6F         198
+#define AB8540_INT_SYSCLKREQ6R         199
+/* ab8540_irq_regoffset[25] -> IT[Source|Latch|Mask]31 */
+#define AB8540_INT_PWMEXTVIBRA1F       200
+#define AB8540_INT_PWMEXTVIBRA1R       201
+#define AB8540_INT_PWMEXTVIBRA2F       202
+#define AB8540_INT_PWMEXTVIBRA2R       203
+#define AB8540_INT_PWMOUT2F            204
+#define AB8540_INT_PWMOUT2R            205
+#define AB8540_INT_PWMOUT3F            206
+#define AB8540_INT_PWMOUT3R            207
+/* ab8540_irq_regoffset[26] -> IT[Source|Latch|Mask]32 */
+#define AB8540_INT_ADDATA2F            208
+#define AB8540_INT_ADDATA2R            209
+#define AB8540_INT_DADATA2F            210
+#define AB8540_INT_DADATA2R            211
+#define AB8540_INT_FSYNC2F             212
+#define AB8540_INT_FSYNC2R             213
+#define AB8540_INT_BITCLK2F            214
+#define AB8540_INT_BITCLK2R            215
 
 /*
  * AB8500_AB9540_NR_IRQS is used when configuring the IRQ numbers for the
@@ -213,13 +300,24 @@ enum ab8500_version {
  * which is larger.
  */
 #define AB8500_NR_IRQS                 112
-#define AB8505_NR_IRQS                 136
-#define AB9540_NR_IRQS                 136
+#define AB8505_NR_IRQS                 153
+#define AB9540_NR_IRQS                 153
+#define AB8540_NR_IRQS                 216
 /* This is set to the roof of any AB8500 chip variant IRQ counts */
-#define AB8500_MAX_NR_IRQS             AB9540_NR_IRQS
+#define AB8500_MAX_NR_IRQS             AB8540_NR_IRQS
 
 #define AB8500_NUM_IRQ_REGS            14
-#define AB9540_NUM_IRQ_REGS            17
+#define AB9540_NUM_IRQ_REGS            20
+#define AB8540_NUM_IRQ_REGS            27
+
+/* Turn On Status Event */
+#define AB8500_POR_ON_VBAT             0x01
+#define AB8500_POW_KEY_1_ON            0x02
+#define AB8500_POW_KEY_2_ON            0x04
+#define AB8500_RTC_ALARM               0x08
+#define AB8500_MAIN_CH_DET             0x10
+#define AB8500_VBUS_DET                        0x20
+#define AB8500_USB_ID_DET              0x40
 
 /**
  * struct ab8500 - ab8500 internal structure
@@ -287,7 +385,7 @@ struct ab8500_platform_data {
        struct ab8500_regulator_reg_init *regulator_reg_init;
        int num_regulator;
        struct regulator_init_data *regulator;
-       struct ab8500_gpio_platform_data *gpio;
+       struct abx500_gpio_platform_data *gpio;
        struct ab8500_codec_platform_data *codec;
 };
 
@@ -335,10 +433,79 @@ static inline int is_ab8500_2p0_or_earlier(struct ab8500 *ab)
        return (is_ab8500(ab) && (ab->chip_id <= AB8500_CUT2P0));
 }
 
+static inline int is_ab8500_3p3_or_earlier(struct ab8500 *ab)
+{
+       return (is_ab8500(ab) && (ab->chip_id <= AB8500_CUT3P3));
+}
+
 /* exclude also ab8505, ab9540... */
 static inline int is_ab8500_2p0(struct ab8500 *ab)
 {
        return (is_ab8500(ab) && (ab->chip_id == AB8500_CUT2P0));
 }
 
+static inline int is_ab8505_1p0_or_earlier(struct ab8500 *ab)
+{
+       return (is_ab8505(ab) && (ab->chip_id <= AB8500_CUT1P0));
+}
+
+static inline int is_ab8505_2p0(struct ab8500 *ab)
+{
+       return (is_ab8505(ab) && (ab->chip_id == AB8500_CUT2P0));
+}
+
+static inline int is_ab9540_1p0_or_earlier(struct ab8500 *ab)
+{
+       return (is_ab9540(ab) && (ab->chip_id <= AB8500_CUT1P0));
+}
+
+static inline int is_ab9540_2p0(struct ab8500 *ab)
+{
+       return (is_ab9540(ab) && (ab->chip_id == AB8500_CUT2P0));
+}
+
+/*
+ * Be careful, the marketing name for this chip is 2.1
+ * but the value read from the chip is 3.0 (0x30)
+ */
+static inline int is_ab9540_3p0(struct ab8500 *ab)
+{
+       return (is_ab9540(ab) && (ab->chip_id == AB8500_CUT3P0));
+}
+
+static inline int is_ab8540_1p0_or_earlier(struct ab8500 *ab)
+{
+       return is_ab8540(ab) && (ab->chip_id <= AB8500_CUT1P0);
+}
+
+static inline int is_ab8540_1p1_or_earlier(struct ab8500 *ab)
+{
+       return is_ab8540(ab) && (ab->chip_id <= AB8500_CUT1P1);
+}
+
+static inline int is_ab8540_1p2_or_earlier(struct ab8500 *ab)
+{
+       return is_ab8540(ab) && (ab->chip_id <= AB8500_CUT1P2);
+}
+
+static inline int is_ab8540_2p0_or_earlier(struct ab8500 *ab)
+{
+       return is_ab8540(ab) && (ab->chip_id <= AB8500_CUT2P0);
+}
+
+static inline int is_ab8540_2p0(struct ab8500 *ab)
+{
+       return is_ab8540(ab) && (ab->chip_id == AB8500_CUT2P0);
+}
+
+static inline int is_ab8505_2p0_earlier(struct ab8500 *ab)
+{
+       return (is_ab8505(ab) && (ab->chip_id < AB8500_CUT2P0));
+}
+
+static inline int is_ab9540_2p0_or_earlier(struct ab8500 *ab)
+{
+       return (is_ab9540(ab) && (ab->chip_id < AB8500_CUT2P0));
+}
+
 #endif /* MFD_AB8500_H */
index 9b07725750c90bc04374610b663fd90c53037f78..d43ac0f355263e7ac188d09616fef59476feec03 100644 (file)
@@ -27,12 +27,17 @@ struct ux500_charger_ops {
  * @ops                        ux500 charger operations
  * @max_out_volt       maximum output charger voltage in mV
  * @max_out_curr       maximum output charger current in mA
+ * @enabled            indicates if this charger is used or not
+ * @external           external charger unit (pm2xxx)
  */
 struct ux500_charger {
        struct power_supply psy;
        struct ux500_charger_ops ops;
        int max_out_volt;
        int max_out_curr;
+       int wdt_refresh;
+       bool enabled;
+       bool external;
 };
 
 #endif
index 86dd93de6ff2b70ac4c693e8a9a05083bea72503..786d02eb79d2210ef8c3dfe145e3c8d0e0123ef1 100644 (file)
@@ -99,6 +99,9 @@ struct da9052 {
        u8 chip_id;
 
        int chip_irq;
+
+       /* SOC I/O transfer related fixes for DA9052/53 */
+       int (*fix_io) (struct da9052 *da9052, unsigned char reg);
 };
 
 /* ADC API */
@@ -113,32 +116,87 @@ static inline int da9052_reg_read(struct da9052 *da9052, unsigned char reg)
        ret = regmap_read(da9052->regmap, reg, &val);
        if (ret < 0)
                return ret;
+
+       if (da9052->fix_io) {
+               ret = da9052->fix_io(da9052, reg);
+               if (ret < 0)
+                       return ret;
+       }
+
        return val;
 }
 
 static inline int da9052_reg_write(struct da9052 *da9052, unsigned char reg,
                                    unsigned char val)
 {
-       return regmap_write(da9052->regmap, reg, val);
+       int ret;
+
+       ret = regmap_write(da9052->regmap, reg, val);
+       if (ret < 0)
+               return ret;
+
+       if (da9052->fix_io) {
+               ret = da9052->fix_io(da9052, reg);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return ret;
 }
 
 static inline int da9052_group_read(struct da9052 *da9052, unsigned char reg,
                                     unsigned reg_cnt, unsigned char *val)
 {
-       return regmap_bulk_read(da9052->regmap, reg, val, reg_cnt);
+       int ret;
+
+       ret = regmap_bulk_read(da9052->regmap, reg, val, reg_cnt);
+       if (ret < 0)
+               return ret;
+
+       if (da9052->fix_io) {
+               ret = da9052->fix_io(da9052, reg);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return ret;
 }
 
 static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg,
                                      unsigned reg_cnt, unsigned char *val)
 {
-       return regmap_raw_write(da9052->regmap, reg, val, reg_cnt);
+       int ret;
+
+       ret = regmap_raw_write(da9052->regmap, reg, val, reg_cnt);
+       if (ret < 0)
+               return ret;
+
+       if (da9052->fix_io) {
+               ret = da9052->fix_io(da9052, reg);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return ret;
 }
 
 static inline int da9052_reg_update(struct da9052 *da9052, unsigned char reg,
                                     unsigned char bit_mask,
                                     unsigned char reg_val)
 {
-       return regmap_update_bits(da9052->regmap, reg, bit_mask, reg_val);
+       int ret;
+
+       ret = regmap_update_bits(da9052->regmap, reg, bit_mask, reg_val);
+       if (ret < 0)
+               return ret;
+
+       if (da9052->fix_io) {
+               ret = da9052->fix_io(da9052, reg);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return ret;
 }
 
 int da9052_device_init(struct da9052 *da9052, u8 chip_id);
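
A minimal hedged sketch of how a DA9052/53 bus driver might use the new ->fix_io hook added above: every register access is now followed by the callback, so a driver can, for example, dummy-read a safe "park" register after each transfer. The helper names and the use of DA9052_PARK_REGISTER are illustrative assumptions, not code taken from an in-tree driver.

#include <linux/regmap.h>
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>	/* DA9052_PARK_REGISTER (path assumed) */

/* Dummy-read a harmless register after every real access (workaround sketch). */
static int da9052_example_fix_io(struct da9052 *da9052, unsigned char reg)
{
	unsigned int val;

	return regmap_read(da9052->regmap, DA9052_PARK_REGISTER, &val);
}

static void da9052_example_install_fix(struct da9052 *da9052)
{
	/* Installed once at probe time; leaving it NULL keeps the fast path. */
	da9052->fix_io = da9052_example_fix_io;
}
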
index b97f7309d7f69e7189d74aead8ac4281a8ed0881..c4dd3a8add21b94cc526843c117734c506cc5233 100644 (file)
@@ -34,6 +34,9 @@
 #define DA9052_STATUS_C_REG            3
 #define DA9052_STATUS_D_REG            4
 
+/* PARK REGISTER */
+#define DA9052_PARK_REGISTER           DA9052_STATUS_D_REG
+
 /* EVENT REGISTERS */
 #define DA9052_EVENT_A_REG             5
 #define DA9052_EVENT_B_REG             6
index a8d393e3066b74465f14994740b758f1a134850b..2b13970596f53732cda07a6fd3b270cc9e933e5b 100644 (file)
@@ -38,6 +38,9 @@
 #define RTSX_SD_CARD                   0
 #define RTSX_MS_CARD                   1
 
+#define CLK_TO_DIV_N                   0
+#define DIV_N_TO_CLK                   1
+
 struct platform_device;
 
 struct rtsx_slot {
index 060b721fcbfb93b690d51191daeb95da692eef8c..4b117a3f54d493f59393226c6351e1fa8c1e16ca 100644 (file)
 #define SG_TRANS_DATA          (0x02 << 4)
 #define SG_LINK_DESC           (0x03 << 4)
 
-/* SD bank voltage */
-#define SD_IO_3V3              0
-#define SD_IO_1V8              1
-
+/* Output voltage */
+#define OUTPUT_3V3             0
+#define OUTPUT_1V8             1
 
 /* Card Clock Enable Register */
 #define SD_CLK_EN                      0x04
 #define CHANGE_CLK                     0x01
 
 /* LDO_CTL */
+#define BPP_ASIC_1V7                   0x00
+#define BPP_ASIC_1V8                   0x01
+#define BPP_ASIC_1V9                   0x02
+#define BPP_ASIC_2V0                   0x03
+#define BPP_ASIC_2V7                   0x04
+#define BPP_ASIC_2V8                   0x05
+#define BPP_ASIC_3V2                   0x06
+#define BPP_ASIC_3V3                   0x07
+#define BPP_REG_TUNED18                        0x07
+#define BPP_TUNED18_SHIFT_8402         5
+#define BPP_TUNED18_SHIFT_8411         4
+#define BPP_PAD_MASK                   0x04
+#define BPP_PAD_3V3                    0x04
+#define BPP_PAD_1V8                    0x00
 #define BPP_LDO_POWB                   0x03
 #define BPP_LDO_ON                     0x00
 #define BPP_LDO_SUSPEND                        0x02
@@ -688,7 +701,10 @@ struct pcr_ops {
        int             (*disable_auto_blink)(struct rtsx_pcr *pcr);
        int             (*card_power_on)(struct rtsx_pcr *pcr, int card);
        int             (*card_power_off)(struct rtsx_pcr *pcr, int card);
+       int             (*switch_output_voltage)(struct rtsx_pcr *pcr,
+                                               u8 voltage);
        unsigned int    (*cd_deglitch)(struct rtsx_pcr *pcr);
+       int             (*conv_clk_and_div_n)(int clk, int dir);
 };
 
 enum PDEV_STAT  {PDEV_STAT_IDLE, PDEV_STAT_RUN};
@@ -783,6 +799,7 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
                u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk);
 int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card);
 int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card);
+int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage);
 unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr);
 void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr);
 
index b50c38f8bc4829b0460c24f113148df9d82ef54c..f0f4de3b4ccc654b436dfdc2c9167feeb5e27793 100644 (file)
@@ -26,6 +26,7 @@ enum sec_device_type {
 /**
  * struct sec_pmic_dev - s5m87xx master device for sub-drivers
  * @dev: master device of the chip (can be used to access platform data)
+ * @pdata: pointer to private data used to pass platform data to child
  * @i2c: i2c client private data for regulator
  * @rtc: i2c client private data for rtc
  * @iolock: mutex for serializing io access
@@ -39,6 +40,7 @@ enum sec_device_type {
  */
 struct sec_pmic_dev {
        struct device *dev;
+       struct sec_platform_data *pdata;
        struct regmap *regmap;
        struct i2c_client *i2c;
        struct i2c_client *rtc;
@@ -82,11 +84,11 @@ struct sec_platform_data {
 
        int                             buck_gpios[3];
        int                             buck_ds[3];
-       int                             buck2_voltage[8];
+       unsigned int                    buck2_voltage[8];
        bool                            buck2_gpiodvs;
-       int                             buck3_voltage[8];
+       unsigned int                    buck3_voltage[8];
        bool                            buck3_gpiodvs;
-       int                             buck4_voltage[8];
+       unsigned int                    buck4_voltage[8];
        bool                            buck4_gpiodvs;
 
        int                             buck_set1;
@@ -127,6 +129,7 @@ struct sec_platform_data {
 struct sec_regulator_data {
        int                             id;
        struct regulator_init_data      *initdata;
+       struct device_node *reg_node;
 };
 
 /*
@@ -136,7 +139,7 @@ struct sec_regulator_data {
  */
 struct sec_opmode_data {
        int id;
-       int mode;
+       unsigned int mode;
 };
 
 /*
index 63204078f72b9798901b6ef6371847ade8031809..66e2f7c61e5c9d3a2924389e28ffa9d32c8728bf 100644 (file)
@@ -455,7 +455,6 @@ void put_pages_list(struct list_head *pages);
 
 void split_page(struct page *page, unsigned int order);
 int split_free_page(struct page *page);
-int capture_free_page(struct page *page, int alloc_order, int migratetype);
 
 /*
  * Compound pages have a destructor function.  Provide a
index bc823c4c028bd8c0404940384da729c072dc31a3..deca87452528b2888823a4daf4cb148d43a52544 100644 (file)
@@ -151,7 +151,7 @@ struct mmu_notifier_ops {
  * Therefore notifier chains can only be traversed when either
  *
  * 1. mmap_sem is held.
- * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->mutex).
+ * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->rwsem).
  * 3. No other concurrent thread can access the list (release)
  */
 struct mmu_notifier {
index 7760c6d344a32cd6a59cbb163a56c9bcd4888683..1375ee3f03aadd4d2504762628a3991272861fa8 100644 (file)
@@ -199,11 +199,11 @@ struct module_use {
        struct module *source, *target;
 };
 
-enum module_state
-{
-       MODULE_STATE_LIVE,
-       MODULE_STATE_COMING,
-       MODULE_STATE_GOING,
+enum module_state {
+       MODULE_STATE_LIVE,      /* Normal state. */
+       MODULE_STATE_COMING,    /* Fully formed, running module_init. */
+       MODULE_STATE_GOING,     /* Going away. */
+       MODULE_STATE_UNFORMED,  /* Still setting it up. */
 };
 
 /**
index c599e4782d454cc853a6a9c1491f38bd1c1509b4..9ef07d0868b6012da1d1089daa2c9b0d7d304389 100644 (file)
@@ -60,6 +60,9 @@ struct wireless_dev;
 #define SET_ETHTOOL_OPS(netdev,ops) \
        ( (netdev)->ethtool_ops = (ops) )
 
+extern void netdev_set_default_ethtool_ops(struct net_device *dev,
+                                          const struct ethtool_ops *ops);
+
 /* hardware address assignment types */
 #define NET_ADDR_PERM          0       /* address is permanent (default) */
 #define NET_ADDR_RANDOM                1       /* address is generated randomly */
index 15472d691ee68c6aad5dbeb0a75933ec18f00404..6fa4dd2a3b9e08f94b2f6b446d88b8b2ed8e3ad4 100644 (file)
@@ -1101,6 +1101,12 @@ static inline int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
        return -1;
 }
 
+static inline int
+pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec)
+{
+       return -1;
+}
+
 static inline void pci_msi_shutdown(struct pci_dev *dev)
 { }
 static inline void pci_disable_msi(struct pci_dev *dev)
@@ -1132,6 +1138,7 @@ static inline int pci_msi_enabled(void)
 }
 #else
 extern int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec);
+extern int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec);
 extern void pci_msi_shutdown(struct pci_dev *dev);
 extern void pci_disable_msi(struct pci_dev *dev);
 extern int pci_msix_table_size(struct pci_dev *dev);
index 6bfb2faa0b1937555f5ca8fa937c7007bf5b3bc4..e47ee462c2f2e69a8cbef4ef3a3d7bfc873cb51f 100644 (file)
@@ -135,16 +135,21 @@ struct hw_perf_event {
                struct { /* software */
                        struct hrtimer  hrtimer;
                };
+               struct { /* tracepoint */
+                       struct task_struct      *tp_target;
+                       /* for tp_event->class */
+                       struct list_head        tp_list;
+               };
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
                struct { /* breakpoint */
-                       struct arch_hw_breakpoint       info;
-                       struct list_head                bp_list;
                        /*
                         * Crufty hack to avoid the chicken and egg
                         * problem hw_breakpoint has with context
 			 * creation and event initialization.
                         */
                        struct task_struct              *bp_target;
+                       struct arch_hw_breakpoint       info;
+                       struct list_head                bp_list;
                };
 #endif
        };
@@ -817,6 +822,17 @@ do {                                                                       \
 } while (0)
 
 
+struct perf_pmu_events_attr {
+       struct device_attribute attr;
+       u64 id;
+};
+
+#define PMU_EVENT_ATTR(_name, _var, _id, _show)                                \
+static struct perf_pmu_events_attr _var = {                            \
+       .attr = __ATTR(_name, 0444, _show, NULL),                       \
+       .id   =  _id,                                                   \
+};
+
 #define PMU_FORMAT_ATTR(_name, _format)                                        \
 static ssize_t                                                         \
 _name##_show(struct device *dev,                                       \
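
The PMU_EVENT_ATTR() helper above only declares the attribute; below is a hedged sketch of how a PMU driver might wire such an attribute into an "events" sysfs group. The show routine, the event id 0x3c and the group name are illustrative assumptions, not taken from any in-tree driver.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/sysfs.h>

static ssize_t example_events_show(struct device *dev,
				   struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	/* Report the raw event encoding stored in .id by PMU_EVENT_ATTR(). */
	return sprintf(page, "event=%#llx\n",
		       (unsigned long long)pmu_attr->id);
}

/* Expands to: static struct perf_pmu_events_attr evattr_cycles = { ... }; */
PMU_EVENT_ATTR(cycles, evattr_cycles, 0x3c, example_events_show)

static struct attribute *example_events_attrs[] = {
	&evattr_cycles.attr.attr,
	NULL,
};

/* A real driver would hook this up via its struct pmu attribute groups. */
static struct attribute_group example_events_group = {
	.name	= "events",
	.attrs	= example_events_attrs,
};
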
diff --git a/include/linux/pinctrl/devinfo.h b/include/linux/pinctrl/devinfo.h
new file mode 100644 (file)
index 0000000..6e5f8a9
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Per-device information from the pin control system.
+ * This is the stuff that get included into the device
+ * core.
+ *
+ * Copyright (C) 2012 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * This interface is used in the core to keep track of pins.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef PINCTRL_DEVINFO_H
+#define PINCTRL_DEVINFO_H
+
+#ifdef CONFIG_PINCTRL
+
+/* The device core acts as a consumer toward pinctrl */
+#include <linux/pinctrl/consumer.h>
+
+/**
+ * struct dev_pin_info - pin state container for devices
+ * @p: pinctrl handle for the containing device
+ * @default_state: the default state for the handle, if found
+ */
+struct dev_pin_info {
+       struct pinctrl *p;
+       struct pinctrl_state *default_state;
+};
+
+extern int pinctrl_bind_pins(struct device *dev);
+
+#else
+
+/* Stubs if we're not using pinctrl */
+
+static inline int pinctrl_bind_pins(struct device *dev)
+{
+       return 0;
+}
+
+#endif /* CONFIG_PINCTRL */
+#endif /* PINCTRL_DEVINFO_H */
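
For context, a hedged sketch of the expected call site: the driver core is assumed to call pinctrl_bind_pins() before probing a device so that a mapped "default" pinctrl state gets applied automatically. The wrapper function and the warning below are illustrative only, not a copy of drivers/base code.

#include <linux/device.h>
#include <linux/pinctrl/devinfo.h>

static int example_pre_probe(struct device *dev)
{
	int ret;

	/* Look up the device's pinctrl handle and apply its default state. */
	ret = pinctrl_bind_pins(dev);
	if (ret)
		dev_warn(dev, "could not bind pins: %d\n", ret);

	return ret;
}
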
index 47a1bdd88878da4b6a7be614b654b8b0ce228006..72474e18f1e0d8fef7441d3f11269eb90059b684 100644 (file)
  * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source
  *	(open emitter). Sending this config will enable open source mode, the
  *     argument is ignored.
- * @PIN_CONFIG_INPUT_SCHMITT_DISABLE: disable schmitt-trigger mode on the pin.
+ * @PIN_CONFIG_DRIVE_STRENGTH: the pin will output the current passed as
+ *     argument. The argument is in mA.
+ * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin.
+ *      If the argument != 0, schmitt-trigger mode is enabled. If it's 0,
+ *      schmitt-trigger mode is disabled.
  * @PIN_CONFIG_INPUT_SCHMITT: this will configure an input pin to run in
  *     schmitt-trigger mode. If the schmitt-trigger has adjustable hysteresis,
  *     the threshold value is given on a custom format as argument when
  * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
  *     supplies, the argument to this parameter (on a custom format) tells
  *     the driver which alternative power source to use.
+ * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to
+ *     this parameter (on a custom format) tells the driver which alternative
+ *     slew rate to use.
  * @PIN_CONFIG_LOW_POWER_MODE: this will configure the pin for low power
  *     operation, if several modes of operation are supported these can be
  *     passed in the argument on a custom form, else just use argument 1
  *     to indicate low power mode, argument 0 turns low power mode off.
+ * @PIN_CONFIG_OUTPUT: this will configure the pin in output, use argument
+ *     1 to indicate high level, argument 0 to indicate low level.
  * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if
  *     you need to pass in custom configurations to the pin controller, use
  *     PIN_CONFIG_END+1 as the base offset.
@@ -74,11 +83,14 @@ enum pin_config_param {
        PIN_CONFIG_DRIVE_PUSH_PULL,
        PIN_CONFIG_DRIVE_OPEN_DRAIN,
        PIN_CONFIG_DRIVE_OPEN_SOURCE,
-       PIN_CONFIG_INPUT_SCHMITT_DISABLE,
+       PIN_CONFIG_DRIVE_STRENGTH,
+       PIN_CONFIG_INPUT_SCHMITT_ENABLE,
        PIN_CONFIG_INPUT_SCHMITT,
        PIN_CONFIG_INPUT_DEBOUNCE,
        PIN_CONFIG_POWER_SOURCE,
+       PIN_CONFIG_SLEW_RATE,
        PIN_CONFIG_LOW_POWER_MODE,
+       PIN_CONFIG_OUTPUT,
        PIN_CONFIG_END = 0x7FFF,
 };
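
A short hedged illustration of the new generic parameters: drivers and board code typically hand them to pinconf as packed (param, argument) words. The pinconf_to_config_packed() helper is assumed to be available from this same header; the argument values are arbitrary examples.

#include <linux/pinctrl/pinconf-generic.h>

static void example_fill_pin_configs(unsigned long cfgs[3])
{
	/* Drive the pin with an 8 mA driver stage. */
	cfgs[0] = pinconf_to_config_packed(PIN_CONFIG_DRIVE_STRENGTH, 8);
	/* Enable the schmitt-trigger input stage (argument != 0). */
	cfgs[1] = pinconf_to_config_packed(PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1);
	/* Put the pin in output mode, driven high (argument 1). */
	cfgs[2] = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, 1);
}
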
 
index 04d6700d99afd610a9a002d8ddc70f0dd33b09ca..778804df293f675e505011d0b64b824c2962eae2 100644 (file)
@@ -154,6 +154,7 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
 #endif /* CONFIG_OF */
 
 extern const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev);
+extern const char *pinctrl_dev_get_devname(struct pinctrl_dev *pctldev);
 extern void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev);
 #else
 
diff --git a/include/linux/platform_data/lp8755.h b/include/linux/platform_data/lp8755.h
new file mode 100644 (file)
index 0000000..a7fd077
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * LP8755 High Performance Power Management Unit Driver: System Interface Driver
+ *
+ *                     Copyright (C) 2012 Texas Instruments
+ *
+ * Author: Daniel(Geon Si) Jeong <daniel.jeong@ti.com>
+ *             G.Shark Jeong <gshark.jeong@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _LP8755_H
+#define _LP8755_H
+
+#include <linux/regulator/consumer.h>
+
+#define LP8755_NAME "lp8755-regulator"
+/*
+ *PWR FAULT : power fault detected
+ *OCP : over current protect activated
+ *OVP : over voltage protect activated
+ *TEMP_WARN : thermal warning
+ *TEMP_SHDN : thermal shutdown detected
+ *I_LOAD : current measured
+ */
+#define LP8755_EVENT_PWR_FAULT REGULATOR_EVENT_FAIL
+#define LP8755_EVENT_OCP REGULATOR_EVENT_OVER_CURRENT
+#define LP8755_EVENT_OVP 0x10000
+#define LP8755_EVENT_TEMP_WARN 0x2000
+#define LP8755_EVENT_TEMP_SHDN REGULATOR_EVENT_OVER_TEMP
+#define LP8755_EVENT_I_LOAD    0x40000
+
+enum lp8755_bucks {
+       LP8755_BUCK0 = 0,
+       LP8755_BUCK1,
+       LP8755_BUCK2,
+       LP8755_BUCK3,
+       LP8755_BUCK4,
+       LP8755_BUCK5,
+       LP8755_BUCK_MAX,
+};
+
+/**
+ * multiphase configuration options
+ */
+enum lp8755_mphase_config {
+       MPHASE_CONF0,
+       MPHASE_CONF1,
+       MPHASE_CONF2,
+       MPHASE_CONF3,
+       MPHASE_CONF4,
+       MPHASE_CONF5,
+       MPHASE_CONF6,
+       MPHASE_CONF7,
+       MPHASE_CONF8,
+       MPHASE_CONF_MAX
+};
+
+/**
+ * struct lp8755_platform_data
+ * @mphase      : multiphase switcher configuration
+ * @buck_data   : buck0~5 init voltage in uV
+ */
+struct lp8755_platform_data {
+       int mphase;
+       struct regulator_init_data *buck_data[LP8755_BUCK_MAX];
+};
+#endif
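
A hedged sketch of board code handing this platform data to the lp8755 driver; the multiphase choice and the regulator constraints are illustrative assumptions only.

#include <linux/platform_data/lp8755.h>
#include <linux/regulator/machine.h>

static struct regulator_init_data example_buck0_init = {
	.constraints = {
		.name		= "example-buck0",
		.min_uV		= 500000,
		.max_uV		= 1670000,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
	},
};

static struct lp8755_platform_data example_lp8755_pdata = {
	.mphase		= MPHASE_CONF0,		/* multiphase option (assumed) */
	.buck_data	= {
		[LP8755_BUCK0]	= &example_buck0_init,
		/* remaining bucks left NULL in this sketch */
	},
};
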
diff --git a/include/linux/platform_data/max6697.h b/include/linux/platform_data/max6697.h
new file mode 100644 (file)
index 0000000..ed9d3b3
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * max6697.h
+ *     Copyright (c) 2012 Guenter Roeck <linux@roeck-us.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MAX6697_H
+#define MAX6697_H
+
+#include <linux/types.h>
+
+/*
+ * For all bit masks:
+ * bit 0:    local temperature
+ * bit 1..7: remote temperatures
+ */
+struct max6697_platform_data {
+       bool smbus_timeout_disable;     /* set to disable SMBus timeouts */
+       bool extended_range_enable;     /* set to enable extended temp range */
+       bool beta_compensation;         /* set to enable beta compensation */
+       u8 alert_mask;                  /* set bit to 1 to disable alert */
+       u8 over_temperature_mask;       /* set bit to 1 to disable */
+       u8 resistance_cancellation;     /* set bit to 0 to disable
+                                        * bit mask for MAX6581,
+                                        * boolean for other chips
+                                        */
+       u8 ideality_mask;               /* set bit to 0 to disable */
+       u8 ideality_value;              /* transistor ideality as per
+                                        * MAX6581 datasheet
+                                        */
+};
+
+#endif /* MAX6697_H */
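
A hedged example of the bit-mask convention documented above (bit 0 = local sensor, bits 1..7 = remote sensors); the specific mask values are invented for illustration.

#include <linux/platform_data/max6697.h>

static struct max6697_platform_data example_max6697_pdata = {
	.smbus_timeout_disable	= true,
	.alert_mask		= 0x06,	/* no ALERT from remote channels 1 and 2 */
	.over_temperature_mask	= 0x01,	/* no OVERT output for the local sensor */
	.resistance_cancellation = 0xfe,/* MAX6581: on for remotes, off locally */
};
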
diff --git a/include/linux/pm2301_charger.h b/include/linux/pm2301_charger.h
new file mode 100644 (file)
index 0000000..fc3f026
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * PM2301 charger driver.
+ *
+ * Copyright (C) 2012 ST Ericsson Corporation
+ *
+ * Contact: Olivier LAUNAY (olivier.launay@stericsson.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_PM2301_H
+#define __LINUX_PM2301_H
+
+/**
+ * struct pm2xxx_bm_charger_parameters - Charger specific parameters
+ * @ac_volt_max:       maximum allowed AC charger voltage in mV
+ * @ac_curr_max:       maximum allowed AC charger current in mA
+ */
+struct pm2xxx_bm_charger_parameters {
+       int ac_volt_max;
+       int ac_curr_max;
+};
+
+/**
+ * struct pm2xxx_bm_data - pm2xxx battery management data
+ * @enable_overshoot:   flag to enable VBAT overshoot control
+ * @chg_params:          charger parameters
+ */
+struct pm2xxx_bm_data {
+       bool enable_overshoot;
+       const struct pm2xxx_bm_charger_parameters *chg_params;
+};
+
+struct pm2xxx_charger_platform_data {
+       char **supplied_to;
+       size_t num_supplicants;
+       int i2c_bus;
+       const char *label;
+       int irq_number;
+       unsigned int lpn_gpio;
+       int irq_type;
+};
+
+struct pm2xxx_platform_data {
+       struct pm2xxx_charger_platform_data *wall_charger;
+       struct pm2xxx_bm_data *battery;
+};
+
+#endif /* __LINUX_PM2301_H */
index 97a1665eaeafe1f3b5e349e40c7016aa42af0796..8dcc0f46fc0ab44d5d4d34731745a73f42b5eceb 100644 (file)
@@ -75,7 +75,8 @@
 
 /* Supported modes with maximal current limit */
 enum bq2415x_mode {
-       BQ2415X_MODE_NONE,              /* unknown or no charger (100mA) */
+       BQ2415X_MODE_OFF,               /* offline mode (charger disabled) */
+       BQ2415X_MODE_NONE,              /* unknown charger (100mA) */
        BQ2415X_MODE_HOST_CHARGER,      /* usb host/hub charger (500mA) */
        BQ2415X_MODE_DEDICATED_CHARGER, /* dedicated charger (unlimited) */
        BQ2415X_MODE_BOOST,             /* boost mode (charging disabled) */
index 1f0ab90aff00ccbfc5075d49225d9d56ef73e4c8..25c0982eb9b13b0066ef157fba03db61d58834a0 100644 (file)
@@ -54,6 +54,8 @@ enum {
        POWER_SUPPLY_HEALTH_OVERVOLTAGE,
        POWER_SUPPLY_HEALTH_UNSPEC_FAILURE,
        POWER_SUPPLY_HEALTH_COLD,
+       POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE,
+       POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE,
 };
 
 enum {
index 9afc01e5a0a61ce5f4a9fe1e53ad6e0fb1b8e5bf..86c4b6294713a613fff6d8c3150691967baa6124 100644 (file)
@@ -98,9 +98,6 @@ int no_printk(const char *fmt, ...)
 extern asmlinkage __printf(1, 2)
 void early_printk(const char *fmt, ...);
 
-extern int printk_needs_cpu(int cpu);
-extern void printk_tick(void);
-
 #ifdef CONFIG_PRINTK
 asmlinkage __printf(5, 0)
 int vprintk_emit(int facility, int level,
index a0fc32279fc09d53a94b73b4c9dc89645d637b54..21123902366d8eb2b9b9be26b78e98e8c9badf13 100644 (file)
@@ -82,9 +82,6 @@ int task_handoff_unregister(struct notifier_block * n);
 int profile_event_register(enum profile_type, struct notifier_block * n);
 int profile_event_unregister(enum profile_type, struct notifier_block * n);
 
-int register_timer_hook(int (*hook)(struct pt_regs *));
-void unregister_timer_hook(int (*hook)(struct pt_regs *));
-
 struct pt_regs;
 
 #else
@@ -135,16 +132,6 @@ static inline int profile_event_unregister(enum profile_type t, struct notifier_
 #define profile_handoff_task(a) (0)
 #define profile_munmap(a) do { } while (0)
 
-static inline int register_timer_hook(int (*hook)(struct pt_regs *))
-{
-       return -ENOSYS;
-}
-
-static inline void unregister_timer_hook(int (*hook)(struct pt_regs *))
-{
-       return;
-}
-
 #endif /* CONFIG_PROFILING */
 
 #endif /* _LINUX_PROFILE_H */
index 1693775ecfe8fd298d299ebf1ca925c76e3a2361..89573a33ab3c43ee84cf629d84e5ff3d7f091206 100644 (file)
@@ -45,7 +45,6 @@ extern long arch_ptrace(struct task_struct *child, long request,
 extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
 extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
 extern void ptrace_disable(struct task_struct *);
-extern int ptrace_check_attach(struct task_struct *task, bool ignore_state);
 extern int ptrace_request(struct task_struct *child, long request,
                          unsigned long addr, unsigned long data);
 extern void ptrace_notify(int exit_code);
index 2ac60c9cf6448fac839074e752d61f2de7b56c0a..fea49b5da12a99bfa6c87b865a461a2a28e0495b 100644 (file)
@@ -123,9 +123,9 @@ __rb_change_child(struct rb_node *old, struct rb_node *new,
 extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
        void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
 
-static __always_inline void
-rb_erase_augmented(struct rb_node *node, struct rb_root *root,
-                  const struct rb_augment_callbacks *augment)
+static __always_inline struct rb_node *
+__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+                    const struct rb_augment_callbacks *augment)
 {
        struct rb_node *child = node->rb_right, *tmp = node->rb_left;
        struct rb_node *parent, *rebalance;
@@ -217,6 +217,14 @@ rb_erase_augmented(struct rb_node *node, struct rb_root *root,
        }
 
        augment->propagate(tmp, NULL);
+       return rebalance;
+}
+
+static __always_inline void
+rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+                  const struct rb_augment_callbacks *augment)
+{
+       struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
        if (rebalance)
                __rb_erase_color(rebalance, root, augment->rotate);
 }
index 275aa3f1062d4b8db48be4f5201dce83b6c9bcb4..b758ce17b309d933face58b1a557ded7eae6914e 100644 (file)
@@ -53,7 +53,10 @@ extern int rcutorture_runnable; /* for sysctl */
 extern void rcutorture_record_test_transition(void);
 extern void rcutorture_record_progress(unsigned long vernum);
 extern void do_trace_rcu_torture_read(char *rcutorturename,
-                                     struct rcu_head *rhp);
+                                     struct rcu_head *rhp,
+                                     unsigned long secs,
+                                     unsigned long c_old,
+                                     unsigned long c);
 #else
 static inline void rcutorture_record_test_transition(void)
 {
@@ -63,9 +66,13 @@ static inline void rcutorture_record_progress(unsigned long vernum)
 }
 #ifdef CONFIG_RCU_TRACE
 extern void do_trace_rcu_torture_read(char *rcutorturename,
-                                     struct rcu_head *rhp);
+                                     struct rcu_head *rhp,
+                                     unsigned long secs,
+                                     unsigned long c_old,
+                                     unsigned long c);
 #else
-#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
+#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
+       do { } while (0)
 #endif
 #endif
 
@@ -749,7 +756,7 @@ static inline void rcu_preempt_sleep_check(void)
  * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)
  * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may
  * be preempted, but explicit blocking is illegal.  Finally, in preemptible
- * RCU implementations in real-time (CONFIG_PREEMPT_RT) kernel builds,
+ * RCU implementations in real-time (with -rt patchset) kernel builds,
  * RCU read-side critical sections may be preempted and they may also
  * block, but only when acquiring spinlocks that are subject to priority
  * inheritance.
index b7e95bf942c9ce8936f61c61efb0261f9c09ad37..bf77dfdabef9559b021503b9163fd548ac8553de 100644 (file)
@@ -28,7 +28,8 @@ struct regmap_range_cfg;
 enum regcache_type {
        REGCACHE_NONE,
        REGCACHE_RBTREE,
-       REGCACHE_COMPRESSED
+       REGCACHE_COMPRESSED,
+       REGCACHE_FLAT,
 };
 
 /**
@@ -127,7 +128,18 @@ typedef void (*regmap_unlock)(void *);
  * @lock_arg:    this field is passed as the only argument of lock/unlock
  *               functions (ignored in case regular lock/unlock functions
  *               are not overridden).
- *
+ * @reg_read:    Optional callback that, if filled, will be used to perform
+ *               all register reads. Should only be provided for devices whose
+ *               read operation cannot be represented as a simple read
+ *               operation on a bus such as SPI or I2C. Most devices do
+ *               not need this.
+ * @reg_write:   Same as above for writing.
+ * @fast_io:     Register IO is fast. Use a spinlock instead of a mutex
+ *               to perform locking. This field is ignored if custom lock/unlock
+ *               functions are used (see fields lock/unlock of struct regmap_config).
+ *               This field is a duplicate of a similar field in
+ *               'struct regmap_bus' and serves the exact same purpose.
+ *               Use it only for "no-bus" cases.
  * @max_register: Optional, specifies the maximum valid register index.
  * @wr_table:     Optional, points to a struct regmap_access_table specifying
  *                valid ranges for write access.
@@ -177,6 +189,11 @@ struct regmap_config {
        regmap_unlock unlock;
        void *lock_arg;
 
+       int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
+       int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+
+       bool fast_io;
+
        unsigned int max_register;
        const struct regmap_access_table *wr_table;
        const struct regmap_access_table *rd_table;
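
A hedged sketch of a "no-bus" regmap as described by the new @reg_read/@reg_write/@fast_io documentation: the driver supplies its own register accessors in regmap_config instead of a regmap_bus. Passing a NULL bus (and the chip pointer as context) to devm_regmap_init() is an assumption about how the core selects this path; the accessor bodies are placeholders.

#include <linux/device.h>
#include <linux/regmap.h>

struct example_chip {
	struct device *dev;
	/* whatever state is needed to reach the hardware indirectly */
};

static int example_reg_read(void *context, unsigned int reg, unsigned int *val)
{
	struct example_chip *chip = context;

	dev_dbg(chip->dev, "read reg %#x\n", reg);
	*val = 0;			/* placeholder for the real access */
	return 0;
}

static int example_reg_write(void *context, unsigned int reg, unsigned int val)
{
	struct example_chip *chip = context;

	dev_dbg(chip->dev, "write reg %#x = %#x\n", reg, val);
	return 0;			/* placeholder for the real access */
}

static const struct regmap_config example_no_bus_config = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.reg_read	= example_reg_read,
	.reg_write	= example_reg_write,
	.fast_io	= true,		/* accessors never sleep: spinlock locking */
	.max_register	= 0xff,
};

static struct regmap *example_setup_regmap(struct example_chip *chip)
{
	return devm_regmap_init(chip->dev, NULL, chip, &example_no_bus_config);
}
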
@@ -235,14 +252,21 @@ struct regmap_range_cfg {
        unsigned int window_len;
 };
 
+struct regmap_async;
+
 typedef int (*regmap_hw_write)(void *context, const void *data,
                               size_t count);
 typedef int (*regmap_hw_gather_write)(void *context,
                                      const void *reg, size_t reg_len,
                                      const void *val, size_t val_len);
+typedef int (*regmap_hw_async_write)(void *context,
+                                    const void *reg, size_t reg_len,
+                                    const void *val, size_t val_len,
+                                    struct regmap_async *async);
 typedef int (*regmap_hw_read)(void *context,
                              const void *reg_buf, size_t reg_size,
                              void *val_buf, size_t val_size);
+typedef struct regmap_async *(*regmap_hw_async_alloc)(void);
 typedef void (*regmap_hw_free_context)(void *context);
 
 /**
@@ -255,8 +279,11 @@ typedef void (*regmap_hw_free_context)(void *context);
  * @write: Write operation.
  * @gather_write: Write operation with split register/value, return -ENOTSUPP
  *                if not implemented  on a given device.
+ * @async_write: Write operation which completes asynchronously, optional and
+ *               must serialise with respect to non-async I/O.
  * @read: Read operation.  Data is returned in the buffer used to transmit
  *         data.
+ * @async_alloc: Allocate a regmap_async() structure.
  * @read_flag_mask: Mask to be set in the top byte of the register when doing
  *                  a read.
  * @reg_format_endian_default: Default endianness for formatted register
@@ -265,13 +292,16 @@ typedef void (*regmap_hw_free_context)(void *context);
  * @val_format_endian_default: Default endianness for formatted register
  *     values. Used when the regmap_config specifies DEFAULT. If this is
  *     DEFAULT, BIG is assumed.
+ * @async_size: Size of struct used for async work.
  */
 struct regmap_bus {
        bool fast_io;
        regmap_hw_write write;
        regmap_hw_gather_write gather_write;
+       regmap_hw_async_write async_write;
        regmap_hw_read read;
        regmap_hw_free_context free_context;
+       regmap_hw_async_alloc async_alloc;
        u8 read_flag_mask;
        enum regmap_endian reg_format_endian_default;
        enum regmap_endian val_format_endian_default;
@@ -285,9 +315,9 @@ struct regmap *regmap_init_i2c(struct i2c_client *i2c,
                               const struct regmap_config *config);
 struct regmap *regmap_init_spi(struct spi_device *dev,
                               const struct regmap_config *config);
-struct regmap *regmap_init_mmio(struct device *dev,
-                               void __iomem *regs,
-                               const struct regmap_config *config);
+struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+                                   void __iomem *regs,
+                                   const struct regmap_config *config);
 
 struct regmap *devm_regmap_init(struct device *dev,
                                const struct regmap_bus *bus,
@@ -297,9 +327,44 @@ struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
                                    const struct regmap_config *config);
 struct regmap *devm_regmap_init_spi(struct spi_device *dev,
                                    const struct regmap_config *config);
-struct regmap *devm_regmap_init_mmio(struct device *dev,
-                                    void __iomem *regs,
-                                    const struct regmap_config *config);
+struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+                                        void __iomem *regs,
+                                        const struct regmap_config *config);
+
+/**
+ * regmap_init_mmio(): Initialise register map
+ *
+ * @dev: Device that will be interacted with
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+static inline struct regmap *regmap_init_mmio(struct device *dev,
+                                       void __iomem *regs,
+                                       const struct regmap_config *config)
+{
+       return regmap_init_mmio_clk(dev, NULL, regs, config);
+}
+
+/**
+ * devm_regmap_init_mmio(): Initialise managed register map
+ *
+ * @dev: Device that will be interacted with
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap.  The regmap will be automatically freed by the
+ * device management code.
+ */
+static inline struct regmap *devm_regmap_init_mmio(struct device *dev,
+                                       void __iomem *regs,
+                                       const struct regmap_config *config)
+{
+       return devm_regmap_init_mmio_clk(dev, NULL, regs, config);
+}
 
 void regmap_exit(struct regmap *map);
 int regmap_reinit_cache(struct regmap *map,
@@ -310,6 +375,8 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
                     const void *val, size_t val_len);
 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
                        size_t val_count);
+int regmap_raw_write_async(struct regmap *map, unsigned int reg,
+                          const void *val, size_t val_len);
 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val);
 int regmap_raw_read(struct regmap *map, unsigned int reg,
                    void *val, size_t val_len);
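
A minimal hedged sketch of the calling convention documented for the MMIO helpers above: failures come back as ERR_PTR() values, so callers check with IS_ERR(). The register widths and stride are placeholder values.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/regmap.h>

static int example_mmio_attach(struct device *dev, void __iomem *base)
{
	static const struct regmap_config example_cfg = {
		.reg_bits	= 32,
		.val_bits	= 32,
		.reg_stride	= 4,
	};
	struct regmap *map;

	map = devm_regmap_init_mmio(dev, base, &example_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	return 0;
}
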
@@ -321,6 +388,7 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
                             unsigned int mask, unsigned int val,
                             bool *change);
 int regmap_get_val_bytes(struct regmap *map);
+int regmap_async_complete(struct regmap *map);
 
 int regcache_sync(struct regmap *map);
 int regcache_sync_region(struct regmap *map, unsigned int min,
@@ -381,6 +449,7 @@ struct regmap_irq_chip {
        unsigned int wake_base;
        unsigned int irq_reg_stride;
        unsigned int mask_invert;
+       unsigned int wake_invert;
        bool runtime_pm;
 
        int num_regs;
@@ -422,6 +491,13 @@ static inline int regmap_raw_write(struct regmap *map, unsigned int reg,
        return -EINVAL;
 }
 
+static inline int regmap_raw_write_async(struct regmap *map, unsigned int reg,
+                                        const void *val, size_t val_len)
+{
+       WARN_ONCE(1, "regmap API is disabled");
+       return -EINVAL;
+}
+
 static inline int regmap_bulk_write(struct regmap *map, unsigned int reg,
                                    const void *val, size_t val_count)
 {
@@ -500,6 +576,11 @@ static inline void regcache_mark_dirty(struct regmap *map)
        WARN_ONCE(1, "regmap API is disabled");
 }
 
+static inline void regmap_async_complete(struct regmap *map)
+{
+       WARN_ONCE(1, "regmap API is disabled");
+}
+
 static inline int regmap_register_patch(struct regmap *map,
                                        const struct reg_default *regs,
                                        int num_regs)
index d10bb0f39c5e72fd6bb7747e47761d062f75c981..23070fd83872004cdb91367ef1857c0d6ca04a52 100644 (file)
@@ -193,6 +193,10 @@ enum regulator_type {
  *
  * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_
  * @vsel_mask: Mask for register bitfield used for selector
+ * @apply_reg: Register for initiate voltage change on the output when
+ *                using regulator_set_voltage_sel_regmap
+ * @apply_bit: Register bitfield used for initiate voltage change on the
+ *                output when using regulator_set_voltage_sel_regmap
  * @enable_reg: Register for control when using regmap enable/disable ops
  * @enable_mask: Mask for control when using regmap enable/disable ops
  *
@@ -218,6 +222,8 @@ struct regulator_desc {
 
        unsigned int vsel_reg;
        unsigned int vsel_mask;
+       unsigned int apply_reg;
+       unsigned int apply_bit;
        unsigned int enable_reg;
        unsigned int enable_mask;
        unsigned int bypass_reg;
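
A hedged sketch of a regulator_desc using the new @apply_reg/@apply_bit fields with regulator_set_voltage_sel_regmap(): after the selector is written to vsel_reg, the core is expected to set apply_bit in apply_reg to latch the new voltage. All register addresses, masks and the voltage map here are invented; a real driver would also have to provide a regmap when registering.

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/regulator/driver.h>

static struct regulator_ops example_buck_ops = {
	.list_voltage		= regulator_list_voltage_linear,
	.set_voltage_sel	= regulator_set_voltage_sel_regmap,
	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
};

static const struct regulator_desc example_buck_desc = {
	.name		= "example-buck",
	.id		= 0,
	.type		= REGULATOR_VOLTAGE,
	.owner		= THIS_MODULE,
	.ops		= &example_buck_ops,
	.n_voltages	= 64,
	.min_uV		= 600000,	/* voltage at selector 0 */
	.uV_step	= 12500,	/* per-selector step */
	.vsel_reg	= 0x10,		/* selector bits live here */
	.vsel_mask	= 0x3f,
	.apply_reg	= 0x11,		/* write apply_bit here to commit */
	.apply_bit	= BIT(0),
};
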
index 519777e3fa019f87883224154e875003ca56bbdd..1342e69542f345dada550d298f7bacf6f6fe75e9 100644 (file)
@@ -167,6 +167,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu);
 
 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
index 9531845c419f8329000b8927a341057bb2a64e6d..11d05f9fe8b6485b43ecaa00395f36919fc13e33 100644 (file)
@@ -138,6 +138,7 @@ extern void rtc_device_unregister(struct rtc_device *rtc);
 extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm);
 extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm);
 extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs);
+extern int rtc_set_ntp_time(struct timespec now);
 int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm);
 extern int rtc_read_alarm(struct rtc_device *rtc,
                        struct rtc_wkalrm *alrm);
index 54bd7cd7ecbd11449b9e15b49f9ab7e208e96413..8da67d625e13fc888413847da4e2a7ae560b3d86 100644 (file)
@@ -125,8 +125,17 @@ extern void downgrade_write(struct rw_semaphore *sem);
  */
 extern void down_read_nested(struct rw_semaphore *sem, int subclass);
 extern void down_write_nested(struct rw_semaphore *sem, int subclass);
+extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
+
+# define down_write_nest_lock(sem, nest_lock)                  \
+do {                                                           \
+       typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
+       _down_write_nest_lock(sem, &(nest_lock)->dep_map);      \
+} while (0)
+
 #else
 # define down_read_nested(sem, subclass)               down_read(sem)
+# define down_write_nest_lock(sem, nest_lock)  down_write(sem)
 # define down_write_nested(sem, subclass)      down_write(sem)
 #endif
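
A hedged sketch of the intended usage of down_write_nest_lock(): the rwsem is taken for writing while already holding an outer lock, and lockdep is told that this outer lock orders the nesting. The two locks below are local to the example; the real call sites in the tree are not shown here.

#include <linux/mutex.h>
#include <linux/rwsem.h>

static DEFINE_MUTEX(example_outer_lock);
static DECLARE_RWSEM(example_rwsem);

static void example_nested_write(void)
{
	mutex_lock(&example_outer_lock);

	/* lockdep records example_rwsem as nested inside example_outer_lock. */
	down_write_nest_lock(&example_rwsem, &example_outer_lock);

	/* ... modify data protected by both locks ... */

	up_write(&example_rwsem);
	mutex_unlock(&example_outer_lock);
}
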
 
index 206bb089c06b5541a889291f7938e80b3b480678..e4112aad296408061d1a85d8e26fae550740e608 100644 (file)
@@ -304,19 +304,6 @@ static inline void lockup_detector_init(void)
 }
 #endif
 
-#ifdef CONFIG_DETECT_HUNG_TASK
-extern unsigned int  sysctl_hung_task_panic;
-extern unsigned long sysctl_hung_task_check_count;
-extern unsigned long sysctl_hung_task_timeout_secs;
-extern unsigned long sysctl_hung_task_warnings;
-extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
-                                        void __user *buffer,
-                                        size_t *lenp, loff_t *ppos);
-#else
-/* Avoid need for ifdefs elsewhere in the code */
-enum { sysctl_hung_task_timeout_secs = 0 };
-#endif
-
 /* Attach to any functions which should be ignored in wchan output. */
 #define __sched                __attribute__((__section__(".sched.text")))
 
@@ -338,23 +325,6 @@ extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
 struct nsproxy;
 struct user_namespace;
 
-/*
- * Default maximum number of active map areas, this limits the number of vmas
- * per mm struct. Users can overwrite this number by sysctl but there is a
- * problem.
- *
- * When a program's coredump is generated as ELF format, a section is created
- * per a vma. In ELF, the number of sections is represented in unsigned short.
- * This means the number of sections should be smaller than 65535 at coredump.
- * Because the kernel adds some informative sections to a image of program at
- * generating coredump, we need some margin. The number of extra sections is
- * 1-3 now and depends on arch. We use "5" as safe margin, here.
- */
-#define MAPCOUNT_ELF_CORE_MARGIN       (5)
-#define DEFAULT_MAX_MAP_COUNT  (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
-
-extern int sysctl_max_map_count;
-
 #include <linux/aio.h>
 
 #ifdef CONFIG_MMU
@@ -1194,6 +1164,7 @@ struct sched_entity {
        /* rq "owned" by this entity/group: */
        struct cfs_rq           *my_q;
 #endif
+
 /*
  * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
  * removed when useful for applications beyond shares distribution (e.g.
@@ -1208,6 +1179,7 @@ struct sched_entity {
 struct sched_rt_entity {
        struct list_head run_list;
        unsigned long timeout;
+       unsigned long watchdog_stamp;
        unsigned int time_slice;
 
        struct sched_rt_entity *back;
@@ -1220,11 +1192,6 @@ struct sched_rt_entity {
 #endif
 };
 
-/*
- * default timeslice is 100 msecs (used only for SCHED_RR tasks).
- * Timeslices get refilled after they expire.
- */
-#define RR_TIMESLICE           (100 * HZ / 1000)
 
 struct rcu_node;
 
@@ -1367,6 +1334,15 @@ struct task_struct {
        cputime_t gtime;
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
        struct cputime prev_cputime;
+#endif
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+       seqlock_t vtime_seqlock;
+       unsigned long long vtime_snap;
+       enum {
+               VTIME_SLEEPING = 0,
+               VTIME_USER,
+               VTIME_SYS,
+       } vtime_snap_whence;
 #endif
        unsigned long nvcsw, nivcsw; /* context switch counts */
        struct timespec start_time;             /* monotonic time */
@@ -1622,37 +1598,6 @@ static inline void set_numabalancing_state(bool enabled)
 }
 #endif
 
-/*
- * Priority of a process goes from 0..MAX_PRIO-1, valid RT
- * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
- * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
- * values are inverted: lower p->prio value means higher priority.
- *
- * The MAX_USER_RT_PRIO value allows the actual maximum
- * RT priority to be separate from the value exported to
- * user-space.  This allows kernel threads to set their
- * priority to a value higher than any user task. Note:
- * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
- */
-
-#define MAX_USER_RT_PRIO       100
-#define MAX_RT_PRIO            MAX_USER_RT_PRIO
-
-#define MAX_PRIO               (MAX_RT_PRIO + 40)
-#define DEFAULT_PRIO           (MAX_RT_PRIO + 20)
-
-static inline int rt_prio(int prio)
-{
-       if (unlikely(prio < MAX_RT_PRIO))
-               return 1;
-       return 0;
-}
-
-static inline int rt_task(struct task_struct *p)
-{
-       return rt_prio(p->prio);
-}
-
 static inline struct pid *task_pid(struct task_struct *task)
 {
        return task->pids[PIDTYPE_PID].pid;
@@ -1792,6 +1737,37 @@ static inline void put_task_struct(struct task_struct *t)
                __put_task_struct(t);
 }
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+extern void task_cputime(struct task_struct *t,
+                        cputime_t *utime, cputime_t *stime);
+extern void task_cputime_scaled(struct task_struct *t,
+                               cputime_t *utimescaled, cputime_t *stimescaled);
+extern cputime_t task_gtime(struct task_struct *t);
+#else
+static inline void task_cputime(struct task_struct *t,
+                               cputime_t *utime, cputime_t *stime)
+{
+       if (utime)
+               *utime = t->utime;
+       if (stime)
+               *stime = t->stime;
+}
+
+static inline void task_cputime_scaled(struct task_struct *t,
+                                      cputime_t *utimescaled,
+                                      cputime_t *stimescaled)
+{
+       if (utimescaled)
+               *utimescaled = t->utimescaled;
+       if (stimescaled)
+               *stimescaled = t->stimescaled;
+}
+
+static inline cputime_t task_gtime(struct task_struct *t)
+{
+       return t->gtime;
+}
+#endif
 extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
 extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
 
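The accessors above replace direct reads of ->utime/->stime so the full-dynticks flavour can snapshot them consistently. A hypothetical caller, as an illustrative sketch only:

static cputime_t demo_task_total_time(struct task_struct *t)
{
        cputime_t utime, stime;

        task_cputime(t, &utime, &stime);        /* one call fills both values */
        return utime + stime;
}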
@@ -1810,6 +1786,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 #define PF_MEMALLOC    0x00000800      /* Allocating memory */
 #define PF_NPROC_EXCEEDED 0x00001000   /* set_user noticed that RLIMIT_NPROC was exceeded */
 #define PF_USED_MATH   0x00002000      /* if unset the fpu must be initialized before use */
+#define PF_USED_ASYNC  0x00004000      /* used async_schedule*(), used by module init */
 #define PF_NOFREEZE    0x00008000      /* this thread should not be frozen */
 #define PF_FROZEN      0x00010000      /* frozen for system suspend */
 #define PF_FSTRANS     0x00020000      /* inside a filesystem transaction */
@@ -2032,58 +2009,7 @@ extern void wake_up_idle_cpu(int cpu);
 static inline void wake_up_idle_cpu(int cpu) { }
 #endif
 
-extern unsigned int sysctl_sched_latency;
-extern unsigned int sysctl_sched_min_granularity;
-extern unsigned int sysctl_sched_wakeup_granularity;
-extern unsigned int sysctl_sched_child_runs_first;
-
-enum sched_tunable_scaling {
-       SCHED_TUNABLESCALING_NONE,
-       SCHED_TUNABLESCALING_LOG,
-       SCHED_TUNABLESCALING_LINEAR,
-       SCHED_TUNABLESCALING_END,
-};
-extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
-
-extern unsigned int sysctl_numa_balancing_scan_delay;
-extern unsigned int sysctl_numa_balancing_scan_period_min;
-extern unsigned int sysctl_numa_balancing_scan_period_max;
-extern unsigned int sysctl_numa_balancing_scan_period_reset;
-extern unsigned int sysctl_numa_balancing_scan_size;
-extern unsigned int sysctl_numa_balancing_settle_count;
-
-#ifdef CONFIG_SCHED_DEBUG
-extern unsigned int sysctl_sched_migration_cost;
-extern unsigned int sysctl_sched_nr_migrate;
-extern unsigned int sysctl_sched_time_avg;
-extern unsigned int sysctl_timer_migration;
-extern unsigned int sysctl_sched_shares_window;
-
-int sched_proc_update_handler(struct ctl_table *table, int write,
-               void __user *buffer, size_t *length,
-               loff_t *ppos);
-#endif
-#ifdef CONFIG_SCHED_DEBUG
-static inline unsigned int get_sysctl_timer_migration(void)
-{
-       return sysctl_timer_migration;
-}
-#else
-static inline unsigned int get_sysctl_timer_migration(void)
-{
-       return 1;
-}
-#endif
-extern unsigned int sysctl_sched_rt_period;
-extern int sysctl_sched_rt_runtime;
-
-int sched_rt_handler(struct ctl_table *table, int write,
-               void __user *buffer, size_t *lenp,
-               loff_t *ppos);
-
 #ifdef CONFIG_SCHED_AUTOGROUP
-extern unsigned int sysctl_sched_autogroup_enabled;
-
 extern void sched_autogroup_create_attach(struct task_struct *p);
 extern void sched_autogroup_detach(struct task_struct *p);
 extern void sched_autogroup_fork(struct signal_struct *sig);
@@ -2099,30 +2025,6 @@ static inline void sched_autogroup_fork(struct signal_struct *sig) { }
 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
 #endif
 
-#ifdef CONFIG_CFS_BANDWIDTH
-extern unsigned int sysctl_sched_cfs_bandwidth_slice;
-#endif
-
-#ifdef CONFIG_RT_MUTEXES
-extern int rt_mutex_getprio(struct task_struct *p);
-extern void rt_mutex_setprio(struct task_struct *p, int prio);
-extern void rt_mutex_adjust_pi(struct task_struct *p);
-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-{
-       return tsk->pi_blocked_on != NULL;
-}
-#else
-static inline int rt_mutex_getprio(struct task_struct *p)
-{
-       return p->normal_prio;
-}
-# define rt_mutex_adjust_pi(p)         do { } while (0)
-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-{
-       return false;
-}
-#endif
-
 extern bool yield_to(struct task_struct *p, bool preempt);
 extern void set_user_nice(struct task_struct *p, long nice);
 extern int task_prio(const struct task_struct *p);
@@ -2713,7 +2615,16 @@ static inline void thread_group_cputime_init(struct signal_struct *sig)
 extern void recalc_sigpending_and_wake(struct task_struct *t);
 extern void recalc_sigpending(void);
 
-extern void signal_wake_up(struct task_struct *t, int resume_stopped);
+extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
+
+static inline void signal_wake_up(struct task_struct *t, bool resume)
+{
+       signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
+}
+static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
+{
+       signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
+}
 
 /*
  * Wrappers for p->thread_info->cpu access. No-op on UP.
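Both wrappers funnel into signal_wake_up_state(); only the extra state bit differs. A hypothetical illustration, not part of the diff:

static void demo_wake(struct task_struct *t, bool from_ptrace_stop)
{
        if (from_ptrace_stop)
                ptrace_signal_wake_up(t, true);  /* also wake a __TASK_TRACED stop */
        else
                signal_wake_up(t, false);        /* ordinary signal wake-up */
}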
@@ -2743,14 +2654,15 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
-extern void normalize_rt_tasks(void);
-
 #ifdef CONFIG_CGROUP_SCHED
 
 extern struct task_group root_task_group;
 
 extern struct task_group *sched_create_group(struct task_group *parent);
+extern void sched_online_group(struct task_group *tg,
+                              struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);
+extern void sched_offline_group(struct task_group *tg);
 extern void sched_move_task(struct task_struct *tsk);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
new file mode 100644 (file)
index 0000000..94e19ea
--- /dev/null
@@ -0,0 +1,58 @@
+#ifndef _SCHED_RT_H
+#define _SCHED_RT_H
+
+/*
+ * Priority of a process goes from 0..MAX_PRIO-1, valid RT
+ * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
+ * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
+ * values are inverted: lower p->prio value means higher priority.
+ *
+ * The MAX_USER_RT_PRIO value allows the actual maximum
+ * RT priority to be separate from the value exported to
+ * user-space.  This allows kernel threads to set their
+ * priority to a value higher than any user task. Note:
+ * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
+ */
+
+#define MAX_USER_RT_PRIO       100
+#define MAX_RT_PRIO            MAX_USER_RT_PRIO
+
+#define MAX_PRIO               (MAX_RT_PRIO + 40)
+#define DEFAULT_PRIO           (MAX_RT_PRIO + 20)
+
+static inline int rt_prio(int prio)
+{
+       if (unlikely(prio < MAX_RT_PRIO))
+               return 1;
+       return 0;
+}
+
+static inline int rt_task(struct task_struct *p)
+{
+       return rt_prio(p->prio);
+}
+
+#ifdef CONFIG_RT_MUTEXES
+extern int rt_mutex_getprio(struct task_struct *p);
+extern void rt_mutex_setprio(struct task_struct *p, int prio);
+extern void rt_mutex_adjust_pi(struct task_struct *p);
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+       return tsk->pi_blocked_on != NULL;
+}
+#else
+static inline int rt_mutex_getprio(struct task_struct *p)
+{
+       return p->normal_prio;
+}
+# define rt_mutex_adjust_pi(p)         do { } while (0)
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+       return false;
+}
+#endif
+
+extern void normalize_rt_tasks(void);
+
+
+#endif /* _SCHED_RT_H */
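Since a lower ->prio value means a higher priority, the helpers above keep callers free of priority arithmetic. A hypothetical user, for illustration only:

static inline bool demo_effectively_rt(struct task_struct *p)
{
        /* true if either the task's own or its PI-boosted priority is in the RT range */
        return rt_task(p) || rt_prio(rt_mutex_getprio(p));
}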
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
new file mode 100644 (file)
index 0000000..d2bb0ae
--- /dev/null
@@ -0,0 +1,110 @@
+#ifndef _SCHED_SYSCTL_H
+#define _SCHED_SYSCTL_H
+
+#ifdef CONFIG_DETECT_HUNG_TASK
+extern unsigned int  sysctl_hung_task_panic;
+extern unsigned long sysctl_hung_task_check_count;
+extern unsigned long sysctl_hung_task_timeout_secs;
+extern unsigned long sysctl_hung_task_warnings;
+extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
+                                        void __user *buffer,
+                                        size_t *lenp, loff_t *ppos);
+#else
+/* Avoid need for ifdefs elsewhere in the code */
+enum { sysctl_hung_task_timeout_secs = 0 };
+#endif
+
+/*
+ * Default maximum number of active map areas, this limits the number of vmas
+ * per mm struct. Users can overwrite this number by sysctl but there is a
+ * problem.
+ *
+ * When a program's coredump is generated as ELF format, a section is created
+ * per a vma. In ELF, the number of sections is represented in unsigned short.
+ * This means the number of sections should be smaller than 65535 at coredump.
+ * Because the kernel adds some informative sections to a image of program at
+ * generating coredump, we need some margin. The number of extra sections is
+ * 1-3 now and depends on arch. We use "5" as safe margin, here.
+ */
+#define MAPCOUNT_ELF_CORE_MARGIN       (5)
+#define DEFAULT_MAX_MAP_COUNT  (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+
+extern int sysctl_max_map_count;
+
+extern unsigned int sysctl_sched_latency;
+extern unsigned int sysctl_sched_min_granularity;
+extern unsigned int sysctl_sched_wakeup_granularity;
+extern unsigned int sysctl_sched_child_runs_first;
+
+enum sched_tunable_scaling {
+       SCHED_TUNABLESCALING_NONE,
+       SCHED_TUNABLESCALING_LOG,
+       SCHED_TUNABLESCALING_LINEAR,
+       SCHED_TUNABLESCALING_END,
+};
+extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
+
+extern unsigned int sysctl_numa_balancing_scan_delay;
+extern unsigned int sysctl_numa_balancing_scan_period_min;
+extern unsigned int sysctl_numa_balancing_scan_period_max;
+extern unsigned int sysctl_numa_balancing_scan_period_reset;
+extern unsigned int sysctl_numa_balancing_scan_size;
+extern unsigned int sysctl_numa_balancing_settle_count;
+
+#ifdef CONFIG_SCHED_DEBUG
+extern unsigned int sysctl_sched_migration_cost;
+extern unsigned int sysctl_sched_nr_migrate;
+extern unsigned int sysctl_sched_time_avg;
+extern unsigned int sysctl_timer_migration;
+extern unsigned int sysctl_sched_shares_window;
+
+int sched_proc_update_handler(struct ctl_table *table, int write,
+               void __user *buffer, size_t *length,
+               loff_t *ppos);
+#endif
+#ifdef CONFIG_SCHED_DEBUG
+static inline unsigned int get_sysctl_timer_migration(void)
+{
+       return sysctl_timer_migration;
+}
+#else
+static inline unsigned int get_sysctl_timer_migration(void)
+{
+       return 1;
+}
+#endif
+
+/*
+ *  control realtime throttling:
+ *
+ *  /proc/sys/kernel/sched_rt_period_us
+ *  /proc/sys/kernel/sched_rt_runtime_us
+ */
+extern unsigned int sysctl_sched_rt_period;
+extern int sysctl_sched_rt_runtime;
+
+#ifdef CONFIG_CFS_BANDWIDTH
+extern unsigned int sysctl_sched_cfs_bandwidth_slice;
+#endif
+
+#ifdef CONFIG_SCHED_AUTOGROUP
+extern unsigned int sysctl_sched_autogroup_enabled;
+#endif
+
+/*
+ * default timeslice is 100 msecs (used only for SCHED_RR tasks).
+ * Timeslices get refilled after they expire.
+ */
+#define RR_TIMESLICE           (100 * HZ / 1000)
+
+extern int sched_rr_timeslice;
+
+extern int sched_rr_handler(struct ctl_table *table, int write,
+               void __user *buffer, size_t *lenp,
+               loff_t *ppos);
+
+extern int sched_rt_handler(struct ctl_table *table, int write,
+               void __user *buffer, size_t *lenp,
+               loff_t *ppos);
+
+#endif /* _SCHED_SYSCTL_H */
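Worked example (editorial note, not part of the header): RR_TIMESLICE is expressed in jiffies, so with HZ=1000 it evaluates to 100 ticks and with HZ=250 to 100 * 250 / 1000 = 25 ticks, both equal to the 100 ms SCHED_RR default; the sched_rr_timeslice variable and sched_rr_handler() declared above presumably expose that value as a runtime tunable.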
index 0f6afc657f778f2e682e7fb108e97b6788a53973..eee7478cda701ddeabc00150f607f368a4239350 100644 (file)
@@ -989,17 +989,29 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     tells the LSM to decrement the number of secmark labeling rules loaded
  * @req_classify_flow:
  *     Sets the flow's sid to the openreq sid.
+ * @tun_dev_alloc_security:
+ *     This hook allows a module to allocate a security structure for a TUN
+ *     device.
+ *     @security pointer to a security structure pointer.
+ *     Returns a zero on success, negative values on failure.
+ * @tun_dev_free_security:
+ *     This hook allows a module to free the security structure for a TUN
+ *     device.
+ *     @security pointer to the TUN device's security structure
  * @tun_dev_create:
  *     Check permissions prior to creating a new TUN device.
- * @tun_dev_post_create:
- *     This hook allows a module to update or allocate a per-socket security
- *     structure.
- *     @sk contains the newly created sock structure.
+ * @tun_dev_attach_queue:
+ *     Check permissions prior to attaching to a TUN device queue.
+ *     @security pointer to the TUN device's security structure.
  * @tun_dev_attach:
- *     Check permissions prior to attaching to a persistent TUN device.  This
- *     hook can also be used by the module to update any security state
+ *     This hook can be used by the module to update any security state
  *     associated with the TUN device's sock structure.
  *     @sk contains the existing sock structure.
+ *     @security pointer to the TUN device's security structure.
+ * @tun_dev_open:
+ *     This hook can be used by the module to update any security state
+ *     associated with the TUN device's security structure.
+ *     @security pointer to the TUN device's security structure.
  *
  * Security hooks for XFRM operations.
  *
@@ -1620,9 +1632,12 @@ struct security_operations {
        void (*secmark_refcount_inc) (void);
        void (*secmark_refcount_dec) (void);
        void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl);
-       int (*tun_dev_create)(void);
-       void (*tun_dev_post_create)(struct sock *sk);
-       int (*tun_dev_attach)(struct sock *sk);
+       int (*tun_dev_alloc_security) (void **security);
+       void (*tun_dev_free_security) (void *security);
+       int (*tun_dev_create) (void);
+       int (*tun_dev_attach_queue) (void *security);
+       int (*tun_dev_attach) (struct sock *sk, void *security);
+       int (*tun_dev_open) (void *security);
 #endif /* CONFIG_SECURITY_NETWORK */
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -2566,9 +2581,12 @@ void security_inet_conn_established(struct sock *sk,
 int security_secmark_relabel_packet(u32 secid);
 void security_secmark_refcount_inc(void);
 void security_secmark_refcount_dec(void);
+int security_tun_dev_alloc_security(void **security);
+void security_tun_dev_free_security(void *security);
 int security_tun_dev_create(void);
-void security_tun_dev_post_create(struct sock *sk);
-int security_tun_dev_attach(struct sock *sk);
+int security_tun_dev_attach_queue(void *security);
+int security_tun_dev_attach(struct sock *sk, void *security);
+int security_tun_dev_open(void *security);
 
 #else  /* CONFIG_SECURITY_NETWORK */
 static inline int security_unix_stream_connect(struct sock *sock,
@@ -2733,16 +2751,31 @@ static inline void security_secmark_refcount_dec(void)
 {
 }
 
+static inline int security_tun_dev_alloc_security(void **security)
+{
+       return 0;
+}
+
+static inline void security_tun_dev_free_security(void *security)
+{
+}
+
 static inline int security_tun_dev_create(void)
 {
        return 0;
 }
 
-static inline void security_tun_dev_post_create(struct sock *sk)
+static inline int security_tun_dev_attach_queue(void *security)
+{
+       return 0;
+}
+
+static inline int security_tun_dev_attach(struct sock *sk, void *security)
 {
+       return 0;
 }
 
-static inline int security_tun_dev_attach(struct sock *sk)
+static inline int security_tun_dev_open(void *security)
 {
        return 0;
 }
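A hypothetical LSM backend for the two new allocation hooks, shown only as a sketch (the names and the u32 blob are made up; <linux/slab.h> is assumed):

static int demo_tun_dev_alloc_security(void **security)
{
        u32 *sid = kzalloc(sizeof(*sid), GFP_KERNEL);

        if (!sid)
                return -ENOMEM;
        *security = sid;
        return 0;
}

static void demo_tun_dev_free_security(void *security)
{
        kfree(security);
}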
index e0106d8581d380c2bee45d10141297cb8ac0b4b0..c65dee059913c8d429a614bf3c9b16e088369216 100644 (file)
@@ -14,6 +14,8 @@ struct smpboot_thread_data;
  * @thread_should_run: Check whether the thread should run or not. Called with
  *                     preemption disabled.
  * @thread_fn:         The associated thread function
+ * @create:            Optional setup function, called when the thread gets
+ *                     created (Not called from the thread context)
  * @setup:             Optional setup function, called when the thread gets
  *                     operational the first time
  * @cleanup:           Optional cleanup function, called when the thread
@@ -22,6 +24,7 @@ struct smpboot_thread_data;
  *                     parked (cpu offline)
  * @unpark:            Optional unpark function, called when the thread is
  *                     unparked (cpu online)
+ * @selfparking:       Thread is not parked by the park function.
  * @thread_comm:       The base name of the thread
  */
 struct smp_hotplug_thread {
@@ -29,10 +32,12 @@ struct smp_hotplug_thread {
        struct list_head                list;
        int                             (*thread_should_run)(unsigned int cpu);
        void                            (*thread_fn)(unsigned int cpu);
+       void                            (*create)(unsigned int cpu);
        void                            (*setup)(unsigned int cpu);
        void                            (*cleanup)(unsigned int cpu, bool online);
        void                            (*park)(unsigned int cpu);
        void                            (*unpark)(unsigned int cpu);
+       bool                            selfparking;
        const char                      *thread_comm;
 };
 
index 6eb691b083581ff88ffd9e7c3da73bc5aa2170fd..04f4121a23ae2b6f5854bf35e7678cd9f93fe991 100644 (file)
@@ -151,30 +151,14 @@ void srcu_barrier(struct srcu_struct *sp);
  * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
  * and while lockdep is disabled.
  *
- * Note that if the CPU is in the idle loop from an RCU point of view
- * (ie: that we are in the section between rcu_idle_enter() and
- * rcu_idle_exit()) then srcu_read_lock_held() returns false even if
- * the CPU did an srcu_read_lock().  The reason for this is that RCU
- * ignores CPUs that are in such a section, considering these as in
- * extended quiescent state, so such a CPU is effectively never in an
- * RCU read-side critical section regardless of what RCU primitives it
- * invokes.  This state of affairs is required --- we need to keep an
- * RCU-free window in idle where the CPU may possibly enter into low
- * power mode. This way we can notice an extended quiescent state to
- * other CPUs that started a grace period. Otherwise we would delay any
- * grace period as long as we run in the idle task.
- *
- * Similarly, we avoid claiming an SRCU read lock held if the current
- * CPU is offline.
+ * Note that SRCU is based on its own state machine and it doesn't
+ * rely on normal RCU, so it can be called from a CPU which, from an
+ * RCU point of view, is in the idle loop or offline.
  */
 static inline int srcu_read_lock_held(struct srcu_struct *sp)
 {
        if (!debug_lockdep_rcu_enabled())
                return 1;
-       if (rcu_is_cpu_idle())
-               return 0;
-       if (!rcu_lockdep_current_cpu_online())
-               return 0;
        return lock_is_held(&sp->dep_map);
 }
 
@@ -236,8 +220,6 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
        int retval = __srcu_read_lock(sp);
 
        rcu_lock_acquire(&(sp)->dep_map);
-       rcu_lockdep_assert(!rcu_is_cpu_idle(),
-                          "srcu_read_lock() used illegally while idle");
        return retval;
 }
 
@@ -251,8 +233,6 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
 static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
        __releases(sp)
 {
-       rcu_lockdep_assert(!rcu_is_cpu_idle(),
-                          "srcu_read_unlock() used illegally while idle");
        rcu_lock_release(&(sp)->dep_map);
        __srcu_read_unlock(sp, idx);
 }
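For reference, a plain SRCU read side, which per the relaxed comment above may now legitimately run on a CPU that RCU considers idle (illustrative sketch, not from the patch):

static struct srcu_struct demo_srcu;    /* init_srcu_struct(&demo_srcu) during setup */

static void demo_reader(void)
{
        int idx = srcu_read_lock(&demo_srcu);
        /* ... dereference data published under demo_srcu ... */
        srcu_read_unlock(&demo_srcu, idx);
}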
index 1a6567b48492d3dd782b0646a9d87d496b8c95f1..553272e6af554844fbea920f0c3b08707c52e13a 100644 (file)
@@ -8,6 +8,8 @@
 
 #include <linux/clockchips.h>
 #include <linux/irqflags.h>
+#include <linux/percpu.h>
+#include <linux/hrtimer.h>
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 
@@ -122,13 +124,26 @@ static inline int tick_oneshot_mode_active(void) { return 0; }
 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 
 # ifdef CONFIG_NO_HZ
+DECLARE_PER_CPU(struct tick_sched, tick_cpu_sched);
+
+static inline int tick_nohz_tick_stopped(void)
+{
+       return __this_cpu_read(tick_cpu_sched.tick_stopped);
+}
+
 extern void tick_nohz_idle_enter(void);
 extern void tick_nohz_idle_exit(void);
 extern void tick_nohz_irq_exit(void);
 extern ktime_t tick_nohz_get_sleep_length(void);
 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
 extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
-# else
+
+# else /* !CONFIG_NO_HZ */
+static inline int tick_nohz_tick_stopped(void)
+{
+       return 0;
+}
+
 static inline void tick_nohz_idle_enter(void) { }
 static inline void tick_nohz_idle_exit(void) { }
 
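One plausible use of the new predicate (illustrative sketch; the names are hypothetical and <linux/timer.h> plus <linux/jiffies.h> are assumed):

static void demo_maybe_rearm(struct timer_list *t, unsigned long delay)
{
        if (tick_nohz_tick_stopped())
                return;                 /* CPU is tickless-idle; avoid waking it */
        mod_timer(t, jiffies + delay);
}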
index 4d358e9d10f100757176df607b502a451aafc6bb..a3ab6a814a9cb0d6093875b36d0e62207fefb09e 100644 (file)
@@ -115,8 +115,20 @@ static inline bool timespec_valid_strict(const struct timespec *ts)
        return true;
 }
 
+extern bool persistent_clock_exist;
+
+#ifdef ALWAYS_USE_PERSISTENT_CLOCK
+#define has_persistent_clock() true
+#else
+static inline bool has_persistent_clock(void)
+{
+       return persistent_clock_exist;
+}
+#endif
+
 extern void read_persistent_clock(struct timespec *ts);
 extern void read_boot_clock(struct timespec *ts);
+extern int persistent_clock_is_local;
 extern int update_persistent_clock(struct timespec now);
 void timekeeping_init(void);
 extern int timekeeping_suspended;
@@ -158,6 +170,7 @@ extern int do_setitimer(int which, struct itimerval *value,
                        struct itimerval *ovalue);
 extern unsigned int alarm_setitimer(unsigned int seconds);
 extern int do_getitimer(int which, struct itimerval *value);
+extern int __getnstimeofday(struct timespec *tv);
 extern void getnstimeofday(struct timespec *tv);
 extern void getrawmonotonic(struct timespec *ts);
 extern void getnstime_raw_and_real(struct timespec *ts_raw,
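Illustrative sketch of the two additions (not part of the diff; the function name is hypothetical and <linux/printk.h> is assumed): judging by the int return type, __getnstimeofday() reports failure via its return value rather than insisting on success, and has_persistent_clock() gates hardware-clock handling.

static void demo_log_wall_time(void)
{
        struct timespec now;

        if (!has_persistent_clock())
                pr_info("no persistent clock: wall time must come from userspace\n");

        if (__getnstimeofday(&now) == 0)
                pr_info("wall time %ld.%09ld\n", (long)now.tv_sec, now.tv_nsec);
}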
index 44893e5ec8f74c6d3bd358cb250a24c5aea25541..3251965bf4cc704378939e7f0080f97625e2cfd3 100644 (file)
@@ -23,12 +23,15 @@ static inline void bacct_add_tsk(struct user_namespace *user_ns,
 #ifdef CONFIG_TASK_XACCT
 extern void xacct_add_tsk(struct taskstats *stats, struct task_struct *p);
 extern void acct_update_integrals(struct task_struct *tsk);
+extern void acct_account_cputime(struct task_struct *tsk);
 extern void acct_clear_integrals(struct task_struct *tsk);
 #else
 static inline void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
 {}
 static inline void acct_update_integrals(struct task_struct *tsk)
 {}
+static inline void acct_account_cputime(struct task_struct *tsk)
+{}
 static inline void acct_clear_integrals(struct task_struct *tsk)
 {}
 #endif /* CONFIG_TASK_XACCT */
index 4f628a6fc5b415cb763e9fd3b9260526649a7436..02b83db8e2c5d2f330407a7f76c42e7ff0ac11c2 100644 (file)
@@ -35,13 +35,20 @@ struct inode;
 # include <asm/uprobes.h>
 #endif
 
+#define UPROBE_HANDLER_REMOVE          1
+#define UPROBE_HANDLER_MASK            1
+
+enum uprobe_filter_ctx {
+       UPROBE_FILTER_REGISTER,
+       UPROBE_FILTER_UNREGISTER,
+       UPROBE_FILTER_MMAP,
+};
+
 struct uprobe_consumer {
        int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
-       /*
-        * filter is optional; If a filter exists, handler is run
-        * if and only if filter returns true.
-        */
-       bool (*filter)(struct uprobe_consumer *self, struct task_struct *task);
+       bool (*filter)(struct uprobe_consumer *self,
+                               enum uprobe_filter_ctx ctx,
+                               struct mm_struct *mm);
 
        struct uprobe_consumer *next;
 };
@@ -94,6 +101,7 @@ extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsign
 extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
 extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
 extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
+extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
 extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
 extern int uprobe_mmap(struct vm_area_struct *vma);
 extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end);
@@ -117,6 +125,11 @@ uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
 {
        return -ENOSYS;
 }
+static inline int
+uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool add)
+{
+       return -ENOSYS;
+}
 static inline void
 uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
 {
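A consumer filter written against the new signature, as an illustrative sketch (the name is hypothetical):

static bool demo_uprobe_filter(struct uprobe_consumer *self,
                               enum uprobe_filter_ctx ctx,
                               struct mm_struct *mm)
{
        /* keep the breakpoint everywhere except when consulted for unregister */
        return ctx != UPROBE_FILTER_UNREGISTER;
}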
index 689b14b26c8d84343d8271b87d9aefca00b20904..4d22d0f6167aa49b653c8d942c2e00d61be689b8 100644 (file)
@@ -357,6 +357,8 @@ struct usb_bus {
        int bandwidth_int_reqs;         /* number of Interrupt requests */
        int bandwidth_isoc_reqs;        /* number of Isoc. requests */
 
+       unsigned resuming_ports;        /* bit array: resuming root-hub ports */
+
 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
        struct mon_bus *mon_bus;        /* non-null when associated */
        int monitored;                  /* non-zero when monitored */
index 608050b2545f917f790fca91bc9f89042626752c..0a78df5f6cfd23d616ecd4572b6079d1f8605dd2 100644 (file)
@@ -430,6 +430,9 @@ extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd);
 extern void usb_wakeup_notification(struct usb_device *hdev,
                unsigned int portnum);
 
+extern void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum);
+extern void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum);
+
 /* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */
 #define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1)
 #define        usb_dotoggle(dev, ep, out)  ((dev)->toggle[out] ^= (1 << (ep)))
index bd45eb7bedc8f365ab70704c552e93041af4ac94..0e5ac93bab101bea0526ee3b54f52ed3b0f72099 100644 (file)
@@ -33,6 +33,7 @@ struct usbnet {
        wait_queue_head_t       *wait;
        struct mutex            phy_mutex;
        unsigned char           suspend_count;
+       unsigned char           pkt_cnt, pkt_err;
 
        /* i/o info: pipes etc */
        unsigned                in, out;
@@ -70,6 +71,7 @@ struct usbnet {
 #              define EVENT_DEV_OPEN   7
 #              define EVENT_DEVICE_REPORT_IDLE 8
 #              define EVENT_NO_RUNTIME_PM      9
+#              define EVENT_RX_KILL    10
 };
 
 static inline struct usb_driver *driver_of(struct usb_interface *intf)
@@ -107,6 +109,7 @@ struct driver_info {
  */
 #define FLAG_MULTI_PACKET      0x2000
 #define FLAG_RX_ASSEMBLE       0x4000  /* rx packets may span >1 frames */
+#define FLAG_NOARP             0x8000  /* device can't do ARP */
 
        /* init device ... can sleep, or cause probe() failure */
        int     (*bind)(struct usbnet *, struct usb_interface *);
index ae30ab58431ab7fd30971fbee3f655a3c8aeb2b1..71a5782d8c592fc027cd1e0ce7c4c5a99787c11f 100644 (file)
@@ -6,15 +6,46 @@ struct task_struct;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 extern void vtime_task_switch(struct task_struct *prev);
 extern void vtime_account_system(struct task_struct *tsk);
-extern void vtime_account_system_irqsafe(struct task_struct *tsk);
 extern void vtime_account_idle(struct task_struct *tsk);
 extern void vtime_account_user(struct task_struct *tsk);
-extern void vtime_account(struct task_struct *tsk);
-#else
+extern void vtime_account_irq_enter(struct task_struct *tsk);
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+static inline bool vtime_accounting_enabled(void) { return true; }
+#endif
+
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
+
 static inline void vtime_task_switch(struct task_struct *prev) { }
 static inline void vtime_account_system(struct task_struct *tsk) { }
-static inline void vtime_account_system_irqsafe(struct task_struct *tsk) { }
-static inline void vtime_account(struct task_struct *tsk) { }
+static inline void vtime_account_user(struct task_struct *tsk) { }
+static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
+static inline bool vtime_accounting_enabled(void) { return false; }
+#endif
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+extern void arch_vtime_task_switch(struct task_struct *tsk);
+extern void vtime_account_irq_exit(struct task_struct *tsk);
+extern bool vtime_accounting_enabled(void);
+extern void vtime_user_enter(struct task_struct *tsk);
+static inline void vtime_user_exit(struct task_struct *tsk)
+{
+       vtime_account_user(tsk);
+}
+extern void vtime_guest_enter(struct task_struct *tsk);
+extern void vtime_guest_exit(struct task_struct *tsk);
+extern void vtime_init_idle(struct task_struct *tsk);
+#else
+static inline void vtime_account_irq_exit(struct task_struct *tsk)
+{
+       /* On hard|softirq exit we always account to hard|softirq cputime */
+       vtime_account_system(tsk);
+}
+static inline void vtime_user_enter(struct task_struct *tsk) { }
+static inline void vtime_user_exit(struct task_struct *tsk) { }
+static inline void vtime_guest_enter(struct task_struct *tsk) { }
+static inline void vtime_guest_exit(struct task_struct *tsk) { }
+static inline void vtime_init_idle(struct task_struct *tsk) { }
 #endif
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -23,25 +54,15 @@ extern void irqtime_account_irq(struct task_struct *tsk);
 static inline void irqtime_account_irq(struct task_struct *tsk) { }
 #endif
 
-static inline void vtime_account_irq_enter(struct task_struct *tsk)
+static inline void account_irq_enter_time(struct task_struct *tsk)
 {
-       /*
-        * Hardirq can interrupt idle task anytime. So we need vtime_account()
-        * that performs the idle check in CONFIG_VIRT_CPU_ACCOUNTING.
-        * Softirq can also interrupt idle task directly if it calls
-        * local_bh_enable(). Such case probably don't exist but we never know.
-        * Ksoftirqd is not concerned because idle time is flushed on context
-        * switch. Softirqs in the end of hardirqs are also not a problem because
-        * the idle time is flushed on hardirq time already.
-        */
-       vtime_account(tsk);
+       vtime_account_irq_enter(tsk);
        irqtime_account_irq(tsk);
 }
 
-static inline void vtime_account_irq_exit(struct task_struct *tsk)
+static inline void account_irq_exit_time(struct task_struct *tsk)
 {
-       /* On hard|softirq exit we always account to hard|softirq cputime */
-       vtime_account_system(tsk);
+       vtime_account_irq_exit(tsk);
        irqtime_account_irq(tsk);
 }
 
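The hunk above leaves three flavours of vtime_accounting_enabled(): compile-time true for CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, an out-of-line check for the _GEN case, and false otherwise. A hypothetical caller (illustrative sketch, <linux/printk.h> assumed):

static void demo_report_accounting_mode(void)
{
        if (vtime_accounting_enabled())
                pr_info("precise vtime accounting active\n");
        else
                pr_info("falling back to tick-based cputime accounting\n");
}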
index 2b58905d3504f71f71bb7636f2e195c80090c577..8afab27cdbc2913bd95e8becb6c08fdfa06b7de9 100644 (file)
@@ -27,7 +27,7 @@ void delayed_work_timer_fn(unsigned long __data);
 enum {
        WORK_STRUCT_PENDING_BIT = 0,    /* work item is pending execution */
        WORK_STRUCT_DELAYED_BIT = 1,    /* work item is delayed */
-       WORK_STRUCT_CWQ_BIT     = 2,    /* data points to cwq */
+       WORK_STRUCT_PWQ_BIT     = 2,    /* data points to pwq */
        WORK_STRUCT_LINKED_BIT  = 3,    /* next work is linked to this one */
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
        WORK_STRUCT_STATIC_BIT  = 4,    /* static initializer (debugobjects) */
@@ -40,7 +40,7 @@ enum {
 
        WORK_STRUCT_PENDING     = 1 << WORK_STRUCT_PENDING_BIT,
        WORK_STRUCT_DELAYED     = 1 << WORK_STRUCT_DELAYED_BIT,
-       WORK_STRUCT_CWQ         = 1 << WORK_STRUCT_CWQ_BIT,
+       WORK_STRUCT_PWQ         = 1 << WORK_STRUCT_PWQ_BIT,
        WORK_STRUCT_LINKED      = 1 << WORK_STRUCT_LINKED_BIT,
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
        WORK_STRUCT_STATIC      = 1 << WORK_STRUCT_STATIC_BIT,
@@ -57,29 +57,36 @@ enum {
 
        /* special cpu IDs */
        WORK_CPU_UNBOUND        = NR_CPUS,
-       WORK_CPU_NONE           = NR_CPUS + 1,
-       WORK_CPU_LAST           = WORK_CPU_NONE,
+       WORK_CPU_END            = NR_CPUS + 1,
 
        /*
-        * Reserve 7 bits off of cwq pointer w/ debugobjects turned
-        * off.  This makes cwqs aligned to 256 bytes and allows 15
-        * workqueue flush colors.
+        * Reserve 7 bits off of pwq pointer w/ debugobjects turned off.
+        * This makes pwqs aligned to 256 bytes and allows 15 workqueue
+        * flush colors.
         */
        WORK_STRUCT_FLAG_BITS   = WORK_STRUCT_COLOR_SHIFT +
                                  WORK_STRUCT_COLOR_BITS,
 
-       /* data contains off-queue information when !WORK_STRUCT_CWQ */
+       /* data contains off-queue information when !WORK_STRUCT_PWQ */
        WORK_OFFQ_FLAG_BASE     = WORK_STRUCT_FLAG_BITS,
 
        WORK_OFFQ_CANCELING     = (1 << WORK_OFFQ_FLAG_BASE),
 
+       /*
+        * When a work item is off queue, its high bits point to the last
+        * pool it was on.  Cap at 31 bits and use the highest number to
+        * indicate that no pool is associated.
+        */
        WORK_OFFQ_FLAG_BITS     = 1,
-       WORK_OFFQ_CPU_SHIFT     = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
+       WORK_OFFQ_POOL_SHIFT    = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
+       WORK_OFFQ_LEFT          = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
+       WORK_OFFQ_POOL_BITS     = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
+       WORK_OFFQ_POOL_NONE     = (1LU << WORK_OFFQ_POOL_BITS) - 1,
 
        /* convenience constants */
        WORK_STRUCT_FLAG_MASK   = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
        WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
-       WORK_STRUCT_NO_CPU      = (unsigned long)WORK_CPU_NONE << WORK_OFFQ_CPU_SHIFT,
+       WORK_STRUCT_NO_POOL     = (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,
 
        /* bit mask for work_busy() return values */
        WORK_BUSY_PENDING       = 1 << 0,
@@ -95,13 +102,16 @@ struct work_struct {
 #endif
 };
 
-#define WORK_DATA_INIT()       ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
+#define WORK_DATA_INIT()       ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL)
 #define WORK_DATA_STATIC_INIT()        \
-       ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC)
+       ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)
 
 struct delayed_work {
        struct work_struct work;
        struct timer_list timer;
+
+       /* target workqueue and CPU ->timer uses to queue ->work */
+       struct workqueue_struct *wq;
        int cpu;
 };
 
@@ -426,7 +436,6 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 extern void workqueue_set_max_active(struct workqueue_struct *wq,
                                     int max_active);
 extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
-extern unsigned int work_cpu(struct work_struct *work);
 extern unsigned int work_busy(struct work_struct *work);
 
 /*
index 0707fb9551aa4c1011c88969a42cd4482450d035..a68f838a132c522df397b9398f8f2d64646008c0 100644 (file)
@@ -143,6 +143,8 @@ static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
 extern int             ip4_datagram_connect(struct sock *sk, 
                                             struct sockaddr *uaddr, int addr_len);
 
+extern void ip4_datagram_release_cb(struct sock *sk);
+
 struct ip_reply_arg {
        struct kvec iov[1];   
        int         flags;
index d8f5b9f5216939d2053c8c99ad54925fb5a0ce5c..e98aeb3da033a38e29f745d5f043a33d3888bcc3 100644 (file)
@@ -31,6 +31,8 @@ extern void nf_conntrack_cleanup(struct net *net);
 extern int nf_conntrack_proto_init(struct net *net);
 extern void nf_conntrack_proto_fini(struct net *net);
 
+extern void nf_conntrack_cleanup_end(void);
+
 extern bool
 nf_ct_get_tuple(const struct sk_buff *skb,
                unsigned int nhoff,
index 498433dd067dd64eb6c728e566e61585093e960f..938b7fd1120477213888f9c7e61f98039bea3e03 100644 (file)
@@ -34,17 +34,17 @@ extern int                          udpv6_connect(struct sock *sk,
                                                      struct sockaddr *uaddr,
                                                      int addr_len);
 
-extern int                     datagram_recv_ctl(struct sock *sk,
-                                                 struct msghdr *msg,
-                                                 struct sk_buff *skb);
-
-extern int                     datagram_send_ctl(struct net *net,
-                                                 struct sock *sk,
-                                                 struct msghdr *msg,
-                                                 struct flowi6 *fl6,
-                                                 struct ipv6_txoptions *opt,
-                                                 int *hlimit, int *tclass,
-                                                 int *dontfrag);
+extern int                     ip6_datagram_recv_ctl(struct sock *sk,
+                                                     struct msghdr *msg,
+                                                     struct sk_buff *skb);
+
+extern int                     ip6_datagram_send_ctl(struct net *net,
+                                                     struct sock *sk,
+                                                     struct msghdr *msg,
+                                                     struct flowi6 *fl6,
+                                                     struct ipv6_txoptions *opt,
+                                                     int *hlimit, int *tclass,
+                                                     int *dontfrag);
 
 #define                LOOPBACK4_IPV6          cpu_to_be32(0x7f000006)
 
index 6d9e15ed1dcf0cb2875a10ca6795b14f3d4e0c51..dd8c48d14ed9637b3d1264ac5b88811e9be2dae6 100644 (file)
@@ -19,7 +19,7 @@
 
 struct cs4271_platform_data {
        int gpio_nreset;        /* GPIO driving Reset pin, if any */
-       int amutec_eq_bmutec:1; /* flag to enable AMUTEC=BMUTEC */
+       bool amutec_eq_bmutec;  /* flag to enable AMUTEC=BMUTEC */
 };
 
 #endif /* __CS4271_H */
index 769e27c774a3de382ec93650766621056dc78820..bc56738cb1091b8319b2413fe7e6b2f105ba0ac8 100644 (file)
@@ -58,8 +58,9 @@
        .info = snd_soc_info_volsw_range, .get = snd_soc_get_volsw_range, \
        .put = snd_soc_put_volsw_range, \
        .private_value = (unsigned long)&(struct soc_mixer_control) \
-               {.reg = xreg, .shift = xshift, .min = xmin,\
-                .max = xmax, .platform_max = xmax, .invert = xinvert} }
+               {.reg = xreg, .rreg = xreg, .shift = xshift, \
+                .rshift = xshift,  .min = xmin, .max = xmax, \
+                .platform_max = xmax, .invert = xinvert} }
 #define SOC_SINGLE_TLV(xname, reg, shift, max, invert, tlv_array) \
 {      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
        .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
@@ -88,8 +89,9 @@
        .info = snd_soc_info_volsw_range, \
        .get = snd_soc_get_volsw_range, .put = snd_soc_put_volsw_range, \
        .private_value = (unsigned long)&(struct soc_mixer_control) \
-               {.reg = xreg, .shift = xshift, .min = xmin,\
-                .max = xmax, .platform_max = xmax, .invert = xinvert} }
+               {.reg = xreg, .rreg = xreg, .shift = xshift, \
+                .rshift = xshift, .min = xmin, .max = xmax, \
+                .platform_max = xmax, .invert = xinvert} }
 #define SOC_DOUBLE(xname, reg, shift_left, shift_right, max, invert) \
 {      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
        .info = snd_soc_info_volsw, .get = snd_soc_get_volsw, \
index 7cae2360221eb0e32f9e3a19dceaec939ade7296..663e34a5383f269d7c54a67f80fe9e73cbff1385 100644 (file)
@@ -174,6 +174,7 @@ typedef unsigned __bitwise__ sense_reason_t;
 
 enum tcm_sense_reason_table {
 #define R(x)   (__force sense_reason_t )(x)
+       TCM_NO_SENSE                            = R(0x00),
        TCM_NON_EXISTENT_LUN                    = R(0x01),
        TCM_UNSUPPORTED_SCSI_OPCODE             = R(0x02),
        TCM_INCORRECT_AMOUNT_OF_DATA            = R(0x03),
diff --git a/include/trace/events/ras.h b/include/trace/events/ras.h
new file mode 100644 (file)
index 0000000..88b8783
--- /dev/null
@@ -0,0 +1,77 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ras
+
+#if !defined(_TRACE_AER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_AER_H
+
+#include <linux/tracepoint.h>
+#include <linux/edac.h>
+
+
+/*
+ * PCIe AER Trace event
+ *
+ * These events are generated when hardware detects a corrected or
+ * uncorrected event on a PCIe device. The event report has
+ * the following structure:
+ *
+ * char * dev_name -   The name of the slot where the device resides
+ *                     ([domain:]bus:device.function).
+ * u32 status -                Either the correctable or uncorrectable register
+ *                     indicating what error or errors have been seen
+ * u8 severity -       error severity 0:NONFATAL 1:FATAL 2:CORRECTED
+ */
+
+#define aer_correctable_errors         \
+       {BIT(0),        "Receiver Error"},              \
+       {BIT(6),        "Bad TLP"},                     \
+       {BIT(7),        "Bad DLLP"},                    \
+       {BIT(8),        "RELAY_NUM Rollover"},          \
+       {BIT(12),       "Replay Timer Timeout"},        \
+       {BIT(13),       "Advisory Non-Fatal"}
+
+#define aer_uncorrectable_errors               \
+       {BIT(4),        "Data Link Protocol"},          \
+       {BIT(12),       "Poisoned TLP"},                \
+       {BIT(13),       "Flow Control Protocol"},       \
+       {BIT(14),       "Completion Timeout"},          \
+       {BIT(15),       "Completer Abort"},             \
+       {BIT(16),       "Unexpected Completion"},       \
+       {BIT(17),       "Receiver Overflow"},           \
+       {BIT(18),       "Malformed TLP"},               \
+       {BIT(19),       "ECRC"},                        \
+       {BIT(20),       "Unsupported Request"}
+
+TRACE_EVENT(aer_event,
+       TP_PROTO(const char *dev_name,
+                const u32 status,
+                const u8 severity),
+
+       TP_ARGS(dev_name, status, severity),
+
+       TP_STRUCT__entry(
+               __string(       dev_name,       dev_name        )
+               __field(        u32,            status          )
+               __field(        u8,             severity        )
+       ),
+
+       TP_fast_assign(
+               __assign_str(dev_name, dev_name);
+               __entry->status         = status;
+               __entry->severity       = severity;
+       ),
+
+       TP_printk("%s PCIe Bus Error: severity=%s, %s\n",
+               __get_str(dev_name),
+               __entry->severity == HW_EVENT_ERR_CORRECTED ? "Corrected" :
+                       __entry->severity == HW_EVENT_ERR_FATAL ?
+                       "Fatal" : "Uncorrected",
+               __entry->severity == HW_EVENT_ERR_CORRECTED ?
+               __print_flags(__entry->status, "|", aer_correctable_errors) :
+               __print_flags(__entry->status, "|", aer_uncorrectable_errors))
+);
+
+#endif /* _TRACE_AER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
index d4f559b1ec3444eada48e8e9e1d8bd85f36da5b6..1918e832da4ffce61d1a057bd706ed52f81b07ce 100644 (file)
@@ -44,8 +44,10 @@ TRACE_EVENT(rcu_utilization,
  * of a new grace period or the end of an old grace period ("cpustart"
  * and "cpuend", respectively), a CPU passing through a quiescent
  * state ("cpuqs"), a CPU coming online or going offline ("cpuonl"
- * and "cpuofl", respectively), and a CPU being kicked for being too
- * long in dyntick-idle mode ("kick").
+ * and "cpuofl", respectively), a CPU being kicked for being too
+ * long in dyntick-idle mode ("kick"), a CPU accelerating its new
+ * callbacks to RCU_NEXT_READY_TAIL ("AccReadyCB"), and a CPU
+ * accelerating its new callbacks to RCU_WAIT_TAIL ("AccWaitCB").
  */
 TRACE_EVENT(rcu_grace_period,
 
@@ -393,7 +395,7 @@ TRACE_EVENT(rcu_kfree_callback,
  */
 TRACE_EVENT(rcu_batch_start,
 
-       TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit),
+       TP_PROTO(char *rcuname, long qlen_lazy, long qlen, long blimit),
 
        TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
 
@@ -401,7 +403,7 @@ TRACE_EVENT(rcu_batch_start,
                __field(char *, rcuname)
                __field(long, qlen_lazy)
                __field(long, qlen)
-               __field(int, blimit)
+               __field(long, blimit)
        ),
 
        TP_fast_assign(
@@ -411,7 +413,7 @@ TRACE_EVENT(rcu_batch_start,
                __entry->blimit = blimit;
        ),
 
-       TP_printk("%s CBs=%ld/%ld bl=%d",
+       TP_printk("%s CBs=%ld/%ld bl=%ld",
                  __entry->rcuname, __entry->qlen_lazy, __entry->qlen,
                  __entry->blimit)
 );
@@ -523,22 +525,30 @@ TRACE_EVENT(rcu_batch_end,
  */
 TRACE_EVENT(rcu_torture_read,
 
-       TP_PROTO(char *rcutorturename, struct rcu_head *rhp),
+       TP_PROTO(char *rcutorturename, struct rcu_head *rhp,
+                unsigned long secs, unsigned long c_old, unsigned long c),
 
-       TP_ARGS(rcutorturename, rhp),
+       TP_ARGS(rcutorturename, rhp, secs, c_old, c),
 
        TP_STRUCT__entry(
                __field(char *, rcutorturename)
                __field(struct rcu_head *, rhp)
+               __field(unsigned long, secs)
+               __field(unsigned long, c_old)
+               __field(unsigned long, c)
        ),
 
        TP_fast_assign(
                __entry->rcutorturename = rcutorturename;
                __entry->rhp = rhp;
+               __entry->secs = secs;
+               __entry->c_old = c_old;
+               __entry->c = c;
        ),
 
-       TP_printk("%s torture read %p",
-                 __entry->rcutorturename, __entry->rhp)
+       TP_printk("%s torture read %p %luus c: %lu %lu",
+                 __entry->rcutorturename, __entry->rhp,
+                 __entry->secs, __entry->c_old, __entry->c)
 );
 
 /*
@@ -608,7 +618,8 @@ TRACE_EVENT(rcu_barrier,
 #define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
 #define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
        do { } while (0)
-#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
+#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
+       do { } while (0)
 #define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
 
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
index f28d1b65f17817db3bf81d907d8a50460a572016..bf0e18ba6cfb0d062b5c0b15a98dff854a5d2ad9 100644 (file)
@@ -27,7 +27,7 @@ DECLARE_EVENT_CLASS(workqueue_work,
 /**
  * workqueue_queue_work - called when a work gets queued
  * @req_cpu:   the requested cpu
- * @cwq:       pointer to struct cpu_workqueue_struct
+ * @pwq:       pointer to struct pool_workqueue
  * @work:      pointer to struct work_struct
  *
  * This event occurs when a work is queued immediately or once a
@@ -36,10 +36,10 @@ DECLARE_EVENT_CLASS(workqueue_work,
  */
 TRACE_EVENT(workqueue_queue_work,
 
-       TP_PROTO(unsigned int req_cpu, struct cpu_workqueue_struct *cwq,
+       TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq,
                 struct work_struct *work),
 
-       TP_ARGS(req_cpu, cwq, work),
+       TP_ARGS(req_cpu, pwq, work),
 
        TP_STRUCT__entry(
                __field( void *,        work    )
@@ -52,9 +52,9 @@ TRACE_EVENT(workqueue_queue_work,
        TP_fast_assign(
                __entry->work           = work;
                __entry->function       = work->func;
-               __entry->workqueue      = cwq->wq;
+               __entry->workqueue      = pwq->wq;
                __entry->req_cpu        = req_cpu;
-               __entry->cpu            = cwq->pool->gcwq->cpu;
+               __entry->cpu            = pwq->pool->cpu;
        ),
 
        TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u",
index 76352ac45f24bf1290196327ad9af8fb504e4d69..9f096f1c0907b7056bcbe396783fdb2237724a65 100644 (file)
@@ -26,7 +26,6 @@
 
 #include <linux/types.h>
 #include <linux/elf-em.h>
-#include <linux/ptrace.h>
 
 /* The netlink messages for the audit system is divided into blocks:
  * 1000 - 1099 are for commanding the audit system
 #define AUDIT_MMAP             1323    /* Record showing descriptor and flags in mmap */
 #define AUDIT_NETFILTER_PKT    1324    /* Packets traversing netfilter chains */
 #define AUDIT_NETFILTER_CFG    1325    /* Netfilter chain modifications */
+#define AUDIT_SECCOMP          1326    /* Secure Computing event */
 
 #define AUDIT_AVC              1400    /* SE Linux avc denial or grant */
 #define AUDIT_SELINUX_ERR      1401    /* Internal SE Linux Errors */
index 77cdba9df274c09f2d9e8972fca63da215d1a8d9..bb991dfe134f03743a4e40c7024324289910b0e9 100644 (file)
 #define AUTOFS_MIN_PROTO_VERSION       AUTOFS_PROTO_VERSION
 
 /*
- * Architectures where both 32- and 64-bit binaries can be executed
- * on 64-bit kernels need this.  This keeps the structure format
- * uniform, and makes sure the wait_queue_token isn't too big to be
- * passed back down to the kernel.
- *
- * This assumes that on these architectures:
- * mode     32 bit    64 bit
- * -------------------------
- * int      32 bit    32 bit
- * long     32 bit    64 bit
- *
- * If so, 32-bit user-space code should be backwards compatible.
+ * The wait_queue_token (autofs_wqt_t) is part of a structure which is passed
+ * back to the kernel via ioctl from userspace. On architectures where 32- and
+ * 64-bit userspace binaries can be executed it's important that the size of
+ * autofs_wqt_t stays constant between 32- and 64-bit Linux kernels so that we
+ * do not break the binary ABI interface by changing the structure size.
  */
-
-#if defined(__sparc__) || defined(__mips__) || defined(__x86_64__) \
- || defined(__powerpc__) || defined(__s390__)
-typedef unsigned int autofs_wqt_t;
-#else
+#if defined(__ia64__) || defined(__alpha__) /* pure 64bit architectures */
 typedef unsigned long autofs_wqt_t;
+#else
+typedef unsigned int autofs_wqt_t;
 #endif
 
 /* Packet types */
index 4f63c05d27c91d04569971ea4a2c4849203c36a9..9fa9c622a7f45ab6032aefd74001930b4d7ddb07 100644 (file)
@@ -579,7 +579,8 @@ enum perf_event_type {
         *      { u32                   size;
         *        char                  data[size];}&& PERF_SAMPLE_RAW
         *
-        *      { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
+        *      { u64                   nr;
+        *        { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
         *
         *      { u64                   abi; # enum perf_sample_regs_abi
         *        u64                   regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
index 78f99d97475b50c8427607d9bd23a3457a27319a..2c6c85f18ea027440761c780134bb757b0c53c8e 100644 (file)
@@ -50,7 +50,8 @@
 #define PORT_LPC3220   22      /* NXP LPC32xx SoC "Standard" UART */
 #define PORT_8250_CIR  23      /* CIR infrared port, has its own driver */
 #define PORT_XR17V35X  24      /* Exar XR17V35x UARTs */
-#define PORT_MAX_8250  24      /* max port ID */
+#define PORT_BRCM_TRUMANAGE    25
+#define PORT_MAX_8250  25      /* max port ID */
 
 /*
  * ARM specific type numbers.  These are not currently guaranteed
index 50598472dc411ea1b522ac5d499bd7ac8bba37ec..f738e25377ffbc3ed8e794246f03f718bdb1bab4 100644 (file)
 #define USB_INTRF_FUNC_SUSPEND_LP      (1 << (8 + 0))
 #define USB_INTRF_FUNC_SUSPEND_RW      (1 << (8 + 1))
 
+/*
+ * Interface status, Figure 9-5 USB 3.0 spec
+ */
+#define USB_INTRF_STAT_FUNC_RW_CAP     1
+#define USB_INTRF_STAT_FUNC_RW         2
+
 #define USB_ENDPOINT_HALT              0       /* IN/OUT will STALL */
 
 /* Bit array elements as returned by the USB_REQ_GET_STATUS request. */
index 7d30240e5bfef76aedc3a8eee2a7991abe0f0a18..7000d96574025bda58c5253cf10e92d0249de9eb 100644 (file)
@@ -20,12 +20,8 @@ config CONSTRUCTORS
        bool
        depends on !UML
 
-config HAVE_IRQ_WORK
-       bool
-
 config IRQ_WORK
        bool
-       depends on HAVE_IRQ_WORK
 
 config BUILDTIME_EXTABLE_SORT
        bool
@@ -326,10 +322,13 @@ source "kernel/time/Kconfig"
 
 menu "CPU/Task time and stats accounting"
 
+config VIRT_CPU_ACCOUNTING
+       bool
+
 choice
        prompt "Cputime accounting"
        default TICK_CPU_ACCOUNTING if !PPC64
-       default VIRT_CPU_ACCOUNTING if PPC64
+       default VIRT_CPU_ACCOUNTING_NATIVE if PPC64
 
 # Kind of a stub config for the pure tick based cputime accounting
 config TICK_CPU_ACCOUNTING
@@ -342,9 +341,10 @@ config TICK_CPU_ACCOUNTING
 
          If unsure, say Y.
 
-config VIRT_CPU_ACCOUNTING
+config VIRT_CPU_ACCOUNTING_NATIVE
        bool "Deterministic task and CPU time accounting"
        depends on HAVE_VIRT_CPU_ACCOUNTING
+       select VIRT_CPU_ACCOUNTING
        help
          Select this option to enable more accurate task and CPU time
          accounting.  This is done by reading a CPU counter on each
@@ -354,6 +354,23 @@ config VIRT_CPU_ACCOUNTING
          this also enables accounting of stolen time on logically-partitioned
          systems.
 
+config VIRT_CPU_ACCOUNTING_GEN
+       bool "Full dynticks CPU time accounting"
+       depends on HAVE_CONTEXT_TRACKING && 64BIT
+       select VIRT_CPU_ACCOUNTING
+       select CONTEXT_TRACKING
+       help
+         Select this option to enable task and CPU time accounting on full
+         dynticks systems. This accounting is implemented by watching every
+         kernel-user boundary using the context tracking subsystem.
+         The accounting is thus performed at the expense of some significant
+         overhead.
+
+         For now this is only useful if you are working on the full
+         dynticks subsystem development.
+
+         If unsure, say N.
+
 config IRQ_TIME_ACCOUNTING
        bool "Fine granularity task level IRQ time accounting"
        depends on HAVE_IRQ_TIME_ACCOUNTING
@@ -453,7 +470,7 @@ config TREE_RCU
 
 config TREE_PREEMPT_RCU
        bool "Preemptible tree-based hierarchical RCU"
-       depends on PREEMPT && SMP
+       depends on PREEMPT
        help
          This option selects the RCU implementation that is
          designed for very large SMP systems with hundreds or
@@ -461,6 +478,8 @@ config TREE_PREEMPT_RCU
          is also required.  It also scales down nicely to
          smaller systems.
 
+         Select this option if you are unsure.
+
 config TINY_RCU
        bool "UP-only small-memory-footprint RCU"
        depends on !PREEMPT && !SMP
@@ -486,6 +505,14 @@ config PREEMPT_RCU
          This option enables preemptible-RCU code that is common between
          the TREE_PREEMPT_RCU and TINY_PREEMPT_RCU implementations.
 
+config RCU_STALL_COMMON
+       def_bool ( TREE_RCU || TREE_PREEMPT_RCU || RCU_TRACE )
+       help
+         This option enables RCU CPU stall code that is common between
+         the TINY and TREE variants of RCU.  The purpose is to allow
+         the tiny variants to disable RCU CPU stall warnings, while
+         making these warnings mandatory for the tree variants.
+
 config CONTEXT_TRACKING
        bool
 
@@ -1182,7 +1209,7 @@ config CC_OPTIMIZE_FOR_SIZE
          Enabling this option will pass "-Os" instead of "-O2" to gcc
          resulting in a smaller kernel.
 
-         If unsure, say Y.
+         If unsure, say N.
 
 config SYSCTL
        bool
@@ -1263,6 +1290,7 @@ config HOTPLUG
 config PRINTK
        default y
        bool "Enable support for printk" if EXPERT
+       select IRQ_WORK
        help
          This option enables normal printk support. Removing it
          eliminates most of the message strings from the kernel image
index 5e4ded51788eb81f277c0b5674f5602730220df4..a32ec1ce882b22dc96ed714a3f68e5838ed98af3 100644 (file)
@@ -36,6 +36,10 @@ __setup("noinitrd", no_initrd);
 static int init_linuxrc(struct subprocess_info *info, struct cred *new)
 {
        sys_unshare(CLONE_FS | CLONE_FILES);
+       /* stdin/stdout/stderr for /linuxrc */
+       sys_open("/dev/console", O_RDWR, 0);
+       sys_dup(0);
+       sys_dup(0);
        /* move initrd over / and chdir/chroot in initrd root */
        sys_chdir("/root");
        sys_mount(".", "/", NULL, MS_MOVE, NULL);
@@ -57,6 +61,9 @@ static void __init handle_initrd(void)
        sys_mkdir("/old", 0700);
        sys_chdir("/old");
 
+       /* try loading default modules from initrd */
+       load_default_modules();
+
        /*
         * In case that a resume from disk is carried out by linuxrc or one of
         * its children, we need to tell the freezer not to wait for us.
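The three sys_* calls added above rely on open() returning the lowest free descriptor: with nothing open in the fresh file table, /dev/console lands on fd 0 and the two dup(0) calls fill fds 1 and 2, so /linuxrc starts with working stdin/stdout/stderr. The same idiom in plain userspace C, purely to illustrate the descriptor numbering:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        static const char msg[] = "stdio wired up\n";

        close(0);
        close(1);
        close(2);

        if (open("/dev/tty", O_RDWR) != 0)      /* lowest free fd: 0 (stdin) */
                return 1;
        dup(0);                                  /* next free fd: 1 (stdout)  */
        dup(0);                                  /* next free fd: 2 (stderr)  */

        return write(1, msg, sizeof(msg) - 1) < 0;
}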
index 8b2f3996b035f1aa44591afd29ea2f546706fae4..ba0a7f362d9e3ffbf00a45c7baa5c2f0a2186b8f 100644 (file)
@@ -2,6 +2,8 @@
 #include <linux/export.h>
 #include <linux/mqueue.h>
 #include <linux/sched.h>
+#include <linux/sched/sysctl.h>
+#include <linux/sched/rt.h>
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
index 84c6bf111300878a095a9fb3f8f91678dbe10b65..a67ef9dbda9dae0ea7a1619a393c55157a6d8592 100644 (file)
@@ -592,7 +592,7 @@ static int __init populate_rootfs(void)
                        initrd_end - initrd_start);
                if (!err) {
                        free_initrd();
-                       return 0;
+                       goto done;
                } else {
                        clean_rootfs();
                        unpack_to_rootfs(__initramfs_start, __initramfs_size);
@@ -607,6 +607,7 @@ static int __init populate_rootfs(void)
                        sys_close(fd);
                        free_initrd();
                }
+       done:
 #else
                printk(KERN_INFO "Unpacking initramfs...\n");
                err = unpack_to_rootfs((char *)initrd_start,
@@ -615,6 +616,11 @@ static int __init populate_rootfs(void)
                        printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
                free_initrd();
 #endif
+               /*
+                * Try loading default modules from initramfs.  This gives
+                * us a chance to load before device_initcalls.
+                */
+               load_default_modules();
        }
        return 0;
 }
index 85d69dffe8647bf284db0bf51ef75f5b318e9eda..63534a141b4eb6c0f36c5ac0d99d2733028ff96e 100644 (file)
@@ -70,6 +70,8 @@
 #include <linux/perf_event.h>
 #include <linux/file.h>
 #include <linux/ptrace.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -604,7 +606,7 @@ asmlinkage void __init start_kernel(void)
        pidmap_init();
        anon_vma_init();
 #ifdef CONFIG_X86
-       if (efi_enabled)
+       if (efi_enabled(EFI_RUNTIME_SERVICES))
                efi_enter_virtual_mode();
 #endif
        thread_info_cache_init();
@@ -632,7 +634,7 @@ asmlinkage void __init start_kernel(void)
        acpi_early_init(); /* before LAPIC and SMP init */
        sfi_init_late();
 
-       if (efi_enabled) {
+       if (efi_enabled(EFI_RUNTIME_SERVICES)) {
                efi_late_init();
                efi_free_boot_services();
        }
@@ -794,6 +796,17 @@ static void __init do_pre_smp_initcalls(void)
                do_one_initcall(*fn);
 }
 
+/*
+ * This function requests modules which should be loaded by default and is
+ * called twice right after initrd is mounted and right before init is
+ * exec'd.  If such modules are on either initrd or rootfs, they will be
+ * loaded before control is passed to userland.
+ */
+void __init load_default_modules(void)
+{
+       load_default_elevator_module();
+}
+
 static int run_init_process(const char *init_filename)
 {
        argv_init[0] = init_filename;
@@ -802,7 +815,7 @@ static int run_init_process(const char *init_filename)
                (const char __user *const __user *)envp_init);
 }
 
-static void __init kernel_init_freeable(void);
+static noinline void __init kernel_init_freeable(void);
 
 static int __ref kernel_init(void *unused)
 {
@@ -845,7 +858,7 @@ static int __ref kernel_init(void *unused)
              "See Linux Documentation/init.txt for guidance.");
 }
 
-static void __init kernel_init_freeable(void)
+static noinline void __init kernel_init_freeable(void)
 {
        /*
         * Wait until kthreadd is all set-up.
@@ -900,4 +913,7 @@ static void __init kernel_init_freeable(void)
         * we're essentially up and running. Get rid of the
         * initmem segments and start the user-mode stuff..
         */
+
+       /* rootfs is available now, try loading default modules */
+       load_default_modules();
 }
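The efi_enabled conversion above turns a plain global flag into a predicate taking a facility bit, so callers can ask specifically whether runtime services are usable rather than whether EFI was present at all. A plausible sketch of such a bitmask predicate; every demo_* name is hypothetical and the real implementation lives in the EFI support code, not in this hunk:

#include <linux/bitops.h>
#include <linux/types.h>

enum demo_efi_facility { DEMO_EFI_BOOT, DEMO_EFI_RUNTIME_SERVICES, DEMO_EFI_MEMMAP };

static unsigned long demo_efi_flags;    /* bits set while parsing the EFI tables */

static bool demo_efi_enabled(int facility)
{
        return test_bit(facility, &demo_efi_flags) != 0;
}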
index 051e071a06e7a745a96c2fac6317d330dc33da43..e8b1627ab9c7cf21892acb45722293815ada0ede 100644 (file)
@@ -566,6 +566,7 @@ out:
 void acct_collect(long exitcode, int group_dead)
 {
        struct pacct_struct *pacct = &current->signal->pacct;
+       cputime_t utime, stime;
        unsigned long vsize = 0;
 
        if (group_dead && current->mm) {
@@ -593,8 +594,9 @@ void acct_collect(long exitcode, int group_dead)
                pacct->ac_flag |= ACORE;
        if (current->flags & PF_SIGNALED)
                pacct->ac_flag |= AXSIG;
-       pacct->ac_utime += current->utime;
-       pacct->ac_stime += current->stime;
+       task_cputime(current, &utime, &stime);
+       pacct->ac_utime += utime;
+       pacct->ac_stime += stime;
        pacct->ac_minflt += current->min_flt;
        pacct->ac_majflt += current->maj_flt;
        spin_unlock_irq(&current->sighand->siglock);
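Process accounting now reads task times through the task_cputime() accessor instead of touching p->utime/p->stime directly, giving the accounting backends (including the new full dynticks one) a single place to produce current values. The calling pattern, with the demo_* name hypothetical:

#include <linux/kernel.h>
#include <linux/sched.h>

static void demo_report_cputime(struct task_struct *p)
{
        cputime_t utime, stime;

        task_cputime(p, &utime, &stime);
        pr_info("%s: utime=%llu stime=%llu\n", p->comm,
                (unsigned long long)utime, (unsigned long long)stime);
}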
index 9d31183848582ca8da812d70d2d68364ff898f1d..8ddee2c3e5b04ac290f0d923fd21c60d2f85ffd2 100644 (file)
@@ -57,56 +57,52 @@ asynchronous and synchronous parts of the kernel.
 #include <linux/slab.h>
 #include <linux/workqueue.h>
 
+#include "workqueue_internal.h"
+
 static async_cookie_t next_cookie = 1;
 
-#define MAX_WORK       32768
+#define MAX_WORK               32768
+#define ASYNC_COOKIE_MAX       ULLONG_MAX      /* infinity cookie */
 
-static LIST_HEAD(async_pending);
-static ASYNC_DOMAIN(async_running);
-static LIST_HEAD(async_domains);
+static LIST_HEAD(async_global_pending);        /* pending from all registered doms */
+static ASYNC_DOMAIN(async_dfl_domain);
 static DEFINE_SPINLOCK(async_lock);
-static DEFINE_MUTEX(async_register_mutex);
 
 struct async_entry {
-       struct list_head        list;
+       struct list_head        domain_list;
+       struct list_head        global_list;
        struct work_struct      work;
        async_cookie_t          cookie;
        async_func_ptr          *func;
        void                    *data;
-       struct async_domain     *running;
+       struct async_domain     *domain;
 };
 
 static DECLARE_WAIT_QUEUE_HEAD(async_done);
 
 static atomic_t entry_count;
 
-
-/*
- * MUST be called with the lock held!
- */
-static async_cookie_t  __lowest_in_progress(struct async_domain *running)
+static async_cookie_t lowest_in_progress(struct async_domain *domain)
 {
-       struct async_entry *entry;
-
-       if (!list_empty(&running->domain)) {
-               entry = list_first_entry(&running->domain, typeof(*entry), list);
-               return entry->cookie;
-       }
+       struct async_entry *first = NULL;
+       async_cookie_t ret = ASYNC_COOKIE_MAX;
+       unsigned long flags;
 
-       list_for_each_entry(entry, &async_pending, list)
-               if (entry->running == running)
-                       return entry->cookie;
+       spin_lock_irqsave(&async_lock, flags);
 
-       return next_cookie;     /* "infinity" value */
-}
+       if (domain) {
+               if (!list_empty(&domain->pending))
+                       first = list_first_entry(&domain->pending,
+                                       struct async_entry, domain_list);
+       } else {
+               if (!list_empty(&async_global_pending))
+                       first = list_first_entry(&async_global_pending,
+                                       struct async_entry, global_list);
+       }
 
-static async_cookie_t  lowest_in_progress(struct async_domain *running)
-{
-       unsigned long flags;
-       async_cookie_t ret;
+       if (first)
+               ret = first->cookie;
 
-       spin_lock_irqsave(&async_lock, flags);
-       ret = __lowest_in_progress(running);
        spin_unlock_irqrestore(&async_lock, flags);
        return ret;
 }
@@ -120,14 +116,8 @@ static void async_run_entry_fn(struct work_struct *work)
                container_of(work, struct async_entry, work);
        unsigned long flags;
        ktime_t uninitialized_var(calltime), delta, rettime;
-       struct async_domain *running = entry->running;
 
-       /* 1) move self to the running queue */
-       spin_lock_irqsave(&async_lock, flags);
-       list_move_tail(&entry->list, &running->domain);
-       spin_unlock_irqrestore(&async_lock, flags);
-
-       /* 2) run (and print duration) */
+       /* 1) run (and print duration) */
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                printk(KERN_DEBUG "calling  %lli_%pF @ %i\n",
                        (long long)entry->cookie,
@@ -144,23 +134,22 @@ static void async_run_entry_fn(struct work_struct *work)
                        (long long)ktime_to_ns(delta) >> 10);
        }
 
-       /* 3) remove self from the running queue */
+       /* 2) remove self from the pending queues */
        spin_lock_irqsave(&async_lock, flags);
-       list_del(&entry->list);
-       if (running->registered && --running->count == 0)
-               list_del_init(&running->node);
+       list_del_init(&entry->domain_list);
+       list_del_init(&entry->global_list);
 
-       /* 4) free the entry */
+       /* 3) free the entry */
        kfree(entry);
        atomic_dec(&entry_count);
 
        spin_unlock_irqrestore(&async_lock, flags);
 
-       /* 5) wake up any waiters */
+       /* 4) wake up any waiters */
        wake_up(&async_done);
 }
 
-static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *running)
+static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *domain)
 {
        struct async_entry *entry;
        unsigned long flags;
@@ -183,19 +172,28 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct a
                ptr(data, newcookie);
                return newcookie;
        }
+       INIT_LIST_HEAD(&entry->domain_list);
+       INIT_LIST_HEAD(&entry->global_list);
        INIT_WORK(&entry->work, async_run_entry_fn);
        entry->func = ptr;
        entry->data = data;
-       entry->running = running;
+       entry->domain = domain;
 
        spin_lock_irqsave(&async_lock, flags);
+
+       /* allocate cookie and queue */
        newcookie = entry->cookie = next_cookie++;
-       list_add_tail(&entry->list, &async_pending);
-       if (running->registered && running->count++ == 0)
-               list_add_tail(&running->node, &async_domains);
+
+       list_add_tail(&entry->domain_list, &domain->pending);
+       if (domain->registered)
+               list_add_tail(&entry->global_list, &async_global_pending);
+
        atomic_inc(&entry_count);
        spin_unlock_irqrestore(&async_lock, flags);
 
+       /* mark that this task has queued an async job, used by module init */
+       current->flags |= PF_USED_ASYNC;
+
        /* schedule for execution */
        queue_work(system_unbound_wq, &entry->work);
 
@@ -212,7 +210,7 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct a
  */
 async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
 {
-       return __async_schedule(ptr, data, &async_running);
+       return __async_schedule(ptr, data, &async_dfl_domain);
 }
 EXPORT_SYMBOL_GPL(async_schedule);
 
@@ -220,18 +218,18 @@ EXPORT_SYMBOL_GPL(async_schedule);
  * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
  * @ptr: function to execute asynchronously
  * @data: data pointer to pass to the function
- * @running: running list for the domain
+ * @domain: the domain
  *
  * Returns an async_cookie_t that may be used for checkpointing later.
- * @running may be used in the async_synchronize_*_domain() functions
- * to wait within a certain synchronization domain rather than globally.
- * A synchronization domain is specified via the running queue @running to use.
- * Note: This function may be called from atomic or non-atomic contexts.
+ * @domain may be used in the async_synchronize_*_domain() functions to
+ * wait within a certain synchronization domain rather than globally.  A
+ * synchronization domain is specified via @domain.  Note: This function
+ * may be called from atomic or non-atomic contexts.
  */
 async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
-                                    struct async_domain *running)
+                                    struct async_domain *domain)
 {
-       return __async_schedule(ptr, data, running);
+       return __async_schedule(ptr, data, domain);
 }
 EXPORT_SYMBOL_GPL(async_schedule_domain);
 
@@ -242,18 +240,7 @@ EXPORT_SYMBOL_GPL(async_schedule_domain);
  */
 void async_synchronize_full(void)
 {
-       mutex_lock(&async_register_mutex);
-       do {
-               struct async_domain *domain = NULL;
-
-               spin_lock_irq(&async_lock);
-               if (!list_empty(&async_domains))
-                       domain = list_first_entry(&async_domains, typeof(*domain), node);
-               spin_unlock_irq(&async_lock);
-
-               async_synchronize_cookie_domain(next_cookie, domain);
-       } while (!list_empty(&async_domains));
-       mutex_unlock(&async_register_mutex);
+       async_synchronize_full_domain(NULL);
 }
 EXPORT_SYMBOL_GPL(async_synchronize_full);
 
@@ -268,51 +255,45 @@ EXPORT_SYMBOL_GPL(async_synchronize_full);
  */
 void async_unregister_domain(struct async_domain *domain)
 {
-       mutex_lock(&async_register_mutex);
        spin_lock_irq(&async_lock);
-       WARN_ON(!domain->registered || !list_empty(&domain->node) ||
-               !list_empty(&domain->domain));
+       WARN_ON(!domain->registered || !list_empty(&domain->pending));
        domain->registered = 0;
        spin_unlock_irq(&async_lock);
-       mutex_unlock(&async_register_mutex);
 }
 EXPORT_SYMBOL_GPL(async_unregister_domain);
 
 /**
  * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
- * @domain: running list to synchronize on
+ * @domain: the domain to synchronize
  *
  * This function waits until all asynchronous function calls for the
- * synchronization domain specified by the running list @domain have been done.
+ * synchronization domain specified by @domain have been done.
  */
 void async_synchronize_full_domain(struct async_domain *domain)
 {
-       async_synchronize_cookie_domain(next_cookie, domain);
+       async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
 }
 EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
 
 /**
  * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
  * @cookie: async_cookie_t to use as checkpoint
- * @running: running list to synchronize on
+ * @domain: the domain to synchronize (%NULL for all registered domains)
  *
  * This function waits until all asynchronous function calls for the
- * synchronization domain specified by running list @running submitted
- * prior to @cookie have been done.
+ * synchronization domain specified by @domain submitted prior to @cookie
+ * have been done.
  */
-void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *running)
+void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
 {
        ktime_t uninitialized_var(starttime), delta, endtime;
 
-       if (!running)
-               return;
-
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
                starttime = ktime_get();
        }
 
-       wait_event(async_done, lowest_in_progress(running) >= cookie);
+       wait_event(async_done, lowest_in_progress(domain) >= cookie);
 
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                endtime = ktime_get();
@@ -334,6 +315,18 @@ EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
  */
 void async_synchronize_cookie(async_cookie_t cookie)
 {
-       async_synchronize_cookie_domain(cookie, &async_running);
+       async_synchronize_cookie_domain(cookie, &async_dfl_domain);
 }
 EXPORT_SYMBOL_GPL(async_synchronize_cookie);
+
+/**
+ * current_is_async - is %current an async worker task?
+ *
+ * Returns %true if %current is an async worker task.
+ */
+bool current_is_async(void)
+{
+       struct worker *worker = current_wq_worker();
+
+       return worker && worker->current_func == async_run_entry_fn;
+}
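The caller-facing async API is unchanged by this rework. A small usage sketch, where the demo_* names are hypothetical and only the <linux/async.h> calls are the existing entry points: declare a domain, schedule independent work into it, then synchronize just that domain.

#include <linux/async.h>

static ASYNC_DOMAIN(demo_domain);

static void demo_probe_one(void *data, async_cookie_t cookie)
{
        /* slow, mutually independent initialization work goes here */
}

static void demo_probe_all(void)
{
        int i;

        for (i = 0; i < 4; i++)
                async_schedule_domain(demo_probe_one, NULL, &demo_domain);

        /* waits only for demo_domain, not for unrelated async work */
        async_synchronize_full_domain(&demo_domain);
}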
index 40414e9143db609b194b04e95c798351118f07b7..d596e5355f153d0cac5da1bc7806239a1ec6b860 100644 (file)
@@ -272,6 +272,8 @@ static int audit_log_config_change(char *function_name, int new, int old,
        int rc = 0;
 
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
+       if (unlikely(!ab))
+               return rc;
        audit_log_format(ab, "%s=%d old=%d auid=%u ses=%u", function_name, new,
                         old, from_kuid(&init_user_ns, loginuid), sessionid);
        if (sid) {
@@ -619,6 +621,8 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
        }
 
        *ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
+       if (unlikely(!*ab))
+               return rc;
        audit_log_format(*ab, "pid=%d uid=%u auid=%u ses=%u",
                         task_tgid_vnr(current),
                         from_kuid(&init_user_ns, current_uid()),
@@ -1097,6 +1101,23 @@ static inline void audit_get_stamp(struct audit_context *ctx,
        }
 }
 
+/*
+ * Wait for auditd to drain the queue a little
+ */
+static void wait_for_auditd(unsigned long sleep_time)
+{
+       DECLARE_WAITQUEUE(wait, current);
+       set_current_state(TASK_INTERRUPTIBLE);
+       add_wait_queue(&audit_backlog_wait, &wait);
+
+       if (audit_backlog_limit &&
+           skb_queue_len(&audit_skb_queue) > audit_backlog_limit)
+               schedule_timeout(sleep_time);
+
+       __set_current_state(TASK_RUNNING);
+       remove_wait_queue(&audit_backlog_wait, &wait);
+}
+
 /* Obtain an audit buffer.  This routine does locking to obtain the
  * audit buffer, but then no locking is required for calls to
  * audit_log_*format.  If the tsk is a task that is currently in a
@@ -1142,20 +1163,13 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 
        while (audit_backlog_limit
               && skb_queue_len(&audit_skb_queue) > audit_backlog_limit + reserve) {
-               if (gfp_mask & __GFP_WAIT && audit_backlog_wait_time
-                   && time_before(jiffies, timeout_start + audit_backlog_wait_time)) {
+               if (gfp_mask & __GFP_WAIT && audit_backlog_wait_time) {
+                       unsigned long sleep_time;
 
-                       /* Wait for auditd to drain the queue a little */
-                       DECLARE_WAITQUEUE(wait, current);
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       add_wait_queue(&audit_backlog_wait, &wait);
-
-                       if (audit_backlog_limit &&
-                           skb_queue_len(&audit_skb_queue) > audit_backlog_limit)
-                               schedule_timeout(timeout_start + audit_backlog_wait_time - jiffies);
-
-                       __set_current_state(TASK_RUNNING);
-                       remove_wait_queue(&audit_backlog_wait, &wait);
+                       sleep_time = timeout_start + audit_backlog_wait_time -
+                                       jiffies;
+                       if ((long)sleep_time > 0)
+                               wait_for_auditd(sleep_time);
                        continue;
                }
                if (audit_rate_check() && printk_ratelimit())
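audit_log_start() can return NULL when memory is tight or the backlog and rate limits kick in, and audit_log_format() must never be handed a NULL buffer; that is what the checks added throughout this file guard against. The resulting idiom, sketched with a hypothetical helper and message:

#include <linux/audit.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

static void demo_audit_note(int value)
{
        struct audit_buffer *ab;

        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
        if (unlikely(!ab))
                return;         /* no buffer: auditd gone or out of memory */
        audit_log_format(ab, "demo_value=%d res=1", value);
        audit_log_end(ab);
}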
index e81175ef25f82d129dbe4c3b5df70f718d59d782..642a89c4f3d60c23cabf89b86d387f2a29b2bea9 100644 (file)
@@ -449,11 +449,26 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
        return 0;
 }
 
+static void audit_log_remove_rule(struct audit_krule *rule)
+{
+       struct audit_buffer *ab;
+
+       ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
+       if (unlikely(!ab))
+               return;
+       audit_log_format(ab, "op=");
+       audit_log_string(ab, "remove rule");
+       audit_log_format(ab, " dir=");
+       audit_log_untrustedstring(ab, rule->tree->pathname);
+       audit_log_key(ab, rule->filterkey);
+       audit_log_format(ab, " list=%d res=1", rule->listnr);
+       audit_log_end(ab);
+}
+
 static void kill_rules(struct audit_tree *tree)
 {
        struct audit_krule *rule, *next;
        struct audit_entry *entry;
-       struct audit_buffer *ab;
 
        list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
                entry = container_of(rule, struct audit_entry, rule);
@@ -461,14 +476,7 @@ static void kill_rules(struct audit_tree *tree)
                list_del_init(&rule->rlist);
                if (rule->tree) {
                        /* not a half-baked one */
-                       ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
-                       audit_log_format(ab, "op=");
-                       audit_log_string(ab, "remove rule");
-                       audit_log_format(ab, " dir=");
-                       audit_log_untrustedstring(ab, rule->tree->pathname);
-                       audit_log_key(ab, rule->filterkey);
-                       audit_log_format(ab, " list=%d res=1", rule->listnr);
-                       audit_log_end(ab);
+                       audit_log_remove_rule(rule);
                        rule->tree = NULL;
                        list_del_rcu(&entry->list);
                        list_del(&entry->rule.list);
index 4a599f699adcfeca242e34f1a9691385a500450e..22831c4d369c67d988b1f0702f9db51bac6275bf 100644 (file)
@@ -240,6 +240,8 @@ static void audit_watch_log_rule_change(struct audit_krule *r, struct audit_watc
        if (audit_enabled) {
                struct audit_buffer *ab;
                ab = audit_log_start(NULL, GFP_NOFS, AUDIT_CONFIG_CHANGE);
+               if (unlikely(!ab))
+                       return;
                audit_log_format(ab, "auid=%u ses=%u op=",
                                 from_kuid(&init_user_ns, audit_get_loginuid(current)),
                                 audit_get_sessionid(current));
index 7f19f23d38a3347373ec00a629c6c0b39a8f770a..f9fc54bbe06faa3e845e19871d15ace3e6c5e1b3 100644 (file)
@@ -1144,7 +1144,6 @@ static void audit_log_rule_change(kuid_t loginuid, u32 sessionid, u32 sid,
  * audit_receive_filter - apply all rules to the specified message type
  * @type: audit message type
  * @pid: target pid for netlink audit messages
- * @uid: target uid for netlink audit messages
  * @seq: netlink audit message sequence (serial) number
  * @data: payload data
  * @datasz: size of payload data
index e37e6a12c5e32c204ca93b4422c52ffaee72aad3..a371f857a0a908a40a960511bf047d81ade471a9 100644 (file)
@@ -1464,14 +1464,14 @@ static void show_special(struct audit_context *context, int *call_panic)
                        audit_log_end(ab);
                        ab = audit_log_start(context, GFP_KERNEL,
                                             AUDIT_IPC_SET_PERM);
+                       if (unlikely(!ab))
+                               return;
                        audit_log_format(ab,
                                "qbytes=%lx ouid=%u ogid=%u mode=%#ho",
                                context->ipc.qbytes,
                                context->ipc.perm_uid,
                                context->ipc.perm_gid,
                                context->ipc.perm_mode);
-                       if (!ab)
-                               return;
                }
                break; }
        case AUDIT_MQ_OPEN: {
@@ -2675,7 +2675,7 @@ void __audit_mmap_fd(int fd, int flags)
        context->type = AUDIT_MMAP;
 }
 
-static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
+static void audit_log_task(struct audit_buffer *ab)
 {
        kuid_t auid, uid;
        kgid_t gid;
@@ -2693,6 +2693,11 @@ static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
        audit_log_task_context(ab);
        audit_log_format(ab, " pid=%d comm=", current->pid);
        audit_log_untrustedstring(ab, current->comm);
+}
+
+static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
+{
+       audit_log_task(ab);
        audit_log_format(ab, " reason=");
        audit_log_string(ab, reason);
        audit_log_format(ab, " sig=%ld", signr);
@@ -2715,6 +2720,8 @@ void audit_core_dumps(long signr)
                return;
 
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
+       if (unlikely(!ab))
+               return;
        audit_log_abend(ab, "memory violation", signr);
        audit_log_end(ab);
 }
@@ -2723,8 +2730,11 @@ void __audit_seccomp(unsigned long syscall, long signr, int code)
 {
        struct audit_buffer *ab;
 
-       ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
-       audit_log_abend(ab, "seccomp", signr);
+       ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_SECCOMP);
+       if (unlikely(!ab))
+               return;
+       audit_log_task(ab);
+       audit_log_format(ab, " sig=%ld", signr);
        audit_log_format(ab, " syscall=%ld", syscall);
        audit_log_format(ab, " compat=%d", is_compat_task());
        audit_log_format(ab, " ip=0x%lx", KSTK_EIP(current));
index 4855892798fdfd6aeee695a4fc1d2a70b96662eb..b5c64327e7127491e93507052ae5a600931ce158 100644 (file)
@@ -52,7 +52,7 @@
 #include <linux/module.h>
 #include <linux/delayacct.h>
 #include <linux/cgroupstats.h>
-#include <linux/hash.h>
+#include <linux/hashtable.h>
 #include <linux/namei.h>
 #include <linux/pid_namespace.h>
 #include <linux/idr.h>
@@ -376,22 +376,18 @@ static int css_set_count;
  * account cgroups in empty hierarchies.
  */
 #define CSS_SET_HASH_BITS      7
-#define CSS_SET_TABLE_SIZE     (1 << CSS_SET_HASH_BITS)
-static struct hlist_head css_set_table[CSS_SET_TABLE_SIZE];
+static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);
 
-static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
+static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
 {
        int i;
-       int index;
-       unsigned long tmp = 0UL;
+       unsigned long key = 0UL;
 
        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++)
-               tmp += (unsigned long)css[i];
-       tmp = (tmp >> 16) ^ tmp;
+               key += (unsigned long)css[i];
+       key = (key >> 16) ^ key;
 
-       index = hash_long(tmp, CSS_SET_HASH_BITS);
-
-       return &css_set_table[index];
+       return key;
 }
 
 /* We don't maintain the lists running through each css_set to its
@@ -418,7 +414,7 @@ static void __put_css_set(struct css_set *cg, int taskexit)
        }
 
        /* This css_set is dead. unlink it and release cgroup refcounts */
-       hlist_del(&cg->hlist);
+       hash_del(&cg->hlist);
        css_set_count--;
 
        list_for_each_entry_safe(link, saved_link, &cg->cg_links,
@@ -426,12 +422,20 @@ static void __put_css_set(struct css_set *cg, int taskexit)
                struct cgroup *cgrp = link->cgrp;
                list_del(&link->cg_link_list);
                list_del(&link->cgrp_link_list);
+
+               /*
+                * We may not be holding cgroup_mutex, and if cgrp->count is
+                * dropped to 0 the cgroup can be destroyed at any time, hence
+                * rcu_read_lock is used to keep it alive.
+                */
+               rcu_read_lock();
                if (atomic_dec_and_test(&cgrp->count) &&
                    notify_on_release(cgrp)) {
                        if (taskexit)
                                set_bit(CGRP_RELEASABLE, &cgrp->flags);
                        check_for_release(cgrp);
                }
+               rcu_read_unlock();
 
                kfree(link);
        }
@@ -550,9 +554,9 @@ static struct css_set *find_existing_css_set(
 {
        int i;
        struct cgroupfs_root *root = cgrp->root;
-       struct hlist_head *hhead;
        struct hlist_node *node;
        struct css_set *cg;
+       unsigned long key;
 
        /*
         * Build the set of subsystem state objects that we want to see in the
@@ -572,8 +576,8 @@ static struct css_set *find_existing_css_set(
                }
        }
 
-       hhead = css_set_hash(template);
-       hlist_for_each_entry(cg, node, hhead, hlist) {
+       key = css_set_hash(template);
+       hash_for_each_possible(css_set_table, cg, node, hlist, key) {
                if (!compare_css_sets(cg, oldcg, cgrp, template))
                        continue;
 
@@ -657,8 +661,8 @@ static struct css_set *find_css_set(
 
        struct list_head tmp_cg_links;
 
-       struct hlist_head *hhead;
        struct cg_cgroup_link *link;
+       unsigned long key;
 
        /* First see if we already have a cgroup group that matches
         * the desired set */
@@ -704,8 +708,8 @@ static struct css_set *find_css_set(
        css_set_count++;
 
        /* Add this cgroup group to the hash table */
-       hhead = css_set_hash(res->subsys);
-       hlist_add_head(&res->hlist, hhead);
+       key = css_set_hash(res->subsys);
+       hash_add(css_set_table, &res->hlist, key);
 
        write_unlock(&css_set_lock);
 
@@ -856,47 +860,54 @@ static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb)
        return inode;
 }
 
-static void cgroup_diput(struct dentry *dentry, struct inode *inode)
+static void cgroup_free_fn(struct work_struct *work)
 {
-       /* is dentry a directory ? if so, kfree() associated cgroup */
-       if (S_ISDIR(inode->i_mode)) {
-               struct cgroup *cgrp = dentry->d_fsdata;
-               struct cgroup_subsys *ss;
-               BUG_ON(!(cgroup_is_removed(cgrp)));
-               /* It's possible for external users to be holding css
-                * reference counts on a cgroup; css_put() needs to
-                * be able to access the cgroup after decrementing
-                * the reference count in order to know if it needs to
-                * queue the cgroup to be handled by the release
-                * agent */
-               synchronize_rcu();
+       struct cgroup *cgrp = container_of(work, struct cgroup, free_work);
+       struct cgroup_subsys *ss;
 
-               mutex_lock(&cgroup_mutex);
-               /*
-                * Release the subsystem state objects.
-                */
-               for_each_subsys(cgrp->root, ss)
-                       ss->css_free(cgrp);
+       mutex_lock(&cgroup_mutex);
+       /*
+        * Release the subsystem state objects.
+        */
+       for_each_subsys(cgrp->root, ss)
+               ss->css_free(cgrp);
 
-               cgrp->root->number_of_cgroups--;
-               mutex_unlock(&cgroup_mutex);
+       cgrp->root->number_of_cgroups--;
+       mutex_unlock(&cgroup_mutex);
 
-               /*
-                * Drop the active superblock reference that we took when we
-                * created the cgroup
-                */
-               deactivate_super(cgrp->root->sb);
+       /*
+        * Drop the active superblock reference that we took when we
+        * created the cgroup
+        */
+       deactivate_super(cgrp->root->sb);
 
-               /*
-                * if we're getting rid of the cgroup, refcount should ensure
-                * that there are no pidlists left.
-                */
-               BUG_ON(!list_empty(&cgrp->pidlists));
+       /*
+        * if we're getting rid of the cgroup, refcount should ensure
+        * that there are no pidlists left.
+        */
+       BUG_ON(!list_empty(&cgrp->pidlists));
 
-               simple_xattrs_free(&cgrp->xattrs);
+       simple_xattrs_free(&cgrp->xattrs);
 
-               ida_simple_remove(&cgrp->root->cgroup_ida, cgrp->id);
-               kfree_rcu(cgrp, rcu_head);
+       ida_simple_remove(&cgrp->root->cgroup_ida, cgrp->id);
+       kfree(cgrp);
+}
+
+static void cgroup_free_rcu(struct rcu_head *head)
+{
+       struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head);
+
+       schedule_work(&cgrp->free_work);
+}
+
+static void cgroup_diput(struct dentry *dentry, struct inode *inode)
+{
+       /* is dentry a directory ? if so, kfree() associated cgroup */
+       if (S_ISDIR(inode->i_mode)) {
+               struct cgroup *cgrp = dentry->d_fsdata;
+
+               BUG_ON(!(cgroup_is_removed(cgrp)));
+               call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
        } else {
                struct cfent *cfe = __d_cfe(dentry);
                struct cgroup *cgrp = dentry->d_parent->d_fsdata;
@@ -925,13 +936,17 @@ static void remove_dir(struct dentry *d)
        dput(parent);
 }
 
-static int cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
+static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
 {
        struct cfent *cfe;
 
        lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex);
        lockdep_assert_held(&cgroup_mutex);
 
+       /*
+        * If we're doing cleanup due to failure of cgroup_create(),
+        * the corresponding @cfe may not exist.
+        */
        list_for_each_entry(cfe, &cgrp->files, node) {
                struct dentry *d = cfe->dentry;
 
@@ -944,9 +959,8 @@ static int cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
                list_del_init(&cfe->node);
                dput(d);
 
-               return 0;
+               break;
        }
-       return -ENOENT;
 }
 
 /**
@@ -1083,7 +1097,6 @@ static int rebind_subsystems(struct cgroupfs_root *root,
                }
        }
        root->subsys_mask = root->actual_subsys_mask = final_subsys_mask;
-       synchronize_rcu();
 
        return 0;
 }
@@ -1393,6 +1406,7 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
        INIT_LIST_HEAD(&cgrp->allcg_node);
        INIT_LIST_HEAD(&cgrp->release_list);
        INIT_LIST_HEAD(&cgrp->pidlists);
+       INIT_WORK(&cgrp->free_work, cgroup_free_fn);
        mutex_init(&cgrp->pidlist_mutex);
        INIT_LIST_HEAD(&cgrp->event_list);
        spin_lock_init(&cgrp->event_list_lock);
@@ -1597,6 +1611,8 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
                struct cgroupfs_root *existing_root;
                const struct cred *cred;
                int i;
+               struct hlist_node *node;
+               struct css_set *cg;
 
                BUG_ON(sb->s_root != NULL);
 
@@ -1650,14 +1666,8 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
                /* Link the top cgroup in this hierarchy into all
                 * the css_set objects */
                write_lock(&css_set_lock);
-               for (i = 0; i < CSS_SET_TABLE_SIZE; i++) {
-                       struct hlist_head *hhead = &css_set_table[i];
-                       struct hlist_node *node;
-                       struct css_set *cg;
-
-                       hlist_for_each_entry(cg, node, hhead, hlist)
-                               link_css_set(&tmp_cg_links, cg, root_cgrp);
-               }
+               hash_for_each(css_set_table, i, node, cg, hlist)
+                       link_css_set(&tmp_cg_links, cg, root_cgrp);
                write_unlock(&css_set_lock);
 
                free_cg_links(&tmp_cg_links);
@@ -1773,7 +1783,7 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
        rcu_lockdep_assert(rcu_read_lock_held() || cgroup_lock_is_held(),
                           "cgroup_path() called without proper locking");
 
-       if (!dentry || cgrp == dummytop) {
+       if (cgrp == dummytop) {
                /*
                 * Inactive subsystems have no dentry for their root
                 * cgroup
@@ -1982,7 +1992,6 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
                        ss->attach(cgrp, &tset);
        }
 
-       synchronize_rcu();
 out:
        if (retval) {
                for_each_subsys(root, ss) {
@@ -2151,7 +2160,6 @@ static int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
        /*
         * step 5: success! and cleanup
         */
-       synchronize_rcu();
        retval = 0;
 out_put_css_set_refs:
        if (retval) {
@@ -2769,14 +2777,14 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys,
                if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgrp->parent)
                        continue;
 
-               if (is_add)
+               if (is_add) {
                        err = cgroup_add_file(cgrp, subsys, cft);
-               else
-                       err = cgroup_rm_file(cgrp, cft);
-               if (err) {
-                       pr_warning("cgroup_addrm_files: failed to %s %s, err=%d\n",
-                                  is_add ? "add" : "remove", cft->name, err);
+                       if (err)
+                               pr_warn("cgroup_addrm_files: failed to add %s, err=%d\n",
+                                       cft->name, err);
                        ret = err;
+               } else {
+                       cgroup_rm_file(cgrp, cft);
                }
        }
        return ret;
@@ -3017,6 +3025,32 @@ struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
 }
 EXPORT_SYMBOL_GPL(cgroup_next_descendant_pre);
 
+/**
+ * cgroup_rightmost_descendant - return the rightmost descendant of a cgroup
+ * @pos: cgroup of interest
+ *
+ * Return the rightmost descendant of @pos.  If there's no descendant,
+ * @pos is returned.  This can be used during pre-order traversal to skip
+ * the subtree of @pos.
+ */
+struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos)
+{
+       struct cgroup *last, *tmp;
+
+       WARN_ON_ONCE(!rcu_read_lock_held());
+
+       do {
+               last = pos;
+               /* ->prev isn't RCU safe, walk ->next till the end */
+               pos = NULL;
+               list_for_each_entry_rcu(tmp, &last->children, sibling)
+                       pos = tmp;
+       } while (pos);
+
+       return last;
+}
+EXPORT_SYMBOL_GPL(cgroup_rightmost_descendant);
+
 static struct cgroup *cgroup_leftmost_descendant(struct cgroup *pos)
 {
        struct cgroup *last;
@@ -3752,8 +3786,13 @@ static void cgroup_event_remove(struct work_struct *work)
                        remove);
        struct cgroup *cgrp = event->cgrp;
 
+       remove_wait_queue(event->wqh, &event->wait);
+
        event->cft->unregister_event(cgrp, event->cft, event->eventfd);
 
+       /* Notify userspace the event is going away. */
+       eventfd_signal(event->eventfd, 1);
+
        eventfd_ctx_put(event->eventfd);
        kfree(event);
        dput(cgrp->dentry);
@@ -3773,15 +3812,25 @@ static int cgroup_event_wake(wait_queue_t *wait, unsigned mode,
        unsigned long flags = (unsigned long)key;
 
        if (flags & POLLHUP) {
-               __remove_wait_queue(event->wqh, &event->wait);
-               spin_lock(&cgrp->event_list_lock);
-               list_del_init(&event->list);
-               spin_unlock(&cgrp->event_list_lock);
                /*
-                * We are in atomic context, but cgroup_event_remove() may
-                * sleep, so we have to call it in workqueue.
+                * If the event has been detached at cgroup removal, we
+                * can simply return knowing the other side will clean up
+                * for us.
+                *
+                * We can't race against event freeing since the other
+                * side will require wqh->lock via remove_wait_queue(),
+                * which we hold.
                 */
-               schedule_work(&event->remove);
+               spin_lock(&cgrp->event_list_lock);
+               if (!list_empty(&event->list)) {
+                       list_del_init(&event->list);
+                       /*
+                        * We are in atomic context, but cgroup_event_remove()
+                        * may sleep, so we have to call it in workqueue.
+                        */
+                       schedule_work(&event->remove);
+               }
+               spin_unlock(&cgrp->event_list_lock);
        }
 
        return 0;
@@ -3807,6 +3856,7 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
                                      const char *buffer)
 {
        struct cgroup_event *event = NULL;
+       struct cgroup *cgrp_cfile;
        unsigned int efd, cfd;
        struct file *efile = NULL;
        struct file *cfile = NULL;
@@ -3862,6 +3912,16 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
                goto fail;
        }
 
+       /*
+        * The file to be monitored must be in the same cgroup as
+        * cgroup.event_control is.
+        */
+       cgrp_cfile = __d_cgrp(cfile->f_dentry->d_parent);
+       if (cgrp_cfile != cgrp) {
+               ret = -EINVAL;
+               goto fail;
+       }
+
        if (!event->cft->register_event || !event->cft->unregister_event) {
                ret = -EINVAL;
                goto fail;
@@ -4135,6 +4195,9 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 
        init_cgroup_housekeeping(cgrp);
 
+       dentry->d_fsdata = cgrp;
+       cgrp->dentry = dentry;
+
        cgrp->parent = parent;
        cgrp->root = parent->root;
        cgrp->top_cgroup = parent->top_cgroup;
@@ -4172,8 +4235,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
        lockdep_assert_held(&dentry->d_inode->i_mutex);
 
        /* allocation complete, commit to creation */
-       dentry->d_fsdata = cgrp;
-       cgrp->dentry = dentry;
        list_add_tail(&cgrp->allcg_node, &root->allcg_list);
        list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
        root->number_of_cgroups++;
@@ -4340,20 +4401,14 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
        /*
         * Unregister events and notify userspace.
         * Notify userspace about cgroup removing only after rmdir of cgroup
-        * directory to avoid race between userspace and kernelspace. Use
-        * a temporary list to avoid a deadlock with cgroup_event_wake(). Since
-        * cgroup_event_wake() is called with the wait queue head locked,
-        * remove_wait_queue() cannot be called while holding event_list_lock.
+        * directory to avoid race between userspace and kernelspace.
         */
        spin_lock(&cgrp->event_list_lock);
-       list_splice_init(&cgrp->event_list, &tmp_list);
-       spin_unlock(&cgrp->event_list_lock);
-       list_for_each_entry_safe(event, tmp, &tmp_list, list) {
+       list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) {
                list_del_init(&event->list);
-               remove_wait_queue(event->wqh, &event->wait);
-               eventfd_signal(event->eventfd, 1);
                schedule_work(&event->remove);
        }
+       spin_unlock(&cgrp->event_list_lock);
 
        return 0;
 }
@@ -4438,6 +4493,9 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
 {
        struct cgroup_subsys_state *css;
        int i, ret;
+       struct hlist_node *node, *tmp;
+       struct css_set *cg;
+       unsigned long key;
 
        /* check name and function validity */
        if (ss->name == NULL || strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN ||
@@ -4503,23 +4561,17 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
         * this is all done under the css_set_lock.
         */
        write_lock(&css_set_lock);
-       for (i = 0; i < CSS_SET_TABLE_SIZE; i++) {
-               struct css_set *cg;
-               struct hlist_node *node, *tmp;
-               struct hlist_head *bucket = &css_set_table[i], *new_bucket;
-
-               hlist_for_each_entry_safe(cg, node, tmp, bucket, hlist) {
-                       /* skip entries that we already rehashed */
-                       if (cg->subsys[ss->subsys_id])
-                               continue;
-                       /* remove existing entry */
-                       hlist_del(&cg->hlist);
-                       /* set new value */
-                       cg->subsys[ss->subsys_id] = css;
-                       /* recompute hash and restore entry */
-                       new_bucket = css_set_hash(cg->subsys);
-                       hlist_add_head(&cg->hlist, new_bucket);
-               }
+       hash_for_each_safe(css_set_table, i, node, tmp, cg, hlist) {
+               /* skip entries that we already rehashed */
+               if (cg->subsys[ss->subsys_id])
+                       continue;
+               /* remove existing entry */
+               hash_del(&cg->hlist);
+               /* set new value */
+               cg->subsys[ss->subsys_id] = css;
+               /* recompute hash and restore entry */
+               key = css_set_hash(cg->subsys);
+               hash_add(css_set_table, node, key);
        }
        write_unlock(&css_set_lock);
 
@@ -4551,7 +4603,6 @@ EXPORT_SYMBOL_GPL(cgroup_load_subsys);
 void cgroup_unload_subsys(struct cgroup_subsys *ss)
 {
        struct cg_cgroup_link *link;
-       struct hlist_head *hhead;
 
        BUG_ON(ss->module == NULL);
 
@@ -4585,11 +4636,12 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
        write_lock(&css_set_lock);
        list_for_each_entry(link, &dummytop->css_sets, cgrp_link_list) {
                struct css_set *cg = link->cg;
+               unsigned long key;
 
-               hlist_del(&cg->hlist);
+               hash_del(&cg->hlist);
                cg->subsys[ss->subsys_id] = NULL;
-               hhead = css_set_hash(cg->subsys);
-               hlist_add_head(&cg->hlist, hhead);
+               key = css_set_hash(cg->subsys);
+               hash_add(css_set_table, &cg->hlist, key);
        }
        write_unlock(&css_set_lock);
 
@@ -4631,9 +4683,6 @@ int __init cgroup_init_early(void)
        list_add(&init_css_set_link.cg_link_list,
                 &init_css_set.cg_links);
 
-       for (i = 0; i < CSS_SET_TABLE_SIZE; i++)
-               INIT_HLIST_HEAD(&css_set_table[i]);
-
        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                struct cgroup_subsys *ss = subsys[i];
 
@@ -4667,7 +4716,7 @@ int __init cgroup_init(void)
 {
        int err;
        int i;
-       struct hlist_head *hhead;
+       unsigned long key;
 
        err = bdi_init(&cgroup_backing_dev_info);
        if (err)
@@ -4686,8 +4735,8 @@ int __init cgroup_init(void)
        }
 
        /* Add init_css_set to the hash table */
-       hhead = css_set_hash(init_css_set.subsys);
-       hlist_add_head(&init_css_set.hlist, hhead);
+       key = css_set_hash(init_css_set.subsys);
+       hash_add(css_set_table, &init_css_set.hlist, key);
        BUG_ON(!init_root_id(&rootnode));
 
        cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);
@@ -4982,8 +5031,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
        }
        task_unlock(tsk);
 
-       if (cg)
-               put_css_set_taskexit(cg);
+       put_css_set_taskexit(cg);
 }
 
 /**
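The css_set changes above replace the open-coded bucket array with the generic <linux/hashtable.h> helpers: css_set_hash() now only folds the subsystem pointers into a key, and hash_add()/hash_del()/hash_for_each_possible() pick the bucket. A compact sketch of that API in the same pre-3.9 form used here, where the iterators still take an hlist_node cursor; all demo_* names are hypothetical:

#include <linux/hashtable.h>

struct demo_item {
        unsigned long key;
        struct hlist_node node;
};

static DEFINE_HASHTABLE(demo_table, 7);         /* 2^7 buckets */

static void demo_insert(struct demo_item *item)
{
        hash_add(demo_table, &item->node, item->key);
}

static struct demo_item *demo_lookup(unsigned long key)
{
        struct demo_item *item;
        struct hlist_node *pos;

        hash_for_each_possible(demo_table, item, pos, node, key)
                if (item->key == key)
                        return item;
        return NULL;
}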
index f6150e92dfc9366488ce45a2b91e98b9fbd7e150..36700e9e2be9125f976963cb1fca4316460dee00 100644 (file)
@@ -535,9 +535,11 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
        return 0;
 }
 
-asmlinkage long
-compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
-       struct compat_rusage __user *ru)
+COMPAT_SYSCALL_DEFINE4(wait4,
+       compat_pid_t, pid,
+       compat_uint_t __user *, stat_addr,
+       int, options,
+       struct compat_rusage __user *, ru)
 {
        if (!ru) {
                return sys_wait4(pid, stat_addr, options, NULL);
@@ -564,9 +566,10 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
        }
 }
 
-asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
-               struct compat_siginfo __user *uinfo, int options,
-               struct compat_rusage __user *uru)
+COMPAT_SYSCALL_DEFINE5(waitid,
+               int, which, compat_pid_t, pid,
+               struct compat_siginfo __user *, uinfo, int, options,
+               struct compat_rusage __user *, uru)
 {
        siginfo_t info;
        struct rusage ru;
@@ -584,7 +587,11 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
                return ret;
 
        if (uru) {
-               ret = put_compat_rusage(&ru, uru);
+               /* sys_waitid() overwrites everything in ru */
+               if (COMPAT_USE_64BIT_TIME)
+                       ret = copy_to_user(uru, &ru, sizeof(ru));
+               else
+                       ret = put_compat_rusage(&ru, uru);
                if (ret)
                        return ret;
        }
@@ -994,7 +1001,7 @@ compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese,
        sigset_from_compat(&s, &s32);
 
        if (uts) {
-               if (get_compat_timespec(&t, uts))
+               if (compat_get_timespec(&t, uts))
                        return -EFAULT;
        }
 
index e0e07fd55508eeca456e7c8e6567dec1d25a9586..65349f07b8782a19f13014e66b8ff8ac0e7a5ecf 100644 (file)
@@ -1,29 +1,41 @@
+/*
+ * Context tracking: Probe on high level context boundaries such as kernel
+ * and userspace. This includes syscalls and exceptions entry/exit.
+ *
+ * This is used by RCU to remove its dependency on the timer tick while a CPU
+ * runs in userspace.
+ *
+ *  Started by Frederic Weisbecker:
+ *
+ * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
+ *
+ * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
+ * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
+ *
+ */
+
 #include <linux/context_tracking.h>
+#include <linux/kvm_host.h>
 #include <linux/rcupdate.h>
 #include <linux/sched.h>
-#include <linux/percpu.h>
 #include <linux/hardirq.h>
+#include <linux/export.h>
 
-struct context_tracking {
-       /*
-        * When active is false, hooks are not set to
-        * minimize overhead: TIF flags are cleared
-        * and calls to user_enter/exit are ignored. This
-        * may be further optimized using static keys.
-        */
-       bool active;
-       enum {
-               IN_KERNEL = 0,
-               IN_USER,
-       } state;
-};
-
-static DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
+DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
 #ifdef CONFIG_CONTEXT_TRACKING_FORCE
        .active = true,
 #endif
 };
 
+/**
+ * user_enter - Inform the context tracking that the CPU is going to
+ *              enter userspace mode.
+ *
+ * This function must be called right before we switch from the kernel
+ * to userspace, when it's guaranteed the remaining kernel instructions
+ * to execute won't use any RCU read side critical section because this
+ * function sets RCU in extended quiescent state.
+ */
 void user_enter(void)
 {
        unsigned long flags;
@@ -39,40 +51,90 @@ void user_enter(void)
        if (in_interrupt())
                return;
 
+       /* Kernel threads aren't supposed to go to userspace */
        WARN_ON_ONCE(!current->mm);
 
        local_irq_save(flags);
        if (__this_cpu_read(context_tracking.active) &&
            __this_cpu_read(context_tracking.state) != IN_USER) {
-               __this_cpu_write(context_tracking.state, IN_USER);
+               /*
+                * At this stage, only low level arch entry code remains and
+                * then we'll run in userspace. We can assume there won't be
+                * any RCU read-side critical section until the next call to
+                * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
+                * on the tick.
+                */
+               vtime_user_enter(current);
                rcu_user_enter();
+               __this_cpu_write(context_tracking.state, IN_USER);
        }
        local_irq_restore(flags);
 }
 
+
+/**
+ * user_exit - Inform the context tracking that the CPU is
+ *             exiting userspace mode and entering the kernel.
+ *
+ * This function must be called after we enter the kernel from userspace and
+ * before any use of an RCU read side critical section. This potentially includes
+ * any high level kernel code like syscalls, exceptions, signal handling, etc.
+ *
+ * This call supports re-entrancy. This way it can be called from any exception
+ * handler without needing to know if we came from userspace or not.
+ */
 void user_exit(void)
 {
        unsigned long flags;
 
-       /*
-        * Some contexts may involve an exception occuring in an irq,
-        * leading to that nesting:
-        * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
-        * This would mess up the dyntick_nesting count though. And rcu_irq_*()
-        * helpers are enough to protect RCU uses inside the exception. So
-        * just return immediately if we detect we are in an IRQ.
-        */
        if (in_interrupt())
                return;
 
        local_irq_save(flags);
        if (__this_cpu_read(context_tracking.state) == IN_USER) {
-               __this_cpu_write(context_tracking.state, IN_KERNEL);
+               /*
+                * We are going to run code that may use RCU. Inform
+                * RCU core about that (ie: we may need the tick again).
+                */
                rcu_user_exit();
+               vtime_user_exit(current);
+               __this_cpu_write(context_tracking.state, IN_KERNEL);
        }
        local_irq_restore(flags);
 }
 
+void guest_enter(void)
+{
+       if (vtime_accounting_enabled())
+               vtime_guest_enter(current);
+       else
+               __guest_enter();
+}
+EXPORT_SYMBOL_GPL(guest_enter);
+
+void guest_exit(void)
+{
+       if (vtime_accounting_enabled())
+               vtime_guest_exit(current);
+       else
+               __guest_exit();
+}
+EXPORT_SYMBOL_GPL(guest_exit);
+
+
+/**
+ * context_tracking_task_switch - context switch the syscall callbacks
+ * @prev: the task that is being switched out
+ * @next: the task that is being switched in
+ *
+ * The context tracking uses the syscall slow path to implement its user-kernel
+ * boundaries probes on syscalls. This way it doesn't impact the syscall fast
+ * path on CPUs that don't do context tracking.
+ *
+ * But we need to clear the flag on the previous task because it may later
+ * migrate to some CPU that doesn't do the context tracking. As such the TIF
+ * flag may not be desired there.
+ */
 void context_tracking_task_switch(struct task_struct *prev,
                             struct task_struct *next)
 {
index 3046a503242c8eb67f031d445713a9e55c986e73..b5e4ab2d427e874404347c7e20186ed4cfd2f488 100644 (file)
@@ -224,11 +224,13 @@ void clear_tasks_mm_cpumask(int cpu)
 static inline void check_for_tasks(int cpu)
 {
        struct task_struct *p;
+       cputime_t utime, stime;
 
        write_lock_irq(&tasklist_lock);
        for_each_process(p) {
+               task_cputime(p, &utime, &stime);
                if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
-                   (p->utime || p->stime))
+                   (utime || stime))
                        printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
                                "(state = %ld, flags = %x)\n",
                                p->comm, task_pid_nr(p), cpu,
@@ -254,6 +256,8 @@ static int __ref take_cpu_down(void *_param)
                return err;
 
        cpu_notify(CPU_DYING | param->mod, param->hcpu);
+       /* Park the stopper thread */
+       kthread_park(current);
        return 0;
 }
 
index 7bb63eea6eb85e4781eac7289e6f4a4edf6d3520..4f9dfe43ecbd777d7bf03bed72c5da566e952b62 100644 (file)
 #include <linux/workqueue.h>
 #include <linux/cgroup.h>
 
-/*
- * Workqueue for cpuset related tasks.
- *
- * Using kevent workqueue may cause deadlock when memory_migrate
- * is set. So we create a separate workqueue thread for cpuset.
- */
-static struct workqueue_struct *cpuset_wq;
-
 /*
  * Tracks how many cpusets are currently defined in system.
  * When there is only one cpuset (the root cpuset) we can
@@ -95,18 +87,21 @@ struct cpuset {
        cpumask_var_t cpus_allowed;     /* CPUs allowed to tasks in cpuset */
        nodemask_t mems_allowed;        /* Memory Nodes allowed to tasks */
 
-       struct cpuset *parent;          /* my parent */
-
        struct fmeter fmeter;           /* memory_pressure filter */
 
+       /*
+        * Tasks are being attached to this cpuset.  Used to prevent
+        * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
+        */
+       int attach_in_progress;
+
        /* partition number for rebuild_sched_domains() */
        int pn;
 
        /* for custom sched domain */
        int relax_domain_level;
 
-       /* used for walking a cpuset hierarchy */
-       struct list_head stack_list;
+       struct work_struct hotplug_work;
 };
 
 /* Retrieve the cpuset for a cgroup */
@@ -123,6 +118,15 @@ static inline struct cpuset *task_cs(struct task_struct *task)
                            struct cpuset, css);
 }
 
+static inline struct cpuset *parent_cs(const struct cpuset *cs)
+{
+       struct cgroup *pcgrp = cs->css.cgroup->parent;
+
+       if (pcgrp)
+               return cgroup_cs(pcgrp);
+       return NULL;
+}
+
 #ifdef CONFIG_NUMA
 static inline bool task_has_mempolicy(struct task_struct *task)
 {
@@ -138,6 +142,7 @@ static inline bool task_has_mempolicy(struct task_struct *task)
 
 /* bits in struct cpuset flags field */
 typedef enum {
+       CS_ONLINE,
        CS_CPU_EXCLUSIVE,
        CS_MEM_EXCLUSIVE,
        CS_MEM_HARDWALL,
@@ -147,13 +152,12 @@ typedef enum {
        CS_SPREAD_SLAB,
 } cpuset_flagbits_t;
 
-/* the type of hotplug event */
-enum hotplug_event {
-       CPUSET_CPU_OFFLINE,
-       CPUSET_MEM_OFFLINE,
-};
-
 /* convenient tests for these bits */
+static inline bool is_cpuset_online(const struct cpuset *cs)
+{
+       return test_bit(CS_ONLINE, &cs->flags);
+}
+
 static inline int is_cpu_exclusive(const struct cpuset *cs)
 {
        return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
@@ -190,27 +194,52 @@ static inline int is_spread_slab(const struct cpuset *cs)
 }
 
 static struct cpuset top_cpuset = {
-       .flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
+       .flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
+                 (1 << CS_MEM_EXCLUSIVE)),
 };
 
+/**
+ * cpuset_for_each_child - traverse online children of a cpuset
+ * @child_cs: loop cursor pointing to the current child
+ * @pos_cgrp: used for iteration
+ * @parent_cs: target cpuset to walk children of
+ *
+ * Walk @child_cs through the online children of @parent_cs.  Must be used
+ * with RCU read locked.
+ */
+#define cpuset_for_each_child(child_cs, pos_cgrp, parent_cs)           \
+       cgroup_for_each_child((pos_cgrp), (parent_cs)->css.cgroup)      \
+               if (is_cpuset_online(((child_cs) = cgroup_cs((pos_cgrp)))))
+
+/**
+ * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
+ * @des_cs: loop cursor pointing to the current descendant
+ * @pos_cgrp: used for iteration
+ * @root_cs: target cpuset to walk descendants of
+ *
+ * Walk @des_cs through the online descendants of @root_cs.  Must be used
+ * with RCU read locked.  The caller may modify @pos_cgrp by calling
+ * cgroup_rightmost_descendant() to skip subtree.
+ */
+#define cpuset_for_each_descendant_pre(des_cs, pos_cgrp, root_cs)      \
+       cgroup_for_each_descendant_pre((pos_cgrp), (root_cs)->css.cgroup) \
+               if (is_cpuset_online(((des_cs) = cgroup_cs((pos_cgrp)))))
+
 /*
- * There are two global mutexes guarding cpuset structures.  The first
- * is the main control groups cgroup_mutex, accessed via
- * cgroup_lock()/cgroup_unlock().  The second is the cpuset-specific
- * callback_mutex, below. They can nest.  It is ok to first take
- * cgroup_mutex, then nest callback_mutex.  We also require taking
- * task_lock() when dereferencing a task's cpuset pointer.  See "The
- * task_lock() exception", at the end of this comment.
- *
- * A task must hold both mutexes to modify cpusets.  If a task
- * holds cgroup_mutex, then it blocks others wanting that mutex,
- * ensuring that it is the only task able to also acquire callback_mutex
- * and be able to modify cpusets.  It can perform various checks on
- * the cpuset structure first, knowing nothing will change.  It can
- * also allocate memory while just holding cgroup_mutex.  While it is
- * performing these checks, various callback routines can briefly
- * acquire callback_mutex to query cpusets.  Once it is ready to make
- * the changes, it takes callback_mutex, blocking everyone else.
+ * There are two global mutexes guarding cpuset structures - cpuset_mutex
+ * and callback_mutex.  The latter may nest inside the former.  We also
+ * require taking task_lock() when dereferencing a task's cpuset pointer.
+ * See "The task_lock() exception", at the end of this comment.
+ *
+ * A task must hold both mutexes to modify cpusets.  If a task holds
+ * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
+ * is the only task able to also acquire callback_mutex and be able to
+ * modify cpusets.  It can perform various checks on the cpuset structure
+ * first, knowing nothing will change.  It can also allocate memory while
+ * just holding cpuset_mutex.  While it is performing these checks, various
+ * callback routines can briefly acquire callback_mutex to query cpusets.
+ * Once it is ready to make the changes, it takes callback_mutex, blocking
+ * everyone else.
  *
  * Calls to the kernel memory allocator can not be made while holding
  * callback_mutex, as that would risk double tripping on callback_mutex
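
The rewritten locking comment above keeps the old two-level scheme but
replaces cgroup_mutex with the cpuset-local cpuset_mutex introduced below;
callback_mutex still nests inside it.  A condensed sketch of the writer-side
pattern the later hunks repeat when changing a mask (cs and new_cpus are
placeholder names, not identifiers from the diff):

    /* Illustrative only: the nesting order described in the comment above. */
    mutex_lock(&cpuset_mutex);              /* serialize cpuset writers       */
    /* validate_change(), allocations and other checks happen here */
    mutex_lock(&callback_mutex);            /* block readers of the masks     */
    cpumask_copy(cs->cpus_allowed, new_cpus);
    mutex_unlock(&callback_mutex);
    update_tasks_cpumask(cs, NULL);         /* propagate to member tasks      */
    mutex_unlock(&cpuset_mutex);
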
@@ -232,6 +261,7 @@ static struct cpuset top_cpuset = {
  * guidelines for accessing subsystem state in kernel/cgroup.c
  */
 
+static DEFINE_MUTEX(cpuset_mutex);
 static DEFINE_MUTEX(callback_mutex);
 
 /*
@@ -245,6 +275,17 @@ static char cpuset_name[CPUSET_NAME_LEN];
 static char cpuset_nodelist[CPUSET_NODELIST_LEN];
 static DEFINE_SPINLOCK(cpuset_buffer_lock);
 
+/*
+ * CPU / memory hotplug is handled asynchronously.
+ */
+static struct workqueue_struct *cpuset_propagate_hotplug_wq;
+
+static void cpuset_hotplug_workfn(struct work_struct *work);
+static void cpuset_propagate_hotplug_workfn(struct work_struct *work);
+static void schedule_cpuset_propagate_hotplug(struct cpuset *cs);
+
+static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
+
 /*
  * This is ugly, but preserves the userspace API for existing cpuset
  * users. If someone tries to mount the "cpuset" filesystem, we
@@ -289,7 +330,7 @@ static void guarantee_online_cpus(const struct cpuset *cs,
                                  struct cpumask *pmask)
 {
        while (cs && !cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
-               cs = cs->parent;
+               cs = parent_cs(cs);
        if (cs)
                cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
        else
@@ -314,7 +355,7 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
 {
        while (cs && !nodes_intersects(cs->mems_allowed,
                                        node_states[N_MEMORY]))
-               cs = cs->parent;
+               cs = parent_cs(cs);
        if (cs)
                nodes_and(*pmask, cs->mems_allowed,
                                        node_states[N_MEMORY]);
@@ -326,7 +367,7 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
 /*
  * update task's spread flag if cpuset's page/slab spread flag is set
  *
- * Called with callback_mutex/cgroup_mutex held
+ * Called with callback_mutex/cpuset_mutex held
  */
 static void cpuset_update_task_spread_flag(struct cpuset *cs,
                                        struct task_struct *tsk)
@@ -346,7 +387,7 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs,
  *
  * One cpuset is a subset of another if all its allowed CPUs and
  * Memory Nodes are a subset of the other, and its exclusive flags
- * are only set if the other's are set.  Call holding cgroup_mutex.
+ * are only set if the other's are set.  Call holding cpuset_mutex.
  */
 
 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
@@ -395,7 +436,7 @@ static void free_trial_cpuset(struct cpuset *trial)
  * If we replaced the flag and mask values of the current cpuset
  * (cur) with those values in the trial cpuset (trial), would
  * our various subset and exclusive rules still be valid?  Presumes
- * cgroup_mutex held.
+ * cpuset_mutex held.
  *
  * 'cur' is the address of an actual, in-use cpuset.  Operations
  * such as list traversal that depend on the actual address of the
@@ -412,48 +453,58 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 {
        struct cgroup *cont;
        struct cpuset *c, *par;
+       int ret;
+
+       rcu_read_lock();
 
        /* Each of our child cpusets must be a subset of us */
-       list_for_each_entry(cont, &cur->css.cgroup->children, sibling) {
-               if (!is_cpuset_subset(cgroup_cs(cont), trial))
-                       return -EBUSY;
-       }
+       ret = -EBUSY;
+       cpuset_for_each_child(c, cont, cur)
+               if (!is_cpuset_subset(c, trial))
+                       goto out;
 
        /* Remaining checks don't apply to root cpuset */
+       ret = 0;
        if (cur == &top_cpuset)
-               return 0;
+               goto out;
 
-       par = cur->parent;
+       par = parent_cs(cur);
 
        /* We must be a subset of our parent cpuset */
+       ret = -EACCES;
        if (!is_cpuset_subset(trial, par))
-               return -EACCES;
+               goto out;
 
        /*
         * If either I or some sibling (!= me) is exclusive, we can't
         * overlap
         */
-       list_for_each_entry(cont, &par->css.cgroup->children, sibling) {
-               c = cgroup_cs(cont);
+       ret = -EINVAL;
+       cpuset_for_each_child(c, cont, par) {
                if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
                    c != cur &&
                    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
-                       return -EINVAL;
+                       goto out;
                if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
                    c != cur &&
                    nodes_intersects(trial->mems_allowed, c->mems_allowed))
-                       return -EINVAL;
+                       goto out;
        }
 
-       /* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */
-       if (cgroup_task_count(cur->css.cgroup)) {
-               if (cpumask_empty(trial->cpus_allowed) ||
-                   nodes_empty(trial->mems_allowed)) {
-                       return -ENOSPC;
-               }
-       }
+       /*
+        * Cpusets with tasks - existing or newly being attached - can't
+        * have empty cpus_allowed or mems_allowed.
+        */
+       ret = -ENOSPC;
+       if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) &&
+           (cpumask_empty(trial->cpus_allowed) ||
+            nodes_empty(trial->mems_allowed)))
+               goto out;
 
-       return 0;
+       ret = 0;
+out:
+       rcu_read_unlock();
+       return ret;
 }
 
 #ifdef CONFIG_SMP
@@ -474,31 +525,24 @@ update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
        return;
 }
 
-static void
-update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
+static void update_domain_attr_tree(struct sched_domain_attr *dattr,
+                                   struct cpuset *root_cs)
 {
-       LIST_HEAD(q);
-
-       list_add(&c->stack_list, &q);
-       while (!list_empty(&q)) {
-               struct cpuset *cp;
-               struct cgroup *cont;
-               struct cpuset *child;
-
-               cp = list_first_entry(&q, struct cpuset, stack_list);
-               list_del(q.next);
+       struct cpuset *cp;
+       struct cgroup *pos_cgrp;
 
-               if (cpumask_empty(cp->cpus_allowed))
+       rcu_read_lock();
+       cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
+               /* skip the whole subtree if @cp doesn't have any CPU */
+               if (cpumask_empty(cp->cpus_allowed)) {
+                       pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
                        continue;
+               }
 
                if (is_sched_load_balance(cp))
                        update_domain_attr(dattr, cp);
-
-               list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
-                       child = cgroup_cs(cont);
-                       list_add_tail(&child->stack_list, &q);
-               }
        }
+       rcu_read_unlock();
 }
 
 /*
@@ -520,7 +564,7 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
  * domains when operating in the severe memory shortage situations
  * that could cause allocation failures below.
  *
- * Must be called with cgroup_lock held.
+ * Must be called with cpuset_mutex held.
  *
  * The three key local variables below are:
  *    q  - a linked-list queue of cpuset pointers, used to implement a
@@ -558,7 +602,6 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
 static int generate_sched_domains(cpumask_var_t **domains,
                        struct sched_domain_attr **attributes)
 {
-       LIST_HEAD(q);           /* queue of cpusets to be scanned */
        struct cpuset *cp;      /* scans q */
        struct cpuset **csa;    /* array of all cpuset ptrs */
        int csn;                /* how many cpuset ptrs in csa so far */
@@ -567,6 +610,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
        struct sched_domain_attr *dattr;  /* attributes for custom domains */
        int ndoms = 0;          /* number of sched domains in result */
        int nslot;              /* next empty doms[] struct cpumask slot */
+       struct cgroup *pos_cgrp;
 
        doms = NULL;
        dattr = NULL;
@@ -594,33 +638,27 @@ static int generate_sched_domains(cpumask_var_t **domains,
                goto done;
        csn = 0;
 
-       list_add(&top_cpuset.stack_list, &q);
-       while (!list_empty(&q)) {
-               struct cgroup *cont;
-               struct cpuset *child;   /* scans child cpusets of cp */
-
-               cp = list_first_entry(&q, struct cpuset, stack_list);
-               list_del(q.next);
-
-               if (cpumask_empty(cp->cpus_allowed))
-                       continue;
-
+       rcu_read_lock();
+       cpuset_for_each_descendant_pre(cp, pos_cgrp, &top_cpuset) {
                /*
-                * All child cpusets contain a subset of the parent's cpus, so
-                * just skip them, and then we call update_domain_attr_tree()
-                * to calc relax_domain_level of the corresponding sched
-                * domain.
+                * Continue traversing beyond @cp iff @cp has some CPUs and
+                * isn't load balancing.  The former is obvious.  The
+                * latter: All child cpusets contain a subset of the
+                * parent's cpus, so just skip them, and then we call
+                * update_domain_attr_tree() to calc relax_domain_level of
+                * the corresponding sched domain.
                 */
-               if (is_sched_load_balance(cp)) {
-                       csa[csn++] = cp;
+               if (!cpumask_empty(cp->cpus_allowed) &&
+                   !is_sched_load_balance(cp))
                        continue;
-               }
 
-               list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
-                       child = cgroup_cs(cont);
-                       list_add_tail(&child->stack_list, &q);
-               }
-       }
+               if (is_sched_load_balance(cp))
+                       csa[csn++] = cp;
+
+               /* skip @cp's subtree */
+               pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
+       }
+       rcu_read_unlock();
 
        for (i = 0; i < csn; i++)
                csa[i]->pn = i;
@@ -725,25 +763,25 @@ done:
 /*
  * Rebuild scheduler domains.
  *
- * Call with neither cgroup_mutex held nor within get_online_cpus().
- * Takes both cgroup_mutex and get_online_cpus().
+ * If the flag 'sched_load_balance' of any cpuset with non-empty
+ * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
+ * which has that flag enabled, or if any cpuset with a non-empty
+ * 'cpus' is removed, then call this routine to rebuild the
+ * scheduler's dynamic sched domains.
  *
- * Cannot be directly called from cpuset code handling changes
- * to the cpuset pseudo-filesystem, because it cannot be called
- * from code that already holds cgroup_mutex.
+ * Call with cpuset_mutex held.  Takes get_online_cpus().
  */
-static void do_rebuild_sched_domains(struct work_struct *unused)
+static void rebuild_sched_domains_locked(void)
 {
        struct sched_domain_attr *attr;
        cpumask_var_t *doms;
        int ndoms;
 
+       lockdep_assert_held(&cpuset_mutex);
        get_online_cpus();
 
        /* Generate domain masks and attrs */
-       cgroup_lock();
        ndoms = generate_sched_domains(&doms, &attr);
-       cgroup_unlock();
 
        /* Have scheduler rebuild the domains */
        partition_sched_domains(ndoms, doms, attr);
@@ -751,7 +789,7 @@ static void do_rebuild_sched_domains(struct work_struct *unused)
        put_online_cpus();
 }
 #else /* !CONFIG_SMP */
-static void do_rebuild_sched_domains(struct work_struct *unused)
+static void rebuild_sched_domains_locked(void)
 {
 }
 
@@ -763,44 +801,11 @@ static int generate_sched_domains(cpumask_var_t **domains,
 }
 #endif /* CONFIG_SMP */
 
-static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains);
-
-/*
- * Rebuild scheduler domains, asynchronously via workqueue.
- *
- * If the flag 'sched_load_balance' of any cpuset with non-empty
- * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
- * which has that flag enabled, or if any cpuset with a non-empty
- * 'cpus' is removed, then call this routine to rebuild the
- * scheduler's dynamic sched domains.
- *
- * The rebuild_sched_domains() and partition_sched_domains()
- * routines must nest cgroup_lock() inside get_online_cpus(),
- * but such cpuset changes as these must nest that locking the
- * other way, holding cgroup_lock() for much of the code.
- *
- * So in order to avoid an ABBA deadlock, the cpuset code handling
- * these user changes delegates the actual sched domain rebuilding
- * to a separate workqueue thread, which ends up processing the
- * above do_rebuild_sched_domains() function.
- */
-static void async_rebuild_sched_domains(void)
-{
-       queue_work(cpuset_wq, &rebuild_sched_domains_work);
-}
-
-/*
- * Accomplishes the same scheduler domain rebuild as the above
- * async_rebuild_sched_domains(), however it directly calls the
- * rebuild routine synchronously rather than calling it via an
- * asynchronous work thread.
- *
- * This can only be called from code that is not holding
- * cgroup_mutex (not nested in a cgroup_lock() call.)
- */
 void rebuild_sched_domains(void)
 {
-       do_rebuild_sched_domains(NULL);
+       mutex_lock(&cpuset_mutex);
+       rebuild_sched_domains_locked();
+       mutex_unlock(&cpuset_mutex);
 }
 
 /**
@@ -808,7 +813,7 @@ void rebuild_sched_domains(void)
  * @tsk: task to test
  * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
  *
- * Call with cgroup_mutex held.  May take callback_mutex during call.
+ * Call with cpuset_mutex held.  May take callback_mutex during call.
  * Called for each task in a cgroup by cgroup_scan_tasks().
  * Return nonzero if this tasks's cpus_allowed mask should be changed (in other
  * words, if its mask is not equal to its cpuset's mask).
@@ -829,7 +834,7 @@ static int cpuset_test_cpumask(struct task_struct *tsk,
  * cpus_allowed mask needs to be changed.
  *
  * We don't need to re-check for the cgroup/cpuset membership, since we're
- * holding cgroup_lock() at this point.
+ * holding cpuset_mutex at this point.
  */
 static void cpuset_change_cpumask(struct task_struct *tsk,
                                  struct cgroup_scanner *scan)
@@ -842,7 +847,7 @@ static void cpuset_change_cpumask(struct task_struct *tsk,
  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
  * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
  *
- * Called with cgroup_mutex held
+ * Called with cpuset_mutex held
  *
  * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
  * calling callback functions for each.
@@ -920,7 +925,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
        heap_free(&heap);
 
        if (is_load_balanced)
-               async_rebuild_sched_domains();
+               rebuild_sched_domains_locked();
        return 0;
 }
 
@@ -932,7 +937,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
  *    Temporarilly set tasks mems_allowed to target nodes of migration,
  *    so that the migration code can allocate pages on these nodes.
  *
- *    Call holding cgroup_mutex, so current's cpuset won't change
+ *    Call holding cpuset_mutex, so current's cpuset won't change
  *    during this call, as manage_mutex holds off any cpuset_attach()
  *    calls.  Therefore we don't need to take task_lock around the
  *    call to guarantee_online_mems(), as we know no one is changing
@@ -1007,7 +1012,7 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
 /*
  * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy
  * of it to cpuset's new mems_allowed, and migrate pages to new nodes if
- * memory_migrate flag is set. Called with cgroup_mutex held.
+ * memory_migrate flag is set. Called with cpuset_mutex held.
  */
 static void cpuset_change_nodemask(struct task_struct *p,
                                   struct cgroup_scanner *scan)
@@ -1016,7 +1021,7 @@ static void cpuset_change_nodemask(struct task_struct *p,
        struct cpuset *cs;
        int migrate;
        const nodemask_t *oldmem = scan->data;
-       static nodemask_t newmems;      /* protected by cgroup_mutex */
+       static nodemask_t newmems;      /* protected by cpuset_mutex */
 
        cs = cgroup_cs(scan->cg);
        guarantee_online_mems(cs, &newmems);
@@ -1043,7 +1048,7 @@ static void *cpuset_being_rebound;
  * @oldmem: old mems_allowed of cpuset cs
  * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
  *
- * Called with cgroup_mutex held
+ * Called with cpuset_mutex held
  * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
  * if @heap != NULL.
  */
@@ -1065,7 +1070,7 @@ static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
         * take while holding tasklist_lock.  Forks can happen - the
         * mpol_dup() cpuset_being_rebound check will catch such forks,
         * and rebind their vma mempolicies too.  Because we still hold
-        * the global cgroup_mutex, we know that no other rebind effort
+        * the global cpuset_mutex, we know that no other rebind effort
         * will be contending for the global variable cpuset_being_rebound.
         * It's ok if we rebind the same mm twice; mpol_rebind_mm()
         * is idempotent.  Also migrate pages in each mm to new nodes.
@@ -1084,7 +1089,7 @@ static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
  * mempolicies and if the cpuset is marked 'memory_migrate',
  * migrate the tasks pages to the new memory.
  *
- * Call with cgroup_mutex held.  May take callback_mutex during call.
+ * Call with cpuset_mutex held.  May take callback_mutex during call.
  * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
  * lock each such tasks mm->mmap_sem, scan its vma's and rebind
  * their mempolicies to the cpusets new mems_allowed.
@@ -1168,7 +1173,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
                cs->relax_domain_level = val;
                if (!cpumask_empty(cs->cpus_allowed) &&
                    is_sched_load_balance(cs))
-                       async_rebuild_sched_domains();
+                       rebuild_sched_domains_locked();
        }
 
        return 0;
@@ -1182,7 +1187,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
  * Called by cgroup_scan_tasks() for each task in a cgroup.
  *
  * We don't need to re-check for the cgroup/cpuset membership, since we're
- * holding cgroup_lock() at this point.
+ * holding cpuset_mutex at this point.
  */
 static void cpuset_change_flag(struct task_struct *tsk,
                                struct cgroup_scanner *scan)
@@ -1195,7 +1200,7 @@ static void cpuset_change_flag(struct task_struct *tsk,
  * @cs: the cpuset in which each task's spread flags needs to be changed
  * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
  *
- * Called with cgroup_mutex held
+ * Called with cpuset_mutex held
  *
  * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
  * calling callback functions for each.
@@ -1220,7 +1225,7 @@ static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
  * cs:         the cpuset to update
  * turning_on:         whether the flag is being set or cleared
  *
- * Call with cgroup_mutex held.
+ * Call with cpuset_mutex held.
  */
 
 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
@@ -1260,7 +1265,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
        mutex_unlock(&callback_mutex);
 
        if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
-               async_rebuild_sched_domains();
+               rebuild_sched_domains_locked();
 
        if (spread_flag_changed)
                update_tasks_flags(cs, &heap);
@@ -1368,24 +1373,18 @@ static int fmeter_getrate(struct fmeter *fmp)
        return val;
 }
 
-/*
- * Protected by cgroup_lock. The nodemasks must be stored globally because
- * dynamically allocating them is not allowed in can_attach, and they must
- * persist until attach.
- */
-static cpumask_var_t cpus_attach;
-static nodemask_t cpuset_attach_nodemask_from;
-static nodemask_t cpuset_attach_nodemask_to;
-
-/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
+/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
 static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
        struct cpuset *cs = cgroup_cs(cgrp);
        struct task_struct *task;
        int ret;
 
+       mutex_lock(&cpuset_mutex);
+
+       ret = -ENOSPC;
        if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
-               return -ENOSPC;
+               goto out_unlock;
 
        cgroup_taskset_for_each(task, cgrp, tset) {
                /*
@@ -1397,25 +1396,45 @@ static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
                 * set_cpus_allowed_ptr() on all attached tasks before
                 * cpus_allowed may be changed.
                 */
+               ret = -EINVAL;
                if (task->flags & PF_THREAD_BOUND)
-                       return -EINVAL;
-               if ((ret = security_task_setscheduler(task)))
-                       return ret;
+                       goto out_unlock;
+               ret = security_task_setscheduler(task);
+               if (ret)
+                       goto out_unlock;
        }
 
-       /* prepare for attach */
-       if (cs == &top_cpuset)
-               cpumask_copy(cpus_attach, cpu_possible_mask);
-       else
-               guarantee_online_cpus(cs, cpus_attach);
-
-       guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
+       /*
+        * Mark attach is in progress.  This makes validate_change() fail
+        * changes which zero cpus/mems_allowed.
+        */
+       cs->attach_in_progress++;
+       ret = 0;
+out_unlock:
+       mutex_unlock(&cpuset_mutex);
+       return ret;
+}
 
-       return 0;
+static void cpuset_cancel_attach(struct cgroup *cgrp,
+                                struct cgroup_taskset *tset)
+{
+       mutex_lock(&cpuset_mutex);
+       cgroup_cs(cgrp)->attach_in_progress--;
+       mutex_unlock(&cpuset_mutex);
 }
 
+/*
+ * Protected by cpuset_mutex.  cpus_attach is used only by cpuset_attach()
+ * but we can't allocate it dynamically there.  Define it global and
+ * allocate from cpuset_init().
+ */
+static cpumask_var_t cpus_attach;
+
 static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
+       /* static bufs protected by cpuset_mutex */
+       static nodemask_t cpuset_attach_nodemask_from;
+       static nodemask_t cpuset_attach_nodemask_to;
        struct mm_struct *mm;
        struct task_struct *task;
        struct task_struct *leader = cgroup_taskset_first(tset);
@@ -1423,6 +1442,16 @@ static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
        struct cpuset *cs = cgroup_cs(cgrp);
        struct cpuset *oldcs = cgroup_cs(oldcgrp);
 
+       mutex_lock(&cpuset_mutex);
+
+       /* prepare for attach */
+       if (cs == &top_cpuset)
+               cpumask_copy(cpus_attach, cpu_possible_mask);
+       else
+               guarantee_online_cpus(cs, cpus_attach);
+
+       guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
+
        cgroup_taskset_for_each(task, cgrp, tset) {
                /*
                 * can_attach beforehand should guarantee that this doesn't
@@ -1448,6 +1477,18 @@ static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
                                          &cpuset_attach_nodemask_to);
                mmput(mm);
        }
+
+       cs->attach_in_progress--;
+
+       /*
+        * We may have raced with CPU/memory hotunplug.  Trigger hotplug
+        * propagation if @cs doesn't have any CPU or memory.  It will move
+        * the newly added tasks to the nearest parent which can execute.
+        */
+       if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
+               schedule_cpuset_propagate_hotplug(cs);
+
+       mutex_unlock(&cpuset_mutex);
 }
 
 /* The various types of files and directories in a cpuset file system */
@@ -1469,12 +1510,13 @@ typedef enum {
 
 static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
 {
-       int retval = 0;
        struct cpuset *cs = cgroup_cs(cgrp);
        cpuset_filetype_t type = cft->private;
+       int retval = -ENODEV;
 
-       if (!cgroup_lock_live_group(cgrp))
-               return -ENODEV;
+       mutex_lock(&cpuset_mutex);
+       if (!is_cpuset_online(cs))
+               goto out_unlock;
 
        switch (type) {
        case FILE_CPU_EXCLUSIVE:
@@ -1508,18 +1550,20 @@ static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
                retval = -EINVAL;
                break;
        }
-       cgroup_unlock();
+out_unlock:
+       mutex_unlock(&cpuset_mutex);
        return retval;
 }
 
 static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
 {
-       int retval = 0;
        struct cpuset *cs = cgroup_cs(cgrp);
        cpuset_filetype_t type = cft->private;
+       int retval = -ENODEV;
 
-       if (!cgroup_lock_live_group(cgrp))
-               return -ENODEV;
+       mutex_lock(&cpuset_mutex);
+       if (!is_cpuset_online(cs))
+               goto out_unlock;
 
        switch (type) {
        case FILE_SCHED_RELAX_DOMAIN_LEVEL:
@@ -1529,7 +1573,8 @@ static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
                retval = -EINVAL;
                break;
        }
-       cgroup_unlock();
+out_unlock:
+       mutex_unlock(&cpuset_mutex);
        return retval;
 }
 
@@ -1539,17 +1584,36 @@ static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
 static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
                                const char *buf)
 {
-       int retval = 0;
        struct cpuset *cs = cgroup_cs(cgrp);
        struct cpuset *trialcs;
+       int retval = -ENODEV;
+
+       /*
+        * CPU or memory hotunplug may leave @cs w/o any execution
+        * resources, in which case the hotplug code asynchronously updates
+        * configuration and transfers all tasks to the nearest ancestor
+        * which can execute.
+        *
+        * As writes to "cpus" or "mems" may restore @cs's execution
+        * resources, wait for the previously scheduled operations before
+        * proceeding, so that we don't end up repeatedly removing tasks
+        * added after execution capability is restored.
+        *
+        * Flushing cpuset_hotplug_work is enough to synchronize against
+        * hotplug handling; however, cpuset_attach() may schedule
+        * propagation work directly.  Flush the workqueue too.
+        */
+       flush_work(&cpuset_hotplug_work);
+       flush_workqueue(cpuset_propagate_hotplug_wq);
 
-       if (!cgroup_lock_live_group(cgrp))
-               return -ENODEV;
+       mutex_lock(&cpuset_mutex);
+       if (!is_cpuset_online(cs))
+               goto out_unlock;
 
        trialcs = alloc_trial_cpuset(cs);
        if (!trialcs) {
                retval = -ENOMEM;
-               goto out;
+               goto out_unlock;
        }
 
        switch (cft->private) {
@@ -1565,8 +1629,8 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
        }
 
        free_trial_cpuset(trialcs);
-out:
-       cgroup_unlock();
+out_unlock:
+       mutex_unlock(&cpuset_mutex);
        return retval;
 }
 
@@ -1790,15 +1854,12 @@ static struct cftype files[] = {
 
 static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cont)
 {
-       struct cgroup *parent_cg = cont->parent;
-       struct cgroup *tmp_cg;
-       struct cpuset *parent, *cs;
+       struct cpuset *cs;
 
-       if (!parent_cg)
+       if (!cont->parent)
                return &top_cpuset.css;
-       parent = cgroup_cs(parent_cg);
 
-       cs = kmalloc(sizeof(*cs), GFP_KERNEL);
+       cs = kzalloc(sizeof(*cs), GFP_KERNEL);
        if (!cs)
                return ERR_PTR(-ENOMEM);
        if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
@@ -1806,22 +1867,38 @@ static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cont)
                return ERR_PTR(-ENOMEM);
        }
 
-       cs->flags = 0;
-       if (is_spread_page(parent))
-               set_bit(CS_SPREAD_PAGE, &cs->flags);
-       if (is_spread_slab(parent))
-               set_bit(CS_SPREAD_SLAB, &cs->flags);
        set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
        cpumask_clear(cs->cpus_allowed);
        nodes_clear(cs->mems_allowed);
        fmeter_init(&cs->fmeter);
+       INIT_WORK(&cs->hotplug_work, cpuset_propagate_hotplug_workfn);
        cs->relax_domain_level = -1;
 
-       cs->parent = parent;
+       return &cs->css;
+}
+
+static int cpuset_css_online(struct cgroup *cgrp)
+{
+       struct cpuset *cs = cgroup_cs(cgrp);
+       struct cpuset *parent = parent_cs(cs);
+       struct cpuset *tmp_cs;
+       struct cgroup *pos_cg;
+
+       if (!parent)
+               return 0;
+
+       mutex_lock(&cpuset_mutex);
+
+       set_bit(CS_ONLINE, &cs->flags);
+       if (is_spread_page(parent))
+               set_bit(CS_SPREAD_PAGE, &cs->flags);
+       if (is_spread_slab(parent))
+               set_bit(CS_SPREAD_SLAB, &cs->flags);
+
        number_of_cpusets++;
 
-       if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cont->flags))
-               goto skip_clone;
+       if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags))
+               goto out_unlock;
 
        /*
         * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
@@ -1836,35 +1913,49 @@ static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cont)
         * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
         * (and likewise for mems) to the new cgroup.
         */
-       list_for_each_entry(tmp_cg, &parent_cg->children, sibling) {
-               struct cpuset *tmp_cs = cgroup_cs(tmp_cg);
-
-               if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs))
-                       goto skip_clone;
+       rcu_read_lock();
+       cpuset_for_each_child(tmp_cs, pos_cg, parent) {
+               if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
+                       rcu_read_unlock();
+                       goto out_unlock;
+               }
        }
+       rcu_read_unlock();
 
        mutex_lock(&callback_mutex);
        cs->mems_allowed = parent->mems_allowed;
        cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
        mutex_unlock(&callback_mutex);
-skip_clone:
-       return &cs->css;
+out_unlock:
+       mutex_unlock(&cpuset_mutex);
+       return 0;
+}
+
+static void cpuset_css_offline(struct cgroup *cgrp)
+{
+       struct cpuset *cs = cgroup_cs(cgrp);
+
+       mutex_lock(&cpuset_mutex);
+
+       if (is_sched_load_balance(cs))
+               update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
+
+       number_of_cpusets--;
+       clear_bit(CS_ONLINE, &cs->flags);
+
+       mutex_unlock(&cpuset_mutex);
 }
 
 /*
  * If the cpuset being removed has its flag 'sched_load_balance'
  * enabled, then simulate turning sched_load_balance off, which
- * will call async_rebuild_sched_domains().
+ * will call rebuild_sched_domains_locked().
  */
 
 static void cpuset_css_free(struct cgroup *cont)
 {
        struct cpuset *cs = cgroup_cs(cont);
 
-       if (is_sched_load_balance(cs))
-               update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
-
-       number_of_cpusets--;
        free_cpumask_var(cs->cpus_allowed);
        kfree(cs);
 }
@@ -1872,8 +1963,11 @@ static void cpuset_css_free(struct cgroup *cont)
 struct cgroup_subsys cpuset_subsys = {
        .name = "cpuset",
        .css_alloc = cpuset_css_alloc,
+       .css_online = cpuset_css_online,
+       .css_offline = cpuset_css_offline,
        .css_free = cpuset_css_free,
        .can_attach = cpuset_can_attach,
+       .cancel_attach = cpuset_cancel_attach,
        .attach = cpuset_attach,
        .subsys_id = cpuset_subsys_id,
        .base_cftypes = files,
@@ -1924,7 +2018,9 @@ static void cpuset_do_move_task(struct task_struct *tsk,
 {
        struct cgroup *new_cgroup = scan->data;
 
+       cgroup_lock();
        cgroup_attach_task(new_cgroup, tsk);
+       cgroup_unlock();
 }
 
 /**
@@ -1932,7 +2028,7 @@ static void cpuset_do_move_task(struct task_struct *tsk,
  * @from: cpuset in which the tasks currently reside
  * @to: cpuset to which the tasks will be moved
  *
- * Called with cgroup_mutex held
+ * Called with cpuset_mutex held
  * callback_mutex must not be held, as cpuset_attach() will take it.
  *
  * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
@@ -1959,169 +2055,200 @@ static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
  * removing that CPU or node from all cpusets.  If this removes the
  * last CPU or node from a cpuset, then move the tasks in the empty
  * cpuset to its next-highest non-empty parent.
- *
- * Called with cgroup_mutex held
- * callback_mutex must not be held, as cpuset_attach() will take it.
  */
 static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
 {
        struct cpuset *parent;
 
-       /*
-        * The cgroup's css_sets list is in use if there are tasks
-        * in the cpuset; the list is empty if there are none;
-        * the cs->css.refcnt seems always 0.
-        */
-       if (list_empty(&cs->css.cgroup->css_sets))
-               return;
-
        /*
         * Find its next-highest non-empty parent, (top cpuset
         * has online cpus, so can't be empty).
         */
-       parent = cs->parent;
+       parent = parent_cs(cs);
        while (cpumask_empty(parent->cpus_allowed) ||
                        nodes_empty(parent->mems_allowed))
-               parent = parent->parent;
+               parent = parent_cs(parent);
 
        move_member_tasks_to_cpuset(cs, parent);
 }
 
-/*
- * Helper function to traverse cpusets.
- * It can be used to walk the cpuset tree from top to bottom, completing
- * one layer before dropping down to the next (thus always processing a
- * node before any of its children).
+/**
+ * cpuset_propagate_hotplug_workfn - propagate CPU/memory hotplug to a cpuset
+ * @cs: cpuset in interest
+ *
+ * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
+ * offline, update @cs accordingly.  If @cs ends up with no CPU or memory,
+ * all its tasks are moved to the nearest ancestor with both resources.
  */
-static struct cpuset *cpuset_next(struct list_head *queue)
+static void cpuset_propagate_hotplug_workfn(struct work_struct *work)
 {
-       struct cpuset *cp;
-       struct cpuset *child;   /* scans child cpusets of cp */
-       struct cgroup *cont;
+       static cpumask_t off_cpus;
+       static nodemask_t off_mems, tmp_mems;
+       struct cpuset *cs = container_of(work, struct cpuset, hotplug_work);
+       bool is_empty;
 
-       if (list_empty(queue))
-               return NULL;
+       mutex_lock(&cpuset_mutex);
+
+       cpumask_andnot(&off_cpus, cs->cpus_allowed, top_cpuset.cpus_allowed);
+       nodes_andnot(off_mems, cs->mems_allowed, top_cpuset.mems_allowed);
 
-       cp = list_first_entry(queue, struct cpuset, stack_list);
-       list_del(queue->next);
-       list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
-               child = cgroup_cs(cont);
-               list_add_tail(&child->stack_list, queue);
+       /* remove offline cpus from @cs */
+       if (!cpumask_empty(&off_cpus)) {
+               mutex_lock(&callback_mutex);
+               cpumask_andnot(cs->cpus_allowed, cs->cpus_allowed, &off_cpus);
+               mutex_unlock(&callback_mutex);
+               update_tasks_cpumask(cs, NULL);
+       }
+
+       /* remove offline mems from @cs */
+       if (!nodes_empty(off_mems)) {
+               tmp_mems = cs->mems_allowed;
+               mutex_lock(&callback_mutex);
+               nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems);
+               mutex_unlock(&callback_mutex);
+               update_tasks_nodemask(cs, &tmp_mems, NULL);
        }
 
-       return cp;
+       is_empty = cpumask_empty(cs->cpus_allowed) ||
+               nodes_empty(cs->mems_allowed);
+
+       mutex_unlock(&cpuset_mutex);
+
+       /*
+        * If @cs became empty, move tasks to the nearest ancestor with
+        * execution resources.  This is a full cgroup operation which will
+        * also call back into cpuset.  Should be done outside any lock.
+        */
+       if (is_empty)
+               remove_tasks_in_empty_cpuset(cs);
+
+       /* the following may free @cs, should be the last operation */
+       css_put(&cs->css);
 }
 
+/**
+ * schedule_cpuset_propagate_hotplug - schedule hotplug propagation to a cpuset
+ * @cs: cpuset of interest
+ *
+ * Schedule cpuset_propagate_hotplug_workfn() which will update CPU and
+ * memory masks according to top_cpuset.
+ */
+static void schedule_cpuset_propagate_hotplug(struct cpuset *cs)
+{
+       /*
+        * Pin @cs.  The refcnt will be released when the work item
+        * finishes executing.
+        */
+       if (!css_tryget(&cs->css))
+               return;
 
-/*
- * Walk the specified cpuset subtree upon a hotplug operation (CPU/Memory
- * online/offline) and update the cpusets accordingly.
- * For regular CPU/Mem hotplug, look for empty cpusets; the tasks of such
- * cpuset must be moved to a parent cpuset.
+       /*
+        * Queue @cs->hotplug_work.  If already pending, lose the css ref.
+        * cpuset_propagate_hotplug_wq is ordered and propagation will
+        * happen in the order this function is called.
+        */
+       if (!queue_work(cpuset_propagate_hotplug_wq, &cs->hotplug_work))
+               css_put(&cs->css);
+}
+
+/**
+ * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
  *
- * Called with cgroup_mutex held.  We take callback_mutex to modify
- * cpus_allowed and mems_allowed.
+ * This function is called after either CPU or memory configuration has
+ * changed and updates cpuset accordingly.  The top_cpuset is always
+ * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
+ * order to make cpusets transparent (of no effect) on systems that are
+ * actively using CPU hotplug but making no active use of cpusets.
  *
- * This walk processes the tree from top to bottom, completing one layer
- * before dropping down to the next.  It always processes a node before
- * any of its children.
+ * Non-root cpusets are only affected by offlining.  If any CPUs or memory
+ * nodes have been taken down, schedule_cpuset_propagate_hotplug() is
+ * invoked on all descendants.
  *
- * In the case of memory hot-unplug, it will remove nodes from N_MEMORY
- * if all present pages from a node are offlined.
+ * Note that CPU offlining during suspend is ignored.  We don't modify
+ * cpusets across suspend/resume cycles at all.
  */
-static void
-scan_cpusets_upon_hotplug(struct cpuset *root, enum hotplug_event event)
+static void cpuset_hotplug_workfn(struct work_struct *work)
 {
-       LIST_HEAD(queue);
-       struct cpuset *cp;              /* scans cpusets being updated */
-       static nodemask_t oldmems;      /* protected by cgroup_mutex */
+       static cpumask_t new_cpus, tmp_cpus;
+       static nodemask_t new_mems, tmp_mems;
+       bool cpus_updated, mems_updated;
+       bool cpus_offlined, mems_offlined;
 
-       list_add_tail((struct list_head *)&root->stack_list, &queue);
+       mutex_lock(&cpuset_mutex);
 
-       switch (event) {
-       case CPUSET_CPU_OFFLINE:
-               while ((cp = cpuset_next(&queue)) != NULL) {
+       /* fetch the available cpus/mems and find out which changed how */
+       cpumask_copy(&new_cpus, cpu_active_mask);
+       new_mems = node_states[N_MEMORY];
 
-                       /* Continue past cpusets with all cpus online */
-                       if (cpumask_subset(cp->cpus_allowed, cpu_active_mask))
-                               continue;
+       cpus_updated = !cpumask_equal(top_cpuset.cpus_allowed, &new_cpus);
+       cpus_offlined = cpumask_andnot(&tmp_cpus, top_cpuset.cpus_allowed,
+                                      &new_cpus);
 
-                       /* Remove offline cpus from this cpuset. */
-                       mutex_lock(&callback_mutex);
-                       cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
-                                                       cpu_active_mask);
-                       mutex_unlock(&callback_mutex);
+       mems_updated = !nodes_equal(top_cpuset.mems_allowed, new_mems);
+       nodes_andnot(tmp_mems, top_cpuset.mems_allowed, new_mems);
+       mems_offlined = !nodes_empty(tmp_mems);
 
-                       /* Move tasks from the empty cpuset to a parent */
-                       if (cpumask_empty(cp->cpus_allowed))
-                               remove_tasks_in_empty_cpuset(cp);
-                       else
-                               update_tasks_cpumask(cp, NULL);
-               }
-               break;
+       /* synchronize cpus_allowed to cpu_active_mask */
+       if (cpus_updated) {
+               mutex_lock(&callback_mutex);
+               cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
+               mutex_unlock(&callback_mutex);
+               /* we don't mess with cpumasks of tasks in top_cpuset */
+       }
 
-       case CPUSET_MEM_OFFLINE:
-               while ((cp = cpuset_next(&queue)) != NULL) {
+       /* synchronize mems_allowed to N_MEMORY */
+       if (mems_updated) {
+               tmp_mems = top_cpuset.mems_allowed;
+               mutex_lock(&callback_mutex);
+               top_cpuset.mems_allowed = new_mems;
+               mutex_unlock(&callback_mutex);
+               update_tasks_nodemask(&top_cpuset, &tmp_mems, NULL);
+       }
 
-                       /* Continue past cpusets with all mems online */
-                       if (nodes_subset(cp->mems_allowed,
-                                       node_states[N_MEMORY]))
-                               continue;
+       /* if cpus or mems went down, we need to propagate to descendants */
+       if (cpus_offlined || mems_offlined) {
+               struct cpuset *cs;
+               struct cgroup *pos_cgrp;
 
-                       oldmems = cp->mems_allowed;
+               rcu_read_lock();
+               cpuset_for_each_descendant_pre(cs, pos_cgrp, &top_cpuset)
+                       schedule_cpuset_propagate_hotplug(cs);
+               rcu_read_unlock();
+       }
 
-                       /* Remove offline mems from this cpuset. */
-                       mutex_lock(&callback_mutex);
-                       nodes_and(cp->mems_allowed, cp->mems_allowed,
-                                               node_states[N_MEMORY]);
-                       mutex_unlock(&callback_mutex);
+       mutex_unlock(&cpuset_mutex);
 
-                       /* Move tasks from the empty cpuset to a parent */
-                       if (nodes_empty(cp->mems_allowed))
-                               remove_tasks_in_empty_cpuset(cp);
-                       else
-                               update_tasks_nodemask(cp, &oldmems, NULL);
-               }
+       /* wait for propagations to finish */
+       flush_workqueue(cpuset_propagate_hotplug_wq);
+
+       /* rebuild sched domains if cpus_allowed has changed */
+       if (cpus_updated) {
+               struct sched_domain_attr *attr;
+               cpumask_var_t *doms;
+               int ndoms;
+
+               mutex_lock(&cpuset_mutex);
+               ndoms = generate_sched_domains(&doms, &attr);
+               mutex_unlock(&cpuset_mutex);
+
+               partition_sched_domains(ndoms, doms, attr);
        }
 }
 
-/*
- * The top_cpuset tracks what CPUs and Memory Nodes are online,
- * period.  This is necessary in order to make cpusets transparent
- * (of no affect) on systems that are actively using CPU hotplug
- * but making no active use of cpusets.
- *
- * The only exception to this is suspend/resume, where we don't
- * modify cpusets at all.
- *
- * This routine ensures that top_cpuset.cpus_allowed tracks
- * cpu_active_mask on each CPU hotplug (cpuhp) event.
- *
- * Called within get_online_cpus().  Needs to call cgroup_lock()
- * before calling generate_sched_domains().
- *
- * @cpu_online: Indicates whether this is a CPU online event (true) or
- * a CPU offline event (false).
- */
 void cpuset_update_active_cpus(bool cpu_online)
 {
-       struct sched_domain_attr *attr;
-       cpumask_var_t *doms;
-       int ndoms;
-
-       cgroup_lock();
-       mutex_lock(&callback_mutex);
-       cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
-       mutex_unlock(&callback_mutex);
-
-       if (!cpu_online)
-               scan_cpusets_upon_hotplug(&top_cpuset, CPUSET_CPU_OFFLINE);
-
-       ndoms = generate_sched_domains(&doms, &attr);
-       cgroup_unlock();
-
-       /* Have scheduler rebuild the domains */
-       partition_sched_domains(ndoms, doms, attr);
+       /*
+        * We're inside a cpu hotplug critical region which usually nests
+        * inside cgroup synchronization.  Bounce actual hotplug processing
+        * to a work item to avoid reverse locking order.
+        *
+        * We still need to do partition_sched_domains() synchronously;
+        * otherwise, the scheduler will get confused and put tasks to the
+        * dead CPU.  Fall back to the default single domain.
+        * cpuset_hotplug_workfn() will rebuild it as necessary.
+        */
+       partition_sched_domains(1, NULL, NULL);
+       schedule_work(&cpuset_hotplug_work);
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
@@ -2133,29 +2260,7 @@ void cpuset_update_active_cpus(bool cpu_online)
 static int cpuset_track_online_nodes(struct notifier_block *self,
                                unsigned long action, void *arg)
 {
-       static nodemask_t oldmems;      /* protected by cgroup_mutex */
-
-       cgroup_lock();
-       switch (action) {
-       case MEM_ONLINE:
-               oldmems = top_cpuset.mems_allowed;
-               mutex_lock(&callback_mutex);
-               top_cpuset.mems_allowed = node_states[N_MEMORY];
-               mutex_unlock(&callback_mutex);
-               update_tasks_nodemask(&top_cpuset, &oldmems, NULL);
-               break;
-       case MEM_OFFLINE:
-               /*
-                * needn't update top_cpuset.mems_allowed explicitly because
-                * scan_cpusets_upon_hotplug() will update it.
-                */
-               scan_cpusets_upon_hotplug(&top_cpuset, CPUSET_MEM_OFFLINE);
-               break;
-       default:
-               break;
-       }
-       cgroup_unlock();
-
+       schedule_work(&cpuset_hotplug_work);
        return NOTIFY_OK;
 }
 #endif
@@ -2173,8 +2278,9 @@ void __init cpuset_init_smp(void)
 
        hotplug_memory_notifier(cpuset_track_online_nodes, 10);
 
-       cpuset_wq = create_singlethread_workqueue("cpuset");
-       BUG_ON(!cpuset_wq);
+       cpuset_propagate_hotplug_wq =
+               alloc_ordered_workqueue("cpuset_hotplug", 0);
+       BUG_ON(!cpuset_propagate_hotplug_wq);
 }
 
 /**
@@ -2273,8 +2379,8 @@ int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
  */
 static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
 {
-       while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && cs->parent)
-               cs = cs->parent;
+       while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
+               cs = parent_cs(cs);
        return cs;
 }
 
@@ -2411,17 +2517,6 @@ int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
        return 0;
 }
 
-/**
- * cpuset_unlock - release lock on cpuset changes
- *
- * Undo the lock taken in a previous cpuset_lock() call.
- */
-
-void cpuset_unlock(void)
-{
-       mutex_unlock(&callback_mutex);
-}
-
 /**
  * cpuset_mem_spread_node() - On which node to begin search for a file page
  * cpuset_slab_spread_node() - On which node to begin search for a slab page
@@ -2511,8 +2606,16 @@ void cpuset_print_task_mems_allowed(struct task_struct *tsk)
 
        dentry = task_cs(tsk)->css.cgroup->dentry;
        spin_lock(&cpuset_buffer_lock);
-       snprintf(cpuset_name, CPUSET_NAME_LEN,
-                dentry ? (const char *)dentry->d_name.name : "/");
+
+       if (!dentry) {
+               strcpy(cpuset_name, "/");
+       } else {
+               spin_lock(&dentry->d_lock);
+               strlcpy(cpuset_name, (const char *)dentry->d_name.name,
+                       CPUSET_NAME_LEN);
+               spin_unlock(&dentry->d_lock);
+       }
+
        nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
                           tsk->mems_allowed);
        printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
@@ -2560,7 +2663,7 @@ void __cpuset_memory_pressure_bump(void)
  *  - Used for /proc/<pid>/cpuset.
  *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
  *    doesn't really matter if tsk->cpuset changes after we read it,
- *    and we take cgroup_mutex, keeping cpuset_attach() from changing it
+ *    and we take cpuset_mutex, keeping cpuset_attach() from changing it
  *    anyway.
  */
 static int proc_cpuset_show(struct seq_file *m, void *unused_v)
@@ -2582,16 +2685,15 @@ static int proc_cpuset_show(struct seq_file *m, void *unused_v)
        if (!tsk)
                goto out_free;
 
-       retval = -EINVAL;
-       cgroup_lock();
+       rcu_read_lock();
        css = task_subsys_state(tsk, cpuset_subsys_id);
        retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
+       rcu_read_unlock();
        if (retval < 0)
-               goto out_unlock;
+               goto out_put_task;
        seq_puts(m, buf);
        seq_putc(m, '\n');
-out_unlock:
-       cgroup_unlock();
+out_put_task:
        put_task_struct(tsk);
 out_free:
        kfree(buf);
index 4d5f8d5612f349389fee0c22442267b6545c3931..8875254120b66f9a0a6e784d83fc6a1e76f6f4c0 100644 (file)
@@ -1970,6 +1970,8 @@ static int kdb_lsmod(int argc, const char **argv)
 
        kdb_printf("Module                  Size  modstruct     Used by\n");
        list_for_each_entry(mod, kdb_modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
 
                kdb_printf("%-20s%8u  0x%p ", mod->name,
                           mod->core_size, (void *)mod);
index 418b3f7053aaad32cef7d6082e833e5f33fdd6b7..d473988c1d0bc7505db5cbd17f637787bcdd3d1c 100644 (file)
@@ -106,6 +106,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
        unsigned long long t2, t3;
        unsigned long flags;
        struct timespec ts;
+       cputime_t utime, stime, stimescaled, utimescaled;
 
        /* Though tsk->delays accessed later, early exit avoids
         * unnecessary returning of other data
@@ -114,12 +115,14 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
                goto done;
 
        tmp = (s64)d->cpu_run_real_total;
-       cputime_to_timespec(tsk->utime + tsk->stime, &ts);
+       task_cputime(tsk, &utime, &stime);
+       cputime_to_timespec(utime + stime, &ts);
        tmp += timespec_to_ns(&ts);
        d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;
 
        tmp = (s64)d->cpu_scaled_run_real_total;
-       cputime_to_timespec(tsk->utimescaled + tsk->stimescaled, &ts);
+       task_cputime_scaled(tsk, &utimescaled, &stimescaled);
+       cputime_to_timespec(utimescaled + stimescaled, &ts);
        tmp += timespec_to_ns(&ts);
        d->cpu_scaled_run_real_total =
                (tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;
index 301079d06f24ebe44081a286766436de104a3a91..5c75791d7269e87c19a86fe711364f2c3c2793f2 100644 (file)
@@ -907,6 +907,15 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
                ctx->nr_stat++;
 }
 
+/*
+ * Initialize event state based on the perf_event_attr::disabled.
+ */
+static inline void perf_event__state_init(struct perf_event *event)
+{
+       event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
+                                             PERF_EVENT_STATE_INACTIVE;
+}
+
 /*
  * Called at perf_event creation and when events are attached/detached from a
  * group.
@@ -6162,11 +6171,14 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
        if (task) {
                event->attach_state = PERF_ATTACH_TASK;
+
+               if (attr->type == PERF_TYPE_TRACEPOINT)
+                       event->hw.tp_target = task;
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
                /*
                 * hw_breakpoint is a bit difficult here..
                 */
-               if (attr->type == PERF_TYPE_BREAKPOINT)
+               else if (attr->type == PERF_TYPE_BREAKPOINT)
                        event->hw.bp_target = task;
 #endif
        }
@@ -6179,8 +6191,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
        event->overflow_handler = overflow_handler;
        event->overflow_handler_context = context;
 
-       if (attr->disabled)
-               event->state = PERF_EVENT_STATE_OFF;
+       perf_event__state_init(event);
 
        pmu = NULL;
 
@@ -6609,9 +6620,17 @@ SYSCALL_DEFINE5(perf_event_open,
 
                mutex_lock(&gctx->mutex);
                perf_remove_from_context(group_leader);
+
+               /*
+                * Removing from the context ends up with a disabled
+                * event. What we want here is an event in the initial
+                * startup state, ready to be added into a new context.
+                */
+               perf_event__state_init(group_leader);
                list_for_each_entry(sibling, &group_leader->sibling_list,
                                    group_entry) {
                        perf_remove_from_context(sibling);
+                       perf_event__state_init(sibling);
                        put_ctx(gctx);
                }
                mutex_unlock(&gctx->mutex);
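
The two hunks above factor the attr.disabled handling into perf_event__state_init() and reuse it when group members are moved to a new context, so they come back in their initial state rather than permanently disabled. From userspace the same attribute is just a flag to perf_event_open(); a minimal, hypothetical example (not part of this patch) of a counter created disabled and enabled explicitly later:

/* hypothetical userspace demo: attr.disabled = 1 maps to PERF_EVENT_STATE_OFF */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;			/* event starts OFF */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);	/* OFF -> counting */
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %lld\n", count);
	close(fd);
	return 0;
}
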
index fe8a916507ed278cf545196561878a842547a865..a64f8aeb5c1f5adae53fde406b3d904e1485aa80 100644 (file)
@@ -676,7 +676,7 @@ int __init init_hw_breakpoint(void)
  err_alloc:
        for_each_possible_cpu(err_cpu) {
                for (i = 0; i < TYPE_MAX; i++)
-                       kfree(per_cpu(nr_task_bp_pinned[i], cpu));
+                       kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
                if (err_cpu == cpu)
                        break;
        }
index dea7acfbb0710889bf19e5fcb6584da4c5302f5d..a567c8c7ef31fa8c7d9416c8a255717f6b978932 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/pagemap.h>     /* read_mapping_page */
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/export.h>
 #include <linux/rmap.h>                /* anon_vma_prepare */
 #include <linux/mmu_notifier.h>        /* set_pte_at_notify */
 #include <linux/swap.h>                /* try_to_free_swap */
 #define MAX_UPROBE_XOL_SLOTS           UINSNS_PER_PAGE
 
 static struct rb_root uprobes_tree = RB_ROOT;
-
-static DEFINE_SPINLOCK(uprobes_treelock);      /* serialize rbtree access */
-
-#define UPROBES_HASH_SZ        13
-
 /*
- * We need separate register/unregister and mmap/munmap lock hashes because
- * of mmap_sem nesting.
- *
- * uprobe_register() needs to install probes on (potentially) all processes
- * and thus needs to acquire multiple mmap_sems (consequtively, not
- * concurrently), whereas uprobe_mmap() is called while holding mmap_sem
- * for the particular process doing the mmap.
- *
- * uprobe_register()->register_for_each_vma() needs to drop/acquire mmap_sem
- * because of lock order against i_mmap_mutex. This means there's a hole in
- * the register vma iteration where a mmap() can happen.
- *
- * Thus uprobe_register() can race with uprobe_mmap() and we can try and
- * install a probe where one is already installed.
+ * allows us to skip the uprobe_mmap if there are no uprobe events active
+ * at this time.  Probably a fine grained per inode count is better?
  */
+#define no_uprobe_events()     RB_EMPTY_ROOT(&uprobes_tree)
 
-/* serialize (un)register */
-static struct mutex uprobes_mutex[UPROBES_HASH_SZ];
-
-#define uprobes_hash(v)                (&uprobes_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
+static DEFINE_SPINLOCK(uprobes_treelock);      /* serialize rbtree access */
 
+#define UPROBES_HASH_SZ        13
 /* serialize uprobe->pending_list */
 static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
 #define uprobes_mmap_hash(v)   (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
 
 static struct percpu_rw_semaphore dup_mmap_sem;
 
-/*
- * uprobe_events allows us to skip the uprobe_mmap if there are no uprobe
- * events active at this time.  Probably a fine grained per inode count is
- * better?
- */
-static atomic_t uprobe_events = ATOMIC_INIT(0);
-
 /* Have a copy of original instruction */
 #define UPROBE_COPY_INSN       0
-/* Dont run handlers when first register/ last unregister in progress*/
-#define UPROBE_RUN_HANDLER     1
 /* Can skip singlestep */
-#define UPROBE_SKIP_SSTEP      2
+#define UPROBE_SKIP_SSTEP      1
 
 struct uprobe {
        struct rb_node          rb_node;        /* node in the rb tree */
        atomic_t                ref;
+       struct rw_semaphore     register_rwsem;
        struct rw_semaphore     consumer_rwsem;
-       struct mutex            copy_mutex;     /* TODO: kill me and UPROBE_COPY_INSN */
        struct list_head        pending_list;
        struct uprobe_consumer  *consumers;
        struct inode            *inode;         /* Also hold a ref to inode */
@@ -430,9 +404,6 @@ static struct uprobe *insert_uprobe(struct uprobe *uprobe)
        u = __insert_uprobe(uprobe);
        spin_unlock(&uprobes_treelock);
 
-       /* For now assume that the instruction need not be single-stepped */
-       __set_bit(UPROBE_SKIP_SSTEP, &uprobe->flags);
-
        return u;
 }
 
@@ -452,8 +423,10 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
 
        uprobe->inode = igrab(inode);
        uprobe->offset = offset;
+       init_rwsem(&uprobe->register_rwsem);
        init_rwsem(&uprobe->consumer_rwsem);
-       mutex_init(&uprobe->copy_mutex);
+       /* For now assume that the instruction need not be single-stepped */
+       __set_bit(UPROBE_SKIP_SSTEP, &uprobe->flags);
 
        /* add to uprobes_tree, sorted on inode:offset */
        cur_uprobe = insert_uprobe(uprobe);
@@ -463,38 +436,17 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
                kfree(uprobe);
                uprobe = cur_uprobe;
                iput(inode);
-       } else {
-               atomic_inc(&uprobe_events);
        }
 
        return uprobe;
 }
 
-static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
-{
-       struct uprobe_consumer *uc;
-
-       if (!test_bit(UPROBE_RUN_HANDLER, &uprobe->flags))
-               return;
-
-       down_read(&uprobe->consumer_rwsem);
-       for (uc = uprobe->consumers; uc; uc = uc->next) {
-               if (!uc->filter || uc->filter(uc, current))
-                       uc->handler(uc, regs);
-       }
-       up_read(&uprobe->consumer_rwsem);
-}
-
-/* Returns the previous consumer */
-static struct uprobe_consumer *
-consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
+static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
 {
        down_write(&uprobe->consumer_rwsem);
        uc->next = uprobe->consumers;
        uprobe->consumers = uc;
        up_write(&uprobe->consumer_rwsem);
-
-       return uc->next;
 }
 
 /*
@@ -588,7 +540,8 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
        if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
                return ret;
 
-       mutex_lock(&uprobe->copy_mutex);
+       /* TODO: move this into _register, until then we abuse this sem. */
+       down_write(&uprobe->consumer_rwsem);
        if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
                goto out;
 
@@ -612,7 +565,30 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
        set_bit(UPROBE_COPY_INSN, &uprobe->flags);
 
  out:
-       mutex_unlock(&uprobe->copy_mutex);
+       up_write(&uprobe->consumer_rwsem);
+
+       return ret;
+}
+
+static inline bool consumer_filter(struct uprobe_consumer *uc,
+                                  enum uprobe_filter_ctx ctx, struct mm_struct *mm)
+{
+       return !uc->filter || uc->filter(uc, ctx, mm);
+}
+
+static bool filter_chain(struct uprobe *uprobe,
+                        enum uprobe_filter_ctx ctx, struct mm_struct *mm)
+{
+       struct uprobe_consumer *uc;
+       bool ret = false;
+
+       down_read(&uprobe->consumer_rwsem);
+       for (uc = uprobe->consumers; uc; uc = uc->next) {
+               ret = consumer_filter(uc, ctx, mm);
+               if (ret)
+                       break;
+       }
+       up_read(&uprobe->consumer_rwsem);
 
        return ret;
 }
@@ -624,16 +600,6 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
        bool first_uprobe;
        int ret;
 
-       /*
-        * If probe is being deleted, unregister thread could be done with
-        * the vma-rmap-walk through. Adding a probe now can be fatal since
-        * nobody will be able to cleanup. Also we could be from fork or
-        * mremap path, where the probe might have already been inserted.
-        * Hence behave as if probe already existed.
-        */
-       if (!uprobe->consumers)
-               return 0;
-
        ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
        if (ret)
                return ret;
@@ -658,14 +624,14 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
 static int
 remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
 {
-       /* can happen if uprobe_register() fails */
-       if (!test_bit(MMF_HAS_UPROBES, &mm->flags))
-               return 0;
-
        set_bit(MMF_RECALC_UPROBES, &mm->flags);
        return set_orig_insn(&uprobe->arch, mm, vaddr);
 }
 
+static inline bool uprobe_is_active(struct uprobe *uprobe)
+{
+       return !RB_EMPTY_NODE(&uprobe->rb_node);
+}
 /*
  * There could be threads that have already hit the breakpoint. They
  * will recheck the current insn and restart if find_uprobe() fails.
@@ -673,12 +639,15 @@ remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vad
  */
 static void delete_uprobe(struct uprobe *uprobe)
 {
+       if (WARN_ON(!uprobe_is_active(uprobe)))
+               return;
+
        spin_lock(&uprobes_treelock);
        rb_erase(&uprobe->rb_node, &uprobes_tree);
        spin_unlock(&uprobes_treelock);
+       RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
        iput(uprobe->inode);
        put_uprobe(uprobe);
-       atomic_dec(&uprobe_events);
 }
 
 struct map_info {
@@ -764,8 +733,10 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
        return curr;
 }
 
-static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
+static int
+register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
 {
+       bool is_register = !!new;
        struct map_info *info;
        int err = 0;
 
@@ -794,10 +765,16 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
                    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
                        goto unlock;
 
-               if (is_register)
-                       err = install_breakpoint(uprobe, mm, vma, info->vaddr);
-               else
-                       err |= remove_breakpoint(uprobe, mm, info->vaddr);
+               if (is_register) {
+                       /* consult only the "caller", new consumer. */
+                       if (consumer_filter(new,
+                                       UPROBE_FILTER_REGISTER, mm))
+                               err = install_breakpoint(uprobe, mm, vma, info->vaddr);
+               } else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
+                       if (!filter_chain(uprobe,
+                                       UPROBE_FILTER_UNREGISTER, mm))
+                               err |= remove_breakpoint(uprobe, mm, info->vaddr);
+               }
 
  unlock:
                up_write(&mm->mmap_sem);
@@ -810,17 +787,23 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
        return err;
 }
 
-static int __uprobe_register(struct uprobe *uprobe)
+static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc)
 {
-       return register_for_each_vma(uprobe, true);
+       consumer_add(uprobe, uc);
+       return register_for_each_vma(uprobe, uc);
 }
 
-static void __uprobe_unregister(struct uprobe *uprobe)
+static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
 {
-       if (!register_for_each_vma(uprobe, false))
-               delete_uprobe(uprobe);
+       int err;
+
+       if (!consumer_del(uprobe, uc))  /* WARN? */
+               return;
 
+       err = register_for_each_vma(uprobe, NULL);
        /* TODO : cant unregister? schedule a worker thread */
+       if (!uprobe->consumers && !err)
+               delete_uprobe(uprobe);
 }
 
 /*
@@ -845,31 +828,59 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *
        struct uprobe *uprobe;
        int ret;
 
-       if (!inode || !uc || uc->next)
-               return -EINVAL;
-
+       /* Racy, just to catch the obvious mistakes */
        if (offset > i_size_read(inode))
                return -EINVAL;
 
-       ret = 0;
-       mutex_lock(uprobes_hash(inode));
+ retry:
        uprobe = alloc_uprobe(inode, offset);
-
-       if (!uprobe) {
-               ret = -ENOMEM;
-       } else if (!consumer_add(uprobe, uc)) {
-               ret = __uprobe_register(uprobe);
-               if (ret) {
-                       uprobe->consumers = NULL;
-                       __uprobe_unregister(uprobe);
-               } else {
-                       set_bit(UPROBE_RUN_HANDLER, &uprobe->flags);
-               }
+       if (!uprobe)
+               return -ENOMEM;
+       /*
+        * We can race with uprobe_unregister()->delete_uprobe().
+        * Check uprobe_is_active() and retry if it is false.
+        */
+       down_write(&uprobe->register_rwsem);
+       ret = -EAGAIN;
+       if (likely(uprobe_is_active(uprobe))) {
+               ret = __uprobe_register(uprobe, uc);
+               if (ret)
+                       __uprobe_unregister(uprobe, uc);
        }
+       up_write(&uprobe->register_rwsem);
+       put_uprobe(uprobe);
 
-       mutex_unlock(uprobes_hash(inode));
-       if (uprobe)
-               put_uprobe(uprobe);
+       if (unlikely(ret == -EAGAIN))
+               goto retry;
+       return ret;
+}
+EXPORT_SYMBOL_GPL(uprobe_register);
+
+/*
+ * uprobe_apply - add or remove the breakpoints for an already registered probe.
+ * @inode: the file in which the probe resides.
+ * @offset: offset from the start of the file.
+ * @uc: consumer which wants to add more or remove some breakpoints
+ * @add: add or remove the breakpoints
+ */
+int uprobe_apply(struct inode *inode, loff_t offset,
+                       struct uprobe_consumer *uc, bool add)
+{
+       struct uprobe *uprobe;
+       struct uprobe_consumer *con;
+       int ret = -ENOENT;
+
+       uprobe = find_uprobe(inode, offset);
+       if (!uprobe)
+               return ret;
+
+       down_write(&uprobe->register_rwsem);
+       for (con = uprobe->consumers; con && con != uc ; con = con->next)
+               ;
+       if (con)
+               ret = register_for_each_vma(uprobe, add ? uc : NULL);
+       up_write(&uprobe->register_rwsem);
+       put_uprobe(uprobe);
 
        return ret;
 }
@@ -884,25 +895,42 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consume
 {
        struct uprobe *uprobe;
 
-       if (!inode || !uc)
-               return;
-
        uprobe = find_uprobe(inode, offset);
        if (!uprobe)
                return;
 
-       mutex_lock(uprobes_hash(inode));
+       down_write(&uprobe->register_rwsem);
+       __uprobe_unregister(uprobe, uc);
+       up_write(&uprobe->register_rwsem);
+       put_uprobe(uprobe);
+}
+EXPORT_SYMBOL_GPL(uprobe_unregister);
 
-       if (consumer_del(uprobe, uc)) {
-               if (!uprobe->consumers) {
-                       __uprobe_unregister(uprobe);
-                       clear_bit(UPROBE_RUN_HANDLER, &uprobe->flags);
-               }
+static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
+{
+       struct vm_area_struct *vma;
+       int err = 0;
+
+       down_read(&mm->mmap_sem);
+       for (vma = mm->mmap; vma; vma = vma->vm_next) {
+               unsigned long vaddr;
+               loff_t offset;
+
+               if (!valid_vma(vma, false) ||
+                   vma->vm_file->f_mapping->host != uprobe->inode)
+                       continue;
+
+               offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
+               if (uprobe->offset <  offset ||
+                   uprobe->offset >= offset + vma->vm_end - vma->vm_start)
+                       continue;
+
+               vaddr = offset_to_vaddr(vma, uprobe->offset);
+               err |= remove_breakpoint(uprobe, mm, vaddr);
        }
+       up_read(&mm->mmap_sem);
 
-       mutex_unlock(uprobes_hash(inode));
-       if (uprobe)
-               put_uprobe(uprobe);
+       return err;
 }
 
 static struct rb_node *
@@ -979,7 +1007,7 @@ int uprobe_mmap(struct vm_area_struct *vma)
        struct uprobe *uprobe, *u;
        struct inode *inode;
 
-       if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
+       if (no_uprobe_events() || !valid_vma(vma, true))
                return 0;
 
        inode = vma->vm_file->f_mapping->host;
@@ -988,9 +1016,14 @@ int uprobe_mmap(struct vm_area_struct *vma)
 
        mutex_lock(uprobes_mmap_hash(inode));
        build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
-
+       /*
+        * We can race with uprobe_unregister(); this uprobe can already be
+        * removed. But in this case filter_chain() must return false, since
+        * all consumers have gone away.
+        */
        list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
-               if (!fatal_signal_pending(current)) {
+               if (!fatal_signal_pending(current) &&
+                   filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
                        unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
                        install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
                }
@@ -1025,7 +1058,7 @@ vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long e
  */
 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-       if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
+       if (no_uprobe_events() || !valid_vma(vma, false))
                return;
 
        if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
@@ -1042,22 +1075,14 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
 /* Slot allocation for XOL */
 static int xol_add_vma(struct xol_area *area)
 {
-       struct mm_struct *mm;
-       int ret;
-
-       area->page = alloc_page(GFP_HIGHUSER);
-       if (!area->page)
-               return -ENOMEM;
-
-       ret = -EALREADY;
-       mm = current->mm;
+       struct mm_struct *mm = current->mm;
+       int ret = -EALREADY;
 
        down_write(&mm->mmap_sem);
        if (mm->uprobes_state.xol_area)
                goto fail;
 
        ret = -ENOMEM;
-
        /* Try to map as high as possible, this is only a hint. */
        area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
        if (area->vaddr & ~PAGE_MASK) {
@@ -1073,54 +1098,53 @@ static int xol_add_vma(struct xol_area *area)
        smp_wmb();      /* pairs with get_xol_area() */
        mm->uprobes_state.xol_area = area;
        ret = 0;
-
-fail:
+ fail:
        up_write(&mm->mmap_sem);
-       if (ret)
-               __free_page(area->page);
 
        return ret;
 }
 
-static struct xol_area *get_xol_area(struct mm_struct *mm)
-{
-       struct xol_area *area;
-
-       area = mm->uprobes_state.xol_area;
-       smp_read_barrier_depends();     /* pairs with wmb in xol_add_vma() */
-
-       return area;
-}
-
 /*
- * xol_alloc_area - Allocate process's xol_area.
- * This area will be used for storing instructions for execution out of
- * line.
+ * get_xol_area - Allocate process's xol_area if necessary.
+ * This area will be used for storing instructions for execution out of line.
  *
  * Returns the allocated area or NULL.
  */
-static struct xol_area *xol_alloc_area(void)
+static struct xol_area *get_xol_area(void)
 {
+       struct mm_struct *mm = current->mm;
        struct xol_area *area;
 
+       area = mm->uprobes_state.xol_area;
+       if (area)
+               goto ret;
+
        area = kzalloc(sizeof(*area), GFP_KERNEL);
        if (unlikely(!area))
-               return NULL;
+               goto out;
 
        area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
-
        if (!area->bitmap)
-               goto fail;
+               goto free_area;
+
+       area->page = alloc_page(GFP_HIGHUSER);
+       if (!area->page)
+               goto free_bitmap;
 
        init_waitqueue_head(&area->wq);
        if (!xol_add_vma(area))
                return area;
 
-fail:
+       __free_page(area->page);
+ free_bitmap:
        kfree(area->bitmap);
+ free_area:
        kfree(area);
-
-       return get_xol_area(current->mm);
+ out:
+       area = mm->uprobes_state.xol_area;
+ ret:
+       smp_read_barrier_depends();     /* pairs with wmb in xol_add_vma() */
+       return area;
 }
 
 /*
@@ -1186,33 +1210,26 @@ static unsigned long xol_take_insn_slot(struct xol_area *area)
 }
 
 /*
- * xol_get_insn_slot - If was not allocated a slot, then
- * allocate a slot.
+ * xol_get_insn_slot - allocate a slot for xol.
  * Returns the allocated slot address or 0.
  */
-static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot_addr)
+static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
 {
        struct xol_area *area;
        unsigned long offset;
+       unsigned long xol_vaddr;
        void *vaddr;
 
-       area = get_xol_area(current->mm);
-       if (!area) {
-               area = xol_alloc_area();
-               if (!area)
-                       return 0;
-       }
-       current->utask->xol_vaddr = xol_take_insn_slot(area);
+       area = get_xol_area();
+       if (!area)
+               return 0;
 
-       /*
-        * Initialize the slot if xol_vaddr points to valid
-        * instruction slot.
-        */
-       if (unlikely(!current->utask->xol_vaddr))
+       xol_vaddr = xol_take_insn_slot(area);
+       if (unlikely(!xol_vaddr))
                return 0;
 
-       current->utask->vaddr = slot_addr;
-       offset = current->utask->xol_vaddr & ~PAGE_MASK;
+       /* Initialize the slot */
+       offset = xol_vaddr & ~PAGE_MASK;
        vaddr = kmap_atomic(area->page);
        memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES);
        kunmap_atomic(vaddr);
@@ -1222,7 +1239,7 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot
         */
        flush_dcache_page(area->page);
 
-       return current->utask->xol_vaddr;
+       return xol_vaddr;
 }
 
 /*
@@ -1240,8 +1257,7 @@ static void xol_free_insn_slot(struct task_struct *tsk)
                return;
 
        slot_addr = tsk->utask->xol_vaddr;
-
-       if (unlikely(!slot_addr || IS_ERR_VALUE(slot_addr)))
+       if (unlikely(!slot_addr))
                return;
 
        area = tsk->mm->uprobes_state.xol_area;
@@ -1303,33 +1319,48 @@ void uprobe_copy_process(struct task_struct *t)
 }
 
 /*
- * Allocate a uprobe_task object for the task.
- * Called when the thread hits a breakpoint for the first time.
+ * Allocate a uprobe_task object for the task if necessary.
+ * Called when the thread hits a breakpoint.
  *
  * Returns:
  * - pointer to new uprobe_task on success
  * - NULL otherwise
  */
-static struct uprobe_task *add_utask(void)
+static struct uprobe_task *get_utask(void)
 {
-       struct uprobe_task *utask;
-
-       utask = kzalloc(sizeof *utask, GFP_KERNEL);
-       if (unlikely(!utask))
-               return NULL;
-
-       current->utask = utask;
-       return utask;
+       if (!current->utask)
+               current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
+       return current->utask;
 }
 
 /* Prepare to single-step probed instruction out of line. */
 static int
-pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long vaddr)
+pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
 {
-       if (xol_get_insn_slot(uprobe, vaddr) && !arch_uprobe_pre_xol(&uprobe->arch, regs))
-               return 0;
+       struct uprobe_task *utask;
+       unsigned long xol_vaddr;
+       int err;
+
+       utask = get_utask();
+       if (!utask)
+               return -ENOMEM;
+
+       xol_vaddr = xol_get_insn_slot(uprobe);
+       if (!xol_vaddr)
+               return -ENOMEM;
+
+       utask->xol_vaddr = xol_vaddr;
+       utask->vaddr = bp_vaddr;
+
+       err = arch_uprobe_pre_xol(&uprobe->arch, regs);
+       if (unlikely(err)) {
+               xol_free_insn_slot(current);
+               return err;
+       }
 
-       return -EFAULT;
+       utask->active_uprobe = uprobe;
+       utask->state = UTASK_SSTEP;
+       return 0;
 }
 
 /*
@@ -1391,6 +1422,7 @@ static void mmf_recalc_uprobes(struct mm_struct *mm)
                 * This is not strictly accurate, we can race with
                 * uprobe_unregister() and see the already removed
                 * uprobe if delete_uprobe() was not yet called.
+                * Or this uprobe can be filtered out.
                 */
                if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
                        return;
@@ -1452,13 +1484,33 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
        return uprobe;
 }
 
+static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
+{
+       struct uprobe_consumer *uc;
+       int remove = UPROBE_HANDLER_REMOVE;
+
+       down_read(&uprobe->register_rwsem);
+       for (uc = uprobe->consumers; uc; uc = uc->next) {
+               int rc = uc->handler(uc, regs);
+
+               WARN(rc & ~UPROBE_HANDLER_MASK,
+                       "bad rc=0x%x from %pf()\n", rc, uc->handler);
+               remove &= rc;
+       }
+
+       if (remove && uprobe->consumers) {
+               WARN_ON(!uprobe_is_active(uprobe));
+               unapply_uprobe(uprobe, current->mm);
+       }
+       up_read(&uprobe->register_rwsem);
+}
+
 /*
  * Run handler and ask thread to singlestep.
  * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
  */
 static void handle_swbp(struct pt_regs *regs)
 {
-       struct uprobe_task *utask;
        struct uprobe *uprobe;
        unsigned long bp_vaddr;
        int uninitialized_var(is_swbp);
@@ -1483,6 +1535,10 @@ static void handle_swbp(struct pt_regs *regs)
                }
                return;
        }
+
+       /* change it in advance for ->handler() and restart */
+       instruction_pointer_set(regs, bp_vaddr);
+
        /*
         * TODO: move copy_insn/etc into _register and remove this hack.
         * After we hit the bp, _unregister + _register can install the
@@ -1490,32 +1546,16 @@ static void handle_swbp(struct pt_regs *regs)
         */
        smp_rmb(); /* pairs with wmb() in install_breakpoint() */
        if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
-               goto restart;
-
-       utask = current->utask;
-       if (!utask) {
-               utask = add_utask();
-               /* Cannot allocate; re-execute the instruction. */
-               if (!utask)
-                       goto restart;
-       }
+               goto out;
 
        handler_chain(uprobe, regs);
        if (can_skip_sstep(uprobe, regs))
                goto out;
 
-       if (!pre_ssout(uprobe, regs, bp_vaddr)) {
-               utask->active_uprobe = uprobe;
-               utask->state = UTASK_SSTEP;
+       if (!pre_ssout(uprobe, regs, bp_vaddr))
                return;
-       }
 
-restart:
-       /*
-        * cannot singlestep; cannot skip instruction;
-        * re-execute the instruction.
-        */
-       instruction_pointer_set(regs, bp_vaddr);
+       /* can_skip_sstep() succeeded, or restart if can't singlestep */
 out:
        put_uprobe(uprobe);
 }
@@ -1609,10 +1649,8 @@ static int __init init_uprobes(void)
 {
        int i;
 
-       for (i = 0; i < UPROBES_HASH_SZ; i++) {
-               mutex_init(&uprobes_mutex[i]);
+       for (i = 0; i < UPROBES_HASH_SZ; i++)
                mutex_init(&uprobes_mmap_mutex[i]);
-       }
 
        if (percpu_init_rwsem(&dup_mmap_sem))
                return -ENOMEM;
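
The uprobes rework above exports uprobe_register()/uprobe_unregister(), serializes them with a per-uprobe register_rwsem, and pushes breakpoint decisions through per-consumer ->filter() callbacks (UPROBE_FILTER_REGISTER/UNREGISTER/MMAP) and the UPROBE_HANDLER_REMOVE handler return value. A minimal sketch of an in-kernel consumer against that interface follows; the names and the mm-based filter policy are illustrative assumptions, not part of the patch.

#include <linux/fs.h>
#include <linux/uprobes.h>

static struct mm_struct *demo_target_mm;	/* hypothetical: trace one mm only */

static bool demo_filter(struct uprobe_consumer *self,
			enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	/* consulted on register, unregister and mmap, see filter_chain() */
	return mm == demo_target_mm;
}

static int demo_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
	/*
	 * Returning UPROBE_HANDLER_REMOVE would make handler_chain() call
	 * unapply_uprobe() for current->mm; 0 keeps the breakpoint.
	 */
	return 0;
}

static struct uprobe_consumer demo_consumer = {
	.handler = demo_handler,
	.filter  = demo_filter,
};

/* caller supplies the inode:offset of the probed instruction */
static int demo_attach(struct inode *inode, loff_t offset)
{
	return uprobe_register(inode, offset, &demo_consumer);
}

static void demo_detach(struct inode *inode, loff_t offset)
{
	uprobe_unregister(inode, offset, &demo_consumer);
}
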
index b4df21937216e1704670d89e8ef8fe8aa9aee810..7dd20408707ce5845bf5fd63185b0980e1c9827a 100644 (file)
@@ -85,6 +85,7 @@ static void __exit_signal(struct task_struct *tsk)
        bool group_dead = thread_group_leader(tsk);
        struct sighand_struct *sighand;
        struct tty_struct *uninitialized_var(tty);
+       cputime_t utime, stime;
 
        sighand = rcu_dereference_check(tsk->sighand,
                                        lockdep_tasklist_lock_is_held());
@@ -123,9 +124,10 @@ static void __exit_signal(struct task_struct *tsk)
                 * We won't ever get here for the group leader, since it
                 * will have been the last reference on the signal_struct.
                 */
-               sig->utime += tsk->utime;
-               sig->stime += tsk->stime;
-               sig->gtime += tsk->gtime;
+               task_cputime(tsk, &utime, &stime);
+               sig->utime += utime;
+               sig->stime += stime;
+               sig->gtime += task_gtime(tsk);
                sig->min_flt += tsk->min_flt;
                sig->maj_flt += tsk->maj_flt;
                sig->nvcsw += tsk->nvcsw;
@@ -1092,7 +1094,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
                sig = p->signal;
                psig->cutime += tgutime + sig->cutime;
                psig->cstime += tgstime + sig->cstime;
-               psig->cgtime += p->gtime + sig->gtime + sig->cgtime;
+               psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
                psig->cmin_flt +=
                        p->min_flt + sig->min_flt + sig->cmin_flt;
                psig->cmaj_flt +=
index 65ca6d27f24e1065013a428f12f33935b1b18490..4133876d8cd23838037bb62540197f2308578685 100644 (file)
@@ -1233,6 +1233,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
        p->prev_cputime.utime = p->prev_cputime.stime = 0;
 #endif
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+       seqlock_init(&p->vtime_seqlock);
+       p->vtime_snap = 0;
+       p->vtime_snap_whence = VTIME_SLEEPING;
+#endif
+
 #if defined(SPLIT_RSS_COUNTING)
        memset(&p->rss_stat, 0, sizeof(p->rss_stat));
 #endif
@@ -1668,8 +1674,10 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
                 int, tls_val)
 #endif
 {
-       return do_fork(clone_flags, newsp, 0,
-               parent_tidptr, child_tidptr);
+       long ret = do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr);
+       asmlinkage_protect(5, ret, clone_flags, newsp,
+                       parent_tidptr, child_tidptr, tls_val);
+       return ret;
 }
 #endif
 
index 19eb089ca003a7f7a09347eb393d5241cc93342e..9618b6e9fb367b2881ca2ccd28d88f46858e346d 100644 (file)
@@ -60,6 +60,7 @@
 #include <linux/pid.h>
 #include <linux/nsproxy.h>
 #include <linux/ptrace.h>
+#include <linux/sched/rt.h>
 
 #include <asm/futex.h>
 
index 6db7a5ed52b58727d33293853053c0fee1d049fe..cc47812d3feb0c86cbcf8f03e6ee81340ef7c57c 100644 (file)
@@ -44,6 +44,8 @@
 #include <linux/err.h>
 #include <linux/debugobjects.h>
 #include <linux/sched.h>
+#include <linux/sched/sysctl.h>
+#include <linux/sched/rt.h>
 #include <linux/timer.h>
 
 #include <asm/uaccess.h>
@@ -640,21 +642,9 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
  * and expiry check is done in the hrtimer_interrupt or in the softirq.
  */
 static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
-                                           struct hrtimer_clock_base *base,
-                                           int wakeup)
+                                           struct hrtimer_clock_base *base)
 {
-       if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-               if (wakeup) {
-                       raw_spin_unlock(&base->cpu_base->lock);
-                       raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-                       raw_spin_lock(&base->cpu_base->lock);
-               } else
-                       __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-
-               return 1;
-       }
-
-       return 0;
+       return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
 }
 
 static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
@@ -735,8 +725,7 @@ static inline int hrtimer_switch_to_hres(void) { return 0; }
 static inline void
 hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
 static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
-                                           struct hrtimer_clock_base *base,
-                                           int wakeup)
+                                           struct hrtimer_clock_base *base)
 {
        return 0;
 }
@@ -995,8 +984,21 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
         *
         * XXX send_remote_softirq() ?
         */
-       if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
-               hrtimer_enqueue_reprogram(timer, new_base, wakeup);
+       if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
+               && hrtimer_enqueue_reprogram(timer, new_base)) {
+               if (wakeup) {
+                       /*
+                        * We need to drop cpu_base->lock to avoid a
+                        * lock ordering issue vs. rq->lock.
+                        */
+                       raw_spin_unlock(&new_base->cpu_base->lock);
+                       raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+                       local_irq_restore(flags);
+                       return ret;
+               } else {
+                       __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+               }
+       }
 
        unlock_hrtimer_base(timer, &flags);
 
index 3aca9f29d30ef4c5089ad97740588a31462fd22d..cbd97ce0b0007c0daaa15255e3417bda9140b688 100644 (file)
@@ -90,26 +90,40 @@ int irq_set_handler_data(unsigned int irq, void *data)
 EXPORT_SYMBOL(irq_set_handler_data);
 
 /**
- *     irq_set_msi_desc - set MSI descriptor data for an irq
- *     @irq:   Interrupt number
- *     @entry: Pointer to MSI descriptor data
+ *     irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
+ *     @irq_base:      Interrupt number base
+ *     @irq_offset:    Interrupt number offset
+ *     @entry:         Pointer to MSI descriptor data
  *
- *     Set the MSI descriptor entry for an irq
+ *     Set the MSI descriptor entry for an irq at offset
  */
-int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
+int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
+                        struct msi_desc *entry)
 {
        unsigned long flags;
-       struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
+       struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 
        if (!desc)
                return -EINVAL;
        desc->irq_data.msi_desc = entry;
-       if (entry)
-               entry->irq = irq;
+       if (entry && !irq_offset)
+               entry->irq = irq_base;
        irq_put_desc_unlock(desc, flags);
        return 0;
 }
 
+/**
+ *     irq_set_msi_desc - set MSI descriptor data for an irq
+ *     @irq:   Interrupt number
+ *     @entry: Pointer to MSI descriptor data
+ *
+ *     Set the MSI descriptor entry for an irq
+ */
+int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
+{
+       return irq_set_msi_desc_off(irq, 0, entry);
+}
+
 /**
  *     irq_set_chip_data - set irq chip data for an irq
  *     @irq:   Interrupt number
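
The new irq_set_msi_desc_off() lets multi-MSI setup code attach a single msi_desc to a block of consecutive interrupts while writing entry->irq only for offset 0. A hypothetical caller might look like the sketch below; irq_base, nvec and desc are assumptions of the example, not taken from this patch.

#include <linux/irq.h>
#include <linux/msi.h>

static int demo_setup_msi_block(unsigned int irq_base, unsigned int nvec,
				struct msi_desc *desc)
{
	unsigned int i;
	int ret;

	for (i = 0; i < nvec; i++) {
		/* only offset 0 sets desc->irq, per irq_set_msi_desc_off() */
		ret = irq_set_msi_desc_off(irq_base, i, desc);
		if (ret)
			return ret;
	}
	return 0;
}
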
index e49a288fa479036ebb25b907787b34568d421964..fa17855ca65a235bd5c1f9a7daa042b57722b924 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/sched/rt.h>
 #include <linux/task_work.h>
 
 #include "internals.h"
@@ -1524,6 +1525,7 @@ void enable_percpu_irq(unsigned int irq, unsigned int type)
 out:
        irq_put_desc_unlock(desc, flags);
 }
+EXPORT_SYMBOL_GPL(enable_percpu_irq);
 
 void disable_percpu_irq(unsigned int irq)
 {
@@ -1537,6 +1539,7 @@ void disable_percpu_irq(unsigned int irq)
        irq_percpu_disable(desc, cpu);
        irq_put_desc_unlock(desc, flags);
 }
+EXPORT_SYMBOL_GPL(disable_percpu_irq);
 
 /*
  * Internal function to unregister a percpu irqaction.
index 611cd6003c453951d82e12d6d03a044f7f285275..7b5f012bde9d73ff246652fe48d49e5b1aebc12c 100644 (file)
@@ -80,13 +80,11 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 
        /*
         * All handlers must agree on IRQF_SHARED, so we test just the
-        * first. Check for action->next as well.
+        * first.
         */
        action = desc->action;
        if (!action || !(action->flags & IRQF_SHARED) ||
-           (action->flags & __IRQF_TIMER) ||
-           (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
-           !action->next)
+           (action->flags & __IRQF_TIMER))
                goto out;
 
        /* Already running on another processor */
@@ -104,6 +102,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
        do {
                if (handle_irq_event(desc) == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
+               /* Make sure that there is still a valid action */
                action = desc->action;
        } while ((desc->istate & IRQS_PENDING) && action);
        desc->istate &= ~IRQS_POLL_INPROGRESS;
index 1588e3b2871b9e28a68316f91ca418d44a75cba2..55fcce6065cf6bc3213829fd8188cae90b61b778 100644 (file)
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
 #include <linux/irqflags.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/cpu.h>
+#include <linux/notifier.h>
 #include <asm/processor.h>
 
-/*
- * An entry can be in one of four states:
- *
- * free             NULL, 0 -> {claimed}       : free to be used
- * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
- * pending   next, 3 -> {busy}          : queued, pending callback
- * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
- */
-
-#define IRQ_WORK_PENDING       1UL
-#define IRQ_WORK_BUSY          2UL
-#define IRQ_WORK_FLAGS         3UL
 
 static DEFINE_PER_CPU(struct llist_head, irq_work_list);
+static DEFINE_PER_CPU(int, irq_work_raised);
 
 /*
  * Claim the entry so that no one else will poke at it.
  */
 static bool irq_work_claim(struct irq_work *work)
 {
-       unsigned long flags, nflags;
+       unsigned long flags, oflags, nflags;
 
+       /*
+        * Start with our best wish as a premise, but only trust the
+        * flag value returned by cmpxchg().
+        */
+       flags = work->flags & ~IRQ_WORK_PENDING;
        for (;;) {
-               flags = work->flags;
-               if (flags & IRQ_WORK_PENDING)
-                       return false;
                nflags = flags | IRQ_WORK_FLAGS;
-               if (cmpxchg(&work->flags, flags, nflags) == flags)
+               oflags = cmpxchg(&work->flags, flags, nflags);
+               if (oflags == flags)
                        break;
+               if (oflags & IRQ_WORK_PENDING)
+                       return false;
+               flags = oflags;
                cpu_relax();
        }
 
@@ -57,57 +56,69 @@ void __weak arch_irq_work_raise(void)
 }
 
 /*
- * Queue the entry and raise the IPI if needed.
+ * Enqueue the irq_work @entry unless it's already pending
+ * somewhere.
+ *
+ * Can be re-enqueued while the callback is still in progress.
  */
-static void __irq_work_queue(struct irq_work *work)
+void irq_work_queue(struct irq_work *work)
 {
-       bool empty;
+       /* Only queue if not already pending */
+       if (!irq_work_claim(work))
+               return;
 
+       /* Queue the entry and raise the IPI if needed. */
        preempt_disable();
 
-       empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
-       /* The list was empty, raise self-interrupt to start processing. */
-       if (empty)
-               arch_irq_work_raise();
+       llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
+
+       /*
+        * If the work is not "lazy" or the tick is stopped, raise the irq
+        * work interrupt (if supported by the arch), otherwise, just wait
+        * for the next tick.
+        */
+       if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
+               if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
+                       arch_irq_work_raise();
+       }
 
        preempt_enable();
 }
+EXPORT_SYMBOL_GPL(irq_work_queue);
 
-/*
- * Enqueue the irq_work @entry, returns true on success, failure when the
- * @entry was already enqueued by someone else.
- *
- * Can be re-enqueued while the callback is still in progress.
- */
-bool irq_work_queue(struct irq_work *work)
+bool irq_work_needs_cpu(void)
 {
-       if (!irq_work_claim(work)) {
-               /*
-                * Already enqueued, can't do!
-                */
+       struct llist_head *this_list;
+
+       this_list = &__get_cpu_var(irq_work_list);
+       if (llist_empty(this_list))
                return false;
-       }
 
-       __irq_work_queue(work);
+       /* All work should have been flushed before going offline */
+       WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
+
        return true;
 }
-EXPORT_SYMBOL_GPL(irq_work_queue);
 
-/*
- * Run the irq_work entries on this cpu. Requires to be ran from hardirq
- * context with local IRQs disabled.
- */
-void irq_work_run(void)
+static void __irq_work_run(void)
 {
+       unsigned long flags;
        struct irq_work *work;
        struct llist_head *this_list;
        struct llist_node *llnode;
 
+
+       /*
+        * Reset the "raised" state right before we check the list because
+        * an NMI may enqueue after we find the list empty from the runner.
+        */
+       __this_cpu_write(irq_work_raised, 0);
+       barrier();
+
        this_list = &__get_cpu_var(irq_work_list);
        if (llist_empty(this_list))
                return;
 
-       BUG_ON(!in_irq());
        BUG_ON(!irqs_disabled());
 
        llnode = llist_del_all(this_list);
@@ -119,16 +130,31 @@ void irq_work_run(void)
                /*
                 * Clear the PENDING bit, after this point the @work
                 * can be re-used.
+                * Make it immediately visible so that other CPUs trying
+                * to claim that work don't rely on us to handle their data
+                * while we are in the middle of the func.
                 */
-               work->flags = IRQ_WORK_BUSY;
+               flags = work->flags & ~IRQ_WORK_PENDING;
+               xchg(&work->flags, flags);
+
                work->func(work);
                /*
                 * Clear the BUSY bit and return to the free state if
                 * no-one else claimed it meanwhile.
                 */
-               (void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
+               (void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
        }
 }
+
+/*
+ * Run the irq_work entries on this cpu. Requires to be ran from hardirq
+ * context with local IRQs disabled.
+ */
+void irq_work_run(void)
+{
+       BUG_ON(!in_irq());
+       __irq_work_run();
+}
 EXPORT_SYMBOL_GPL(irq_work_run);
 
 /*
@@ -143,3 +169,35 @@ void irq_work_sync(struct irq_work *work)
                cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int irq_work_cpu_notify(struct notifier_block *self,
+                              unsigned long action, void *hcpu)
+{
+       long cpu = (long)hcpu;
+
+       switch (action) {
+       case CPU_DYING:
+               /* Called from stop_machine */
+               if (WARN_ON_ONCE(cpu != smp_processor_id()))
+                       break;
+               __irq_work_run();
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block cpu_notify;
+
+static __init int irq_work_init_cpu_notifier(void)
+{
+       cpu_notify.notifier_call = irq_work_cpu_notify;
+       cpu_notify.priority = 0;
+       register_cpu_notifier(&cpu_notify);
+       return 0;
+}
+device_initcall(irq_work_init_cpu_notifier);
+
+#endif /* CONFIG_HOTPLUG_CPU */
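
After the rework above, irq_work_queue() silently skips already-pending work, the IPI is gated by the per-cpu irq_work_raised flag (or deferred to the tick for IRQ_WORK_LAZY work), and pending entries are flushed on CPU_DYING. Below is a minimal sketch of the public interface as used from a context that cannot sleep or take locks (e.g. NMI); the demo names are illustrative, not from the patch.

#include <linux/irq_work.h>
#include <linux/printk.h>

static void demo_irq_work_func(struct irq_work *work)
{
	/* runs later from the irq_work interrupt with IRQs disabled */
	pr_info("deferred out of NMI context\n");
}

static struct irq_work demo_work;

static void demo_init(void)
{
	init_irq_work(&demo_work, demo_irq_work_func);
}

static void demo_nmi_path(void)
{
	/* safe from NMI; a still-pending entry is simply not requeued */
	irq_work_queue(&demo_work);
}
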
index 0023a87e8de69357b043ab58c072417200433a29..56dd34976d7b0d0ba5526ec2ee00a97e8f1339aa 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/suspend.h>
 #include <linux/rwsem.h>
 #include <linux/ptrace.h>
+#include <linux/async.h>
 #include <asm/uaccess.h>
 
 #include <trace/events/module.h>
@@ -130,6 +131,14 @@ int __request_module(bool wait, const char *fmt, ...)
 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
        static int kmod_loop_msg;
 
+       /*
+        * We don't allow synchronous module loading from async.  Module
+        * init may invoke async_synchronize_full() which will end up
+        * waiting for this task, which is already waiting for the module
+        * loading to complete, leading to a deadlock.
+        */
+       WARN_ON_ONCE(wait && current_is_async());
+
        va_start(args, fmt);
        ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
        va_end(args);
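
Given the WARN_ON_ONCE(wait && current_is_async()) added above, code running from an async job should use the non-blocking request path instead of request_module(). A hypothetical sketch, assuming a "demo-driver" module alias; the names are not from the patch.

#include <linux/kmod.h>
#include <linux/async.h>
#include <linux/printk.h>

static void demo_async_probe(void *data, async_cookie_t cookie)
{
	/*
	 * request_module("demo-driver") here could hit the WARN above and
	 * deadlock against async_synchronize_full(); the nowait variant
	 * does not wait for the usermode helper to finish.
	 */
	if (request_module_nowait("demo-driver"))
		pr_warn("demo: request for demo-driver failed\n");
}

static void demo_start_probe(void)
{
	async_schedule(demo_async_probe, NULL);
}
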
index 098f396aa40984d0b49f246b45dd059b88e9acdf..550294d58a02ef67c76b17440cdaee3675690047 100644 (file)
@@ -471,7 +471,6 @@ static LIST_HEAD(unoptimizing_list);
 
 static void kprobe_optimizer(struct work_struct *work);
 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
-static DECLARE_COMPLETION(optimizer_comp);
 #define OPTIMIZE_DELAY 5
 
 /*
@@ -552,8 +551,7 @@ static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list)
 /* Start optimizer after OPTIMIZE_DELAY passed */
 static __kprobes void kick_kprobe_optimizer(void)
 {
-       if (!delayed_work_pending(&optimizing_work))
-               schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
+       schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
 }
 
 /* Kprobe jump optimizer */
@@ -592,16 +590,25 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
        /* Step 5: Kick optimizer again if needed */
        if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
                kick_kprobe_optimizer();
-       else
-               /* Wake up all waiters */
-               complete_all(&optimizer_comp);
 }
 
 /* Wait for completing optimization and unoptimization */
 static __kprobes void wait_for_kprobe_optimizer(void)
 {
-       if (delayed_work_pending(&optimizing_work))
-               wait_for_completion(&optimizer_comp);
+       mutex_lock(&kprobe_mutex);
+
+       while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
+               mutex_unlock(&kprobe_mutex);
+
+               /* this will also make optimizing_work execute immediately */
+               flush_delayed_work(&optimizing_work);
+               /* @optimizing_work might not have been queued yet, relax */
+               cpu_relax();
+
+               mutex_lock(&kprobe_mutex);
+       }
+
+       mutex_unlock(&kprobe_mutex);
 }
 
 /* Optimize kprobe if p is ready to be optimized */
@@ -919,7 +926,7 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 }
 #endif /* CONFIG_OPTPROBES */
 
-#ifdef KPROBES_CAN_USE_FTRACE
+#ifdef CONFIG_KPROBES_ON_FTRACE
 static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
        .func = kprobe_ftrace_handler,
        .flags = FTRACE_OPS_FL_SAVE_REGS,
@@ -964,7 +971,7 @@ static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
                           (unsigned long)p->addr, 1, 0);
        WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
 }
-#else  /* !KPROBES_CAN_USE_FTRACE */
+#else  /* !CONFIG_KPROBES_ON_FTRACE */
 #define prepare_kprobe(p)      arch_prepare_kprobe(p)
 #define arm_kprobe_ftrace(p)   do {} while (0)
 #define disarm_kprobe_ftrace(p)        do {} while (0)
@@ -1414,12 +1421,12 @@ static __kprobes int check_kprobe_address_safe(struct kprobe *p,
         */
        ftrace_addr = ftrace_location((unsigned long)p->addr);
        if (ftrace_addr) {
-#ifdef KPROBES_CAN_USE_FTRACE
+#ifdef CONFIG_KPROBES_ON_FTRACE
                /* Given address is not on the instruction boundary */
                if ((unsigned long)p->addr != ftrace_addr)
                        return -EILSEQ;
                p->flags |= KPROBE_FLAG_FTRACE;
-#else  /* !KPROBES_CAN_USE_FTRACE */
+#else  /* !CONFIG_KPROBES_ON_FTRACE */
                return -EINVAL;
 #endif
        }
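
The kprobes hunks above switch the ftrace-based probing support to CONFIG_KPROBES_ON_FTRACE and make wait_for_kprobe_optimizer() poll the work lists instead of a completion. For reference, a minimal kprobe module of the kind such ftrace-located probing applies to; the probed symbol and names are illustrative assumptions, not from the patch.

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/printk.h>

static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %s\n", p->symbol_name);
	return 0;
}

static struct kprobe demo_kp = {
	.symbol_name = "do_fork",	/* assumed probe target */
	.pre_handler = demo_pre,
};

static int __init demo_init(void)
{
	return register_kprobe(&demo_kp);
}

static void __exit demo_exit(void)
{
	unregister_kprobe(&demo_kp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
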
index 250092c1d57d6cf582ef801f70a10969e19abab2..eab08274ec9bf9a1e5b8c83de0f925da0aeaf761 100644 (file)
@@ -188,6 +188,7 @@ struct load_info {
    ongoing or failed initialization etc. */
 static inline int strong_try_module_get(struct module *mod)
 {
+       BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
        if (mod && mod->state == MODULE_STATE_COMING)
                return -EBUSY;
        if (try_module_get(mod))
@@ -343,6 +344,9 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
 #endif
                };
 
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
+
                if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
                        return true;
        }
@@ -450,16 +454,24 @@ const struct kernel_symbol *find_symbol(const char *name,
 EXPORT_SYMBOL_GPL(find_symbol);
 
 /* Search for module by name: must hold module_mutex. */
-struct module *find_module(const char *name)
+static struct module *find_module_all(const char *name,
+                                     bool even_unformed)
 {
        struct module *mod;
 
        list_for_each_entry(mod, &modules, list) {
+               if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                if (strcmp(mod->name, name) == 0)
                        return mod;
        }
        return NULL;
 }
+
+struct module *find_module(const char *name)
+{
+       return find_module_all(name, false);
+}
 EXPORT_SYMBOL_GPL(find_module);
 
 #ifdef CONFIG_SMP
@@ -525,6 +537,8 @@ bool is_module_percpu_address(unsigned long addr)
        preempt_disable();
 
        list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                if (!mod->percpu_size)
                        continue;
                for_each_possible_cpu(cpu) {
@@ -1048,6 +1062,8 @@ static ssize_t show_initstate(struct module_attribute *mattr,
        case MODULE_STATE_GOING:
                state = "going";
                break;
+       default:
+               BUG();
        }
        return sprintf(buffer, "%s\n", state);
 }
@@ -1786,6 +1802,8 @@ void set_all_modules_text_rw(void)
 
        mutex_lock(&module_mutex);
        list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                if ((mod->module_core) && (mod->core_text_size)) {
                        set_page_attributes(mod->module_core,
                                                mod->module_core + mod->core_text_size,
@@ -1807,6 +1825,8 @@ void set_all_modules_text_ro(void)
 
        mutex_lock(&module_mutex);
        list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                if ((mod->module_core) && (mod->core_text_size)) {
                        set_page_attributes(mod->module_core,
                                                mod->module_core + mod->core_text_size,
@@ -2527,6 +2547,13 @@ static int copy_module_from_fd(int fd, struct load_info *info)
                err = -EFBIG;
                goto out;
        }
+
+       /* Don't hand 0 to vmalloc, it whines. */
+       if (stat.size == 0) {
+               err = -EINVAL;
+               goto out;
+       }
+
        info->hdr = vmalloc(stat.size);
        if (!info->hdr) {
                err = -ENOMEM;
@@ -2990,8 +3017,9 @@ static bool finished_loading(const char *name)
        bool ret;
 
        mutex_lock(&module_mutex);
-       mod = find_module(name);
-       ret = !mod || mod->state != MODULE_STATE_COMING;
+       mod = find_module_all(name, true);
+       ret = !mod || mod->state == MODULE_STATE_LIVE
+               || mod->state == MODULE_STATE_GOING;
        mutex_unlock(&module_mutex);
 
        return ret;
@@ -3013,6 +3041,12 @@ static int do_init_module(struct module *mod)
 {
        int ret = 0;
 
+       /*
+        * We want to find out whether @mod uses async during init.  Clear
+        * PF_USED_ASYNC.  async_schedule*() will set it.
+        */
+       current->flags &= ~PF_USED_ASYNC;
+
        blocking_notifier_call_chain(&module_notify_list,
                        MODULE_STATE_COMING, mod);
 
@@ -3058,8 +3092,25 @@ static int do_init_module(struct module *mod)
        blocking_notifier_call_chain(&module_notify_list,
                                     MODULE_STATE_LIVE, mod);
 
-       /* We need to finish all async code before the module init sequence is done */
-       async_synchronize_full();
+       /*
+        * We need to finish all async code before the module init sequence
+        * is done.  This has the potential to deadlock.  For example, a
+        * newly detected block device can trigger request_module() of the
+        * default iosched from an async probing task.  Once the userland
+        * helper reaches here, async_synchronize_full() will wait on the
+        * async task waiting on request_module() and deadlock.
+        *
+        * This deadlock is avoided by performing async_synchronize_full()
+        * iff module init queued any async jobs.  This isn't a full
+        * solution as it will still deadlock if module loading from async
+        * jobs nests more than once; however, due to the various
+        * constraints, this hack seems to be the best option for now.
+        * Please refer to the following thread for details.
+        *
+        * http://thread.gmane.org/gmane.linux.kernel/1420814
+        */
+       if (current->flags & PF_USED_ASYNC)
+               async_synchronize_full();
 
        mutex_lock(&module_mutex);
        /* Drop initial reference. */
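
A minimal userspace sketch of the PF_USED_ASYNC idea used in the hunk above
(illustrative C only, not kernel code; the stub names are invented): clear a
per-task flag before running module init, let the async-scheduling primitive
set it, and only pay for a full synchronization when init really queued
async work.

/* sketch: conditional async synchronization, mirroring PF_USED_ASYNC */
#include <stdbool.h>
#include <stdio.h>

static bool used_async;                 /* stands in for PF_USED_ASYNC */

static void async_schedule_stub(void)
{
        used_async = true;              /* async_schedule*() sets the flag */
        printf("queued async probe\n");
}

static void module_init_stub(void)
{
        async_schedule_stub();          /* comment this out: no sync needed */
}

int main(void)
{
        used_async = false;             /* current->flags &= ~PF_USED_ASYNC */
        module_init_stub();
        if (used_async)                 /* async_synchronize_full() only if used */
                printf("waiting for async work to finish\n");
        return 0;
}
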
@@ -3113,6 +3164,32 @@ static int load_module(struct load_info *info, const char __user *uargs,
                goto free_copy;
        }
 
+       /*
+        * We try to place it in the list now to make sure it's unique
+        * before we dedicate too many resources.  In particular, this
+        * avoids temporary percpu memory exhaustion.
+        */
+       mod->state = MODULE_STATE_UNFORMED;
+again:
+       mutex_lock(&module_mutex);
+       if ((old = find_module_all(mod->name, true)) != NULL) {
+               if (old->state == MODULE_STATE_COMING
+                   || old->state == MODULE_STATE_UNFORMED) {
+                       /* Wait in case it fails to load. */
+                       mutex_unlock(&module_mutex);
+                       err = wait_event_interruptible(module_wq,
+                                              finished_loading(mod->name));
+                       if (err)
+                               goto free_module;
+                       goto again;
+               }
+               err = -EEXIST;
+               mutex_unlock(&module_mutex);
+               goto free_module;
+       }
+       list_add_rcu(&mod->list, &modules);
+       mutex_unlock(&module_mutex);
+
 #ifdef CONFIG_MODULE_SIG
        mod->sig_ok = info->sig_ok;
        if (!mod->sig_ok)
@@ -3122,7 +3199,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
        /* Now module is in final location, initialize linked lists, etc. */
        err = module_unload_init(mod);
        if (err)
-               goto free_module;
+               goto unlink_mod;
 
        /* Now we've got everything in the final locations, we can
         * find optional sections. */
@@ -3157,54 +3234,33 @@ static int load_module(struct load_info *info, const char __user *uargs,
                goto free_arch_cleanup;
        }
 
-       /* Mark state as coming so strong_try_module_get() ignores us. */
-       mod->state = MODULE_STATE_COMING;
-
-       /* Now sew it into the lists so we can get lockdep and oops
-        * info during argument parsing.  No one should access us, since
-        * strong_try_module_get() will fail.
-        * lockdep/oops can run asynchronous, so use the RCU list insertion
-        * function to insert in a way safe to concurrent readers.
-        * The mutex protects against concurrent writers.
-        */
-again:
-       mutex_lock(&module_mutex);
-       if ((old = find_module(mod->name)) != NULL) {
-               if (old->state == MODULE_STATE_COMING) {
-                       /* Wait in case it fails to load. */
-                       mutex_unlock(&module_mutex);
-                       err = wait_event_interruptible(module_wq,
-                                              finished_loading(mod->name));
-                       if (err)
-                               goto free_arch_cleanup;
-                       goto again;
-               }
-               err = -EEXIST;
-               goto unlock;
-       }
-
-       /* This has to be done once we're sure module name is unique. */
        dynamic_debug_setup(info->debug, info->num_debug);
 
-       /* Find duplicate symbols */
+       mutex_lock(&module_mutex);
+       /* Find duplicate symbols (must be called under lock). */
        err = verify_export_symbols(mod);
        if (err < 0)
-               goto ddebug;
+               goto ddebug_cleanup;
 
+       /* This relies on module_mutex for list integrity. */
        module_bug_finalize(info->hdr, info->sechdrs, mod);
-       list_add_rcu(&mod->list, &modules);
+
+       /* Mark state as coming so strong_try_module_get() ignores us,
+        * but kallsyms etc. can see us. */
+       mod->state = MODULE_STATE_COMING;
+
        mutex_unlock(&module_mutex);
 
        /* Module is ready to execute: parsing args may do that. */
        err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
                         -32768, 32767, &ddebug_dyndbg_module_param_cb);
        if (err < 0)
-               goto unlink;
+               goto bug_cleanup;
 
        /* Link in to sysfs. */
        err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
        if (err < 0)
-               goto unlink;
+               goto bug_cleanup;
 
        /* Get rid of temporary copy. */
        free_copy(info);
@@ -3214,16 +3270,13 @@ again:
 
        return do_init_module(mod);
 
- unlink:
+ bug_cleanup:
+       /* module_bug_cleanup needs module_mutex protection */
        mutex_lock(&module_mutex);
-       /* Unlink carefully: kallsyms could be walking list. */
-       list_del_rcu(&mod->list);
        module_bug_cleanup(mod);
-       wake_up_all(&module_wq);
- ddebug:
-       dynamic_debug_remove(info->debug);
- unlock:
+ ddebug_cleanup:
        mutex_unlock(&module_mutex);
+       dynamic_debug_remove(info->debug);
        synchronize_sched();
        kfree(mod->args);
  free_arch_cleanup:
@@ -3232,6 +3285,12 @@ again:
        free_modinfo(mod);
  free_unload:
        module_unload_free(mod);
+ unlink_mod:
+       mutex_lock(&module_mutex);
+       /* Unlink carefully: kallsyms could be walking list. */
+       list_del_rcu(&mod->list);
+       wake_up_all(&module_wq);
+       mutex_unlock(&module_mutex);
  free_module:
        module_deallocate(mod, info);
  free_copy:
@@ -3354,6 +3413,8 @@ const char *module_address_lookup(unsigned long addr,
 
        preempt_disable();
        list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                if (within_module_init(addr, mod) ||
                    within_module_core(addr, mod)) {
                        if (modname)
@@ -3377,6 +3438,8 @@ int lookup_module_symbol_name(unsigned long addr, char *symname)
 
        preempt_disable();
        list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                if (within_module_init(addr, mod) ||
                    within_module_core(addr, mod)) {
                        const char *sym;
@@ -3401,6 +3464,8 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
 
        preempt_disable();
        list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                if (within_module_init(addr, mod) ||
                    within_module_core(addr, mod)) {
                        const char *sym;
@@ -3428,6 +3493,8 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 
        preempt_disable();
        list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                if (symnum < mod->num_symtab) {
                        *value = mod->symtab[symnum].st_value;
                        *type = mod->symtab[symnum].st_info;
@@ -3470,9 +3537,12 @@ unsigned long module_kallsyms_lookup_name(const char *name)
                        ret = mod_find_symname(mod, colon+1);
                *colon = ':';
        } else {
-               list_for_each_entry_rcu(mod, &modules, list)
+               list_for_each_entry_rcu(mod, &modules, list) {
+                       if (mod->state == MODULE_STATE_UNFORMED)
+                               continue;
                        if ((ret = mod_find_symname(mod, name)) != 0)
                                break;
+               }
        }
        preempt_enable();
        return ret;
@@ -3487,6 +3557,8 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
        int ret;
 
        list_for_each_entry(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                for (i = 0; i < mod->num_symtab; i++) {
                        ret = fn(data, mod->strtab + mod->symtab[i].st_name,
                                 mod, mod->symtab[i].st_value);
@@ -3502,6 +3574,7 @@ static char *module_flags(struct module *mod, char *buf)
 {
        int bx = 0;
 
+       BUG_ON(mod->state == MODULE_STATE_UNFORMED);
        if (mod->taints ||
            mod->state == MODULE_STATE_GOING ||
            mod->state == MODULE_STATE_COMING) {
@@ -3543,6 +3616,10 @@ static int m_show(struct seq_file *m, void *p)
        struct module *mod = list_entry(p, struct module, list);
        char buf[8];
 
+       /* We always ignore unformed modules. */
+       if (mod->state == MODULE_STATE_UNFORMED)
+               return 0;
+
        seq_printf(m, "%s %u",
                   mod->name, mod->init_size + mod->core_size);
        print_unload_info(m, mod);
@@ -3603,6 +3680,8 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)
 
        preempt_disable();
        list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                if (mod->num_exentries == 0)
                        continue;
 
@@ -3651,10 +3730,13 @@ struct module *__module_address(unsigned long addr)
        if (addr < module_addr_min || addr > module_addr_max)
                return NULL;
 
-       list_for_each_entry_rcu(mod, &modules, list)
+       list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                if (within_module_core(addr, mod)
                    || within_module_init(addr, mod))
                        return mod;
+       }
        return NULL;
 }
 EXPORT_SYMBOL_GPL(__module_address);
@@ -3707,8 +3789,11 @@ void print_modules(void)
        printk(KERN_DEFAULT "Modules linked in:");
        /* Most callers should already have preempt disabled, but make sure */
        preempt_disable();
-       list_for_each_entry_rcu(mod, &modules, list)
+       list_for_each_entry_rcu(mod, &modules, list) {
+               if (mod->state == MODULE_STATE_UNFORMED)
+                       continue;
                printk(" %s%s", mod->name, module_flags(mod, buf));
+       }
        preempt_enable();
        if (last_unloaded_module[0])
                printk(" [last unloaded: %s]", last_unloaded_module);
index a307cc9c9526e7c5a5514cfb0988d804e6300f7c..52f23011b6e0f1001eeaa01a6d2f821206cf588b 100644 (file)
@@ -19,6 +19,7 @@
  */
 #include <linux/mutex.h>
 #include <linux/sched.h>
+#include <linux/sched/rt.h>
 #include <linux/export.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
index de9af600006f8cd73020b7d25b3dc4fc28be5eab..f2c6a68250989d4223b788bd368b6dd7a80e4e7d 100644 (file)
@@ -331,7 +331,7 @@ out:
        return pid;
 
 out_unlock:
-       spin_unlock(&pidmap_lock);
+       spin_unlock_irq(&pidmap_lock);
 out_free:
        while (++i <= ns->level)
                free_pidmap(pid->numbers + i);
index a278cad1d5d6225a52e6bf00b2fd90f98bbe916e..8fd709c9bb5843bbad0e7e3fd1e1b00075413b29 100644 (file)
@@ -155,11 +155,19 @@ static void bump_cpu_timer(struct k_itimer *timer,
 
 static inline cputime_t prof_ticks(struct task_struct *p)
 {
-       return p->utime + p->stime;
+       cputime_t utime, stime;
+
+       task_cputime(p, &utime, &stime);
+
+       return utime + stime;
 }
 static inline cputime_t virt_ticks(struct task_struct *p)
 {
-       return p->utime;
+       cputime_t utime;
+
+       task_cputime(p, &utime, NULL);
+
+       return utime;
 }
 
 static int
@@ -471,18 +479,23 @@ static void cleanup_timers(struct list_head *head,
  */
 void posix_cpu_timers_exit(struct task_struct *tsk)
 {
+       cputime_t utime, stime;
+
        add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
                                                sizeof(unsigned long long));
+       task_cputime(tsk, &utime, &stime);
        cleanup_timers(tsk->cpu_timers,
-                      tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
+                      utime, stime, tsk->se.sum_exec_runtime);
 
 }
 void posix_cpu_timers_exit_group(struct task_struct *tsk)
 {
        struct signal_struct *const sig = tsk->signal;
+       cputime_t utime, stime;
 
+       task_cputime(tsk, &utime, &stime);
        cleanup_timers(tsk->signal->cpu_timers,
-                      tsk->utime + sig->utime, tsk->stime + sig->stime,
+                      utime + sig->utime, stime + sig->stime,
                       tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
 }
 
@@ -1226,11 +1239,14 @@ static inline int task_cputime_expired(const struct task_cputime *sample,
 static inline int fastpath_timer_check(struct task_struct *tsk)
 {
        struct signal_struct *sig;
+       cputime_t utime, stime;
+
+       task_cputime(tsk, &utime, &stime);
 
        if (!task_cputime_zero(&tsk->cputime_expires)) {
                struct task_cputime task_sample = {
-                       .utime = tsk->utime,
-                       .stime = tsk->stime,
+                       .utime = utime,
+                       .stime = stime,
                        .sum_exec_runtime = tsk->se.sum_exec_runtime
                };
 
@@ -1401,8 +1417,10 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
                while (!signal_pending(current)) {
                        if (timer.it.cpu.expires.sched == 0) {
                                /*
-                                * Our timer fired and was reset.
+                                * Our timer fired and was reset, the
+                                * deletion below cannot fail.
                                 */
+                               posix_cpu_timer_del(&timer);
                                spin_unlock_irq(&timer.it_lock);
                                return 0;
                        }
@@ -1420,9 +1438,26 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
                 * We were interrupted by a signal.
                 */
                sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
-               posix_cpu_timer_set(&timer, 0, &zero_it, it);
+               error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
+               if (!error) {
+                       /*
+                        * The timer is now unarmed, deletion cannot fail.
+                        */
+                       posix_cpu_timer_del(&timer);
+               }
                spin_unlock_irq(&timer.it_lock);
 
+               while (error == TIMER_RETRY) {
+                       /*
+                        * We need to handle the case where the timer was or
+                        * is in the middle of firing.  In other cases we have
+                        * already freed the resources.
+                        */
+                       spin_lock_irq(&timer.it_lock);
+                       error = posix_cpu_timer_del(&timer);
+                       spin_unlock_irq(&timer.it_lock);
+               }
+
                if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
                        /*
                         * It actually did fire already.
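
The posix-cpu-timers changes above stop reading ->utime/->stime directly and
go through the task_cputime() accessor instead. A rough userspace sketch of
that accessor pattern (types and field names here are illustrative only):

#include <stdio.h>

typedef unsigned long long cputime_t;

struct task {
        cputime_t utime;
        cputime_t stime;
};

/* accessor hides how utime/stime are maintained; either out-pointer may be NULL */
static void task_cputime(const struct task *t, cputime_t *utime, cputime_t *stime)
{
        if (utime)
                *utime = t->utime;
        if (stime)
                *stime = t->stime;
}

static cputime_t prof_ticks(const struct task *t)
{
        cputime_t utime, stime;

        task_cputime(t, &utime, &stime);
        return utime + stime;
}

int main(void)
{
        struct task t = { .utime = 40, .stime = 2 };

        printf("%llu\n", prof_ticks(&t));       /* 42 */
        return 0;
}
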
index 69185ae6b701742de8977760234c5accda366dfd..10349d5f2ec311afe81c17ff533aec6ddbd83d09 100644 (file)
@@ -997,7 +997,7 @@ SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
 
        err = kc->clock_adj(which_clock, &ktx);
 
-       if (!err && copy_to_user(utx, &ktx, sizeof(ktx)))
+       if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
                return -EFAULT;
 
        return err;
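
The one-line clock_adjtime() fix above matters because clock_adj() can return
a positive clock state on success, so only negative values are errors and the
adjusted result must still be copied back. A toy illustration (TIME_OOP and
the stub are assumptions, not the real syscall path):

#include <stdio.h>

#define TIME_OOP 3      /* illustrative positive "leap second in progress" state */

static int clock_adj_stub(void)
{
        return TIME_OOP;        /* success, but non-zero */
}

int main(void)
{
        int err = clock_adj_stub();

        if (err >= 0)           /* the old "!err" test would wrongly skip this */
                printf("copy adjusted time back to the caller\n");
        return err < 0 ? err : 0;
}
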
index 357f714ddd4983e75e3816577b237d8c8d38bd3f..f24633afa46a5e7c8c3ae9010548fc95c122d825 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/notifier.h>
 #include <linux/rculist.h>
 #include <linux/poll.h>
+#include <linux/irq_work.h>
 
 #include <asm/uaccess.h>
 
@@ -87,12 +88,6 @@ static DEFINE_SEMAPHORE(console_sem);
 struct console *console_drivers;
 EXPORT_SYMBOL_GPL(console_drivers);
 
-#ifdef CONFIG_LOCKDEP
-static struct lockdep_map console_lock_dep_map = {
-       .name = "console_lock"
-};
-#endif
-
 /*
  * This is used for debugging the mess that is the VT code by
  * keeping track if we have the console semaphore held. It's
@@ -1924,7 +1919,6 @@ void console_lock(void)
                return;
        console_locked = 1;
        console_may_schedule = 1;
-       mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);
 }
 EXPORT_SYMBOL(console_lock);
 
@@ -1946,7 +1940,6 @@ int console_trylock(void)
        }
        console_locked = 1;
        console_may_schedule = 0;
-       mutex_acquire(&console_lock_dep_map, 0, 1, _RET_IP_);
        return 1;
 }
 EXPORT_SYMBOL(console_trylock);
@@ -1967,30 +1960,32 @@ int is_console_locked(void)
 static DEFINE_PER_CPU(int, printk_pending);
 static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
 
-void printk_tick(void)
+static void wake_up_klogd_work_func(struct irq_work *irq_work)
 {
-       if (__this_cpu_read(printk_pending)) {
-               int pending = __this_cpu_xchg(printk_pending, 0);
-               if (pending & PRINTK_PENDING_SCHED) {
-                       char *buf = __get_cpu_var(printk_sched_buf);
-                       printk(KERN_WARNING "[sched_delayed] %s", buf);
-               }
-               if (pending & PRINTK_PENDING_WAKEUP)
-                       wake_up_interruptible(&log_wait);
+       int pending = __this_cpu_xchg(printk_pending, 0);
+
+       if (pending & PRINTK_PENDING_SCHED) {
+               char *buf = __get_cpu_var(printk_sched_buf);
+               printk(KERN_WARNING "[sched_delayed] %s", buf);
        }
-}
 
-int printk_needs_cpu(int cpu)
-{
-       if (cpu_is_offline(cpu))
-               printk_tick();
-       return __this_cpu_read(printk_pending);
+       if (pending & PRINTK_PENDING_WAKEUP)
+               wake_up_interruptible(&log_wait);
 }
 
+static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
+       .func = wake_up_klogd_work_func,
+       .flags = IRQ_WORK_LAZY,
+};
+
 void wake_up_klogd(void)
 {
-       if (waitqueue_active(&log_wait))
+       preempt_disable();
+       if (waitqueue_active(&log_wait)) {
                this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
+               irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
+       }
+       preempt_enable();
 }
 
 static void console_cont_flush(char *text, size_t size)
@@ -2107,7 +2102,6 @@ skip:
                local_irq_restore(flags);
        }
        console_locked = 0;
-       mutex_release(&console_lock_dep_map, 1, _RET_IP_);
 
        /* Release the exclusive_console once it is used */
        if (unlikely(exclusive_console))
@@ -2471,6 +2465,7 @@ int printk_sched(const char *fmt, ...)
        va_end(args);
 
        __this_cpu_or(printk_pending, PRINTK_PENDING_SCHED);
+       irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
        local_irq_restore(flags);
 
        return r;
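
The printk.c rework above moves klogd wakeups off the timer tick and onto lazy
irq_work: the hot path only sets a pending bit and queues deferred work, and
the work function drains the bits later. A userspace approximation of that
split (C11 atomics stand in for per-CPU state, and the work runs inline
instead of via irq_work):

#include <stdatomic.h>
#include <stdio.h>

#define PENDING_WAKEUP  0x1
#define PENDING_SCHED   0x2

static atomic_int printk_pending;

static void wake_up_klogd_work_func(void)
{
        int pending = atomic_exchange(&printk_pending, 0);

        if (pending & PENDING_SCHED)
                printf("flush deferred sched message\n");
        if (pending & PENDING_WAKEUP)
                printf("wake up log readers\n");
}

static void wake_up_klogd(void)
{
        atomic_fetch_or(&printk_pending, PENDING_WAKEUP);
        wake_up_klogd_work_func();      /* the kernel queues irq_work here */
}

int main(void)
{
        wake_up_klogd();
        return 0;
}
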
index 1f391819c42fe6d6539f5ba5e66e6f365b877f6d..dc3384ee874e4ed5a0344395757744adc268c9bc 100644 (file)
@@ -37,9 +37,6 @@ struct profile_hit {
 #define NR_PROFILE_HIT         (PAGE_SIZE/sizeof(struct profile_hit))
 #define NR_PROFILE_GRP         (NR_PROFILE_HIT/PROFILE_GRPSZ)
 
-/* Oprofile timer tick hook */
-static int (*timer_hook)(struct pt_regs *) __read_mostly;
-
 static atomic_t *prof_buffer;
 static unsigned long prof_len, prof_shift;
 
@@ -208,25 +205,6 @@ int profile_event_unregister(enum profile_type type, struct notifier_block *n)
 }
 EXPORT_SYMBOL_GPL(profile_event_unregister);
 
-int register_timer_hook(int (*hook)(struct pt_regs *))
-{
-       if (timer_hook)
-               return -EBUSY;
-       timer_hook = hook;
-       return 0;
-}
-EXPORT_SYMBOL_GPL(register_timer_hook);
-
-void unregister_timer_hook(int (*hook)(struct pt_regs *))
-{
-       WARN_ON(hook != timer_hook);
-       timer_hook = NULL;
-       /* make sure all CPUs see the NULL hook */
-       synchronize_sched();  /* Allow ongoing interrupts to complete. */
-}
-EXPORT_SYMBOL_GPL(unregister_timer_hook);
-
-
 #ifdef CONFIG_SMP
 /*
  * Each cpu has a pair of open-addressed hashtables for pending
@@ -436,8 +414,6 @@ void profile_tick(int type)
 {
        struct pt_regs *regs = get_irq_regs();
 
-       if (type == CPU_PROFILING && timer_hook)
-               timer_hook(regs);
        if (!user_mode(regs) && prof_cpu_mask != NULL &&
            cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
                profile_hit(type, (void *)profile_pc(regs));
index 1599157336a6c2be56037a510b7c2c28648201b8..acbd28424d812040e060a82ae2d6f28e156e5fe1 100644 (file)
@@ -117,11 +117,45 @@ void __ptrace_unlink(struct task_struct *child)
         * TASK_KILLABLE sleeps.
         */
        if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
-               signal_wake_up(child, task_is_traced(child));
+               ptrace_signal_wake_up(child, true);
 
        spin_unlock(&child->sighand->siglock);
 }
 
+/* Ensure that nothing can wake it up, even SIGKILL */
+static bool ptrace_freeze_traced(struct task_struct *task)
+{
+       bool ret = false;
+
+       /* Lockless, nobody but us can set this flag */
+       if (task->jobctl & JOBCTL_LISTENING)
+               return ret;
+
+       spin_lock_irq(&task->sighand->siglock);
+       if (task_is_traced(task) && !__fatal_signal_pending(task)) {
+               task->state = __TASK_TRACED;
+               ret = true;
+       }
+       spin_unlock_irq(&task->sighand->siglock);
+
+       return ret;
+}
+
+static void ptrace_unfreeze_traced(struct task_struct *task)
+{
+       if (task->state != __TASK_TRACED)
+               return;
+
+       WARN_ON(!task->ptrace || task->parent != current);
+
+       spin_lock_irq(&task->sighand->siglock);
+       if (__fatal_signal_pending(task))
+               wake_up_state(task, __TASK_TRACED);
+       else
+               task->state = TASK_TRACED;
+       spin_unlock_irq(&task->sighand->siglock);
+}
+
 /**
  * ptrace_check_attach - check whether ptracee is ready for ptrace operation
  * @child: ptracee to check for
@@ -139,7 +173,7 @@ void __ptrace_unlink(struct task_struct *child)
  * RETURNS:
  * 0 on success, -ESRCH if %child is not ready.
  */
-int ptrace_check_attach(struct task_struct *child, bool ignore_state)
+static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
 {
        int ret = -ESRCH;
 
@@ -151,24 +185,29 @@ int ptrace_check_attach(struct task_struct *child, bool ignore_state)
         * be changed by us so it's not changing right after this.
         */
        read_lock(&tasklist_lock);
-       if ((child->ptrace & PT_PTRACED) && child->parent == current) {
+       if (child->ptrace && child->parent == current) {
+               WARN_ON(child->state == __TASK_TRACED);
                /*
                 * child->sighand can't be NULL, release_task()
                 * does ptrace_unlink() before __exit_signal().
                 */
-               spin_lock_irq(&child->sighand->siglock);
-               WARN_ON_ONCE(task_is_stopped(child));
-               if (ignore_state || (task_is_traced(child) &&
-                                    !(child->jobctl & JOBCTL_LISTENING)))
+               if (ignore_state || ptrace_freeze_traced(child))
                        ret = 0;
-               spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);
 
-       if (!ret && !ignore_state)
-               ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
+       if (!ret && !ignore_state) {
+               if (!wait_task_inactive(child, __TASK_TRACED)) {
+                       /*
+                        * This can only happen if may_ptrace_stop() fails and
+                        * ptrace_stop() changes ->state back to TASK_RUNNING,
+                        * so we should not worry about leaking __TASK_TRACED.
+                        */
+                       WARN_ON(child->state == __TASK_TRACED);
+                       ret = -ESRCH;
+               }
+       }
 
-       /* All systems go.. */
        return ret;
 }
 
@@ -317,7 +356,7 @@ static int ptrace_attach(struct task_struct *task, long request,
         */
        if (task_is_stopped(task) &&
            task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
-               signal_wake_up(task, 1);
+               signal_wake_up_state(task, __TASK_STOPPED);
 
        spin_unlock(&task->sighand->siglock);
 
@@ -673,6 +712,12 @@ static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
                                             kiov->iov_len, kiov->iov_base);
 }
 
+/*
+ * This is declared in linux/regset.h and defined in machine-dependent
+ * code.  We put the export here, near the primary machine-neutral use,
+ * to ensure no machine forgets it.
+ */
+EXPORT_SYMBOL_GPL(task_user_regset_view);
 #endif
 
 int ptrace_request(struct task_struct *child, long request,
@@ -737,7 +782,7 @@ int ptrace_request(struct task_struct *child, long request,
                 * tracee into STOP.
                 */
                if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
-                       signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
+                       ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
 
                unlock_task_sighand(child, &flags);
                ret = 0;
@@ -763,7 +808,7 @@ int ptrace_request(struct task_struct *child, long request,
                         * start of this trap and now.  Trigger re-trap.
                         */
                        if (child->jobctl & JOBCTL_TRAP_NOTIFY)
-                               signal_wake_up(child, true);
+                               ptrace_signal_wake_up(child, true);
                        ret = 0;
                }
                unlock_task_sighand(child, &flags);
@@ -900,6 +945,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
                goto out_put_task_struct;
 
        ret = arch_ptrace(child, request, addr, data);
+       if (ret || request != PTRACE_DETACH)
+               ptrace_unfreeze_traced(child);
 
  out_put_task_struct:
        put_task_struct(child);
@@ -1039,8 +1086,11 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
 
        ret = ptrace_check_attach(child, request == PTRACE_KILL ||
                                  request == PTRACE_INTERRUPT);
-       if (!ret)
+       if (!ret) {
                ret = compat_arch_ptrace(child, request, addr, data);
+               if (ret || request != PTRACE_DETACH)
+                       ptrace_unfreeze_traced(child);
+       }
 
  out_put_task_struct:
        put_task_struct(child);
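
The ptrace changes above freeze the tracee in a special traced state that
ordinary wakeups cannot disturb, then restore it (or let a pending fatal
signal through) once the request completes. A coarse pthread-based sketch of
that freeze/unfreeze shape, with invented state names standing in for the
kernel's TASK_TRACED/__TASK_TRACED:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum { TASK_TRACED, TASK_TRACED_FROZEN, TASK_RUNNING };

static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;
static int task_state = TASK_TRACED;
static bool fatal_signal;

static bool freeze_traced(void)
{
        bool ret = false;

        pthread_mutex_lock(&siglock);
        if (task_state == TASK_TRACED && !fatal_signal) {
                task_state = TASK_TRACED_FROZEN;        /* invisible to normal wakeups */
                ret = true;
        }
        pthread_mutex_unlock(&siglock);
        return ret;
}

static void unfreeze_traced(void)
{
        if (task_state != TASK_TRACED_FROZEN)
                return;
        pthread_mutex_lock(&siglock);
        if (fatal_signal)
                task_state = TASK_RUNNING;      /* let it die */
        else
                task_state = TASK_TRACED;
        pthread_mutex_unlock(&siglock);
}

int main(void)
{
        if (freeze_traced()) {
                printf("tracee frozen, safe to operate on it\n");
                unfreeze_traced();
        }
        return 0;
}
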
index 20dfba576c2b7efde60815321b5dce1c7e049e37..7f8e7590e3e5d414141f3f778caaf58f8b0f6717 100644 (file)
@@ -111,4 +111,11 @@ static inline bool __rcu_reclaim(char *rn, struct rcu_head *head)
 
 extern int rcu_expedited;
 
+#ifdef CONFIG_RCU_STALL_COMMON
+
+extern int rcu_cpu_stall_suppress;
+int rcu_jiffies_till_stall_check(void);
+
+#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
+
 #endif /* __LINUX_RCU_H */
index a2cf76177b443d33bb30b19052524db385cb4fc9..48ab70384a4cda7533a65c02915ef253ea767645 100644 (file)
@@ -404,11 +404,65 @@ EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
-void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp)
+void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp,
+                              unsigned long secs,
+                              unsigned long c_old, unsigned long c)
 {
-       trace_rcu_torture_read(rcutorturename, rhp);
+       trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
 }
 EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
 #else
-#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
+#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
+       do { } while (0)
 #endif
+
+#ifdef CONFIG_RCU_STALL_COMMON
+
+#ifdef CONFIG_PROVE_RCU
+#define RCU_STALL_DELAY_DELTA         (5 * HZ)
+#else
+#define RCU_STALL_DELAY_DELTA         0
+#endif
+
+int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
+int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
+
+module_param(rcu_cpu_stall_suppress, int, 0644);
+module_param(rcu_cpu_stall_timeout, int, 0644);
+
+int rcu_jiffies_till_stall_check(void)
+{
+       int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);
+
+       /*
+        * Limit check must be consistent with the Kconfig limits
+        * for CONFIG_RCU_CPU_STALL_TIMEOUT.
+        */
+       if (till_stall_check < 3) {
+               ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
+               till_stall_check = 3;
+       } else if (till_stall_check > 300) {
+               ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
+               till_stall_check = 300;
+       }
+       return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
+}
+
+static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
+{
+       rcu_cpu_stall_suppress = 1;
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block rcu_panic_block = {
+       .notifier_call = rcu_panic,
+};
+
+static int __init check_cpu_stall_init(void)
+{
+       atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
+       return 0;
+}
+early_initcall(check_cpu_stall_init);
+
+#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
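
The rcu_jiffies_till_stall_check() helper added above clamps the module
parameter to the Kconfig range before converting it to jiffies. A standalone
version of just that arithmetic (HZ hard-coded to 100 and the 21-second
default are assumptions for the sketch):

#include <stdio.h>

#define HZ 100

static int rcu_cpu_stall_timeout = 21;  /* seconds, settable at runtime */

static int rcu_jiffies_till_stall_check(void)
{
        int till_stall_check = rcu_cpu_stall_timeout;

        if (till_stall_check < 3)
                till_stall_check = rcu_cpu_stall_timeout = 3;
        else if (till_stall_check > 300)
                till_stall_check = rcu_cpu_stall_timeout = 300;
        return till_stall_check * HZ;
}

int main(void)
{
        printf("%d jiffies\n", rcu_jiffies_till_stall_check());        /* 2100 */
        return 0;
}
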
index e7dce58f9c2aa3fba2b6b3035d9d1f42d3598399..a0714a51b6d710e8030c33ddaadffe73fe30e07c 100644 (file)
@@ -51,10 +51,10 @@ static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp);
 
-#include "rcutiny_plugin.h"
-
 static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
 
+#include "rcutiny_plugin.h"
+
 /* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
 static void rcu_idle_enter_common(long long newval)
 {
@@ -193,7 +193,7 @@ EXPORT_SYMBOL(rcu_is_cpu_idle);
  * interrupts don't count, we must be running at the first interrupt
  * level.
  */
-int rcu_is_cpu_rrupt_from_idle(void)
+static int rcu_is_cpu_rrupt_from_idle(void)
 {
        return rcu_dynticks_nesting <= 1;
 }
@@ -205,6 +205,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
  */
 static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
 {
+       reset_cpu_stall_ticks(rcp);
        if (rcp->rcucblist != NULL &&
            rcp->donetail != rcp->curtail) {
                rcp->donetail = rcp->curtail;
@@ -251,6 +252,7 @@ void rcu_bh_qs(int cpu)
  */
 void rcu_check_callbacks(int cpu, int user)
 {
+       check_cpu_stalls();
        if (user || rcu_is_cpu_rrupt_from_idle())
                rcu_sched_qs(cpu);
        else if (!in_softirq())
index f85016a2309b94c08121f0e3346c1c24d81d4c18..8a233002faeba228d6195481f0812af703b7e9f3 100644 (file)
@@ -33,6 +33,9 @@ struct rcu_ctrlblk {
        struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
        struct rcu_head **curtail;      /* ->next pointer of last CB. */
        RCU_TRACE(long qlen);           /* Number of pending CBs. */
+       RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */
+       RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
+       RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
        RCU_TRACE(char *name);          /* Name of RCU type. */
 };
 
@@ -54,6 +57,51 @@ int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
+#ifdef CONFIG_RCU_TRACE
+
+static void check_cpu_stall(struct rcu_ctrlblk *rcp)
+{
+       unsigned long j;
+       unsigned long js;
+
+       if (rcu_cpu_stall_suppress)
+               return;
+       rcp->ticks_this_gp++;
+       j = jiffies;
+       js = rcp->jiffies_stall;
+       if (*rcp->curtail && ULONG_CMP_GE(j, js)) {
+               pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n",
+                      rcp->name, rcp->ticks_this_gp, rcu_dynticks_nesting,
+                      jiffies - rcp->gp_start, rcp->qlen);
+               dump_stack();
+       }
+       if (*rcp->curtail && ULONG_CMP_GE(j, js))
+               rcp->jiffies_stall = jiffies +
+                       3 * rcu_jiffies_till_stall_check() + 3;
+       else if (ULONG_CMP_GE(j, js))
+               rcp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
+}
+
+static void check_cpu_stall_preempt(void);
+
+#endif /* #ifdef CONFIG_RCU_TRACE */
+
+static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
+{
+#ifdef CONFIG_RCU_TRACE
+       rcp->ticks_this_gp = 0;
+       rcp->gp_start = jiffies;
+       rcp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
+#endif /* #ifdef CONFIG_RCU_TRACE */
+}
+
+static void check_cpu_stalls(void)
+{
+       RCU_TRACE(check_cpu_stall(&rcu_bh_ctrlblk));
+       RCU_TRACE(check_cpu_stall(&rcu_sched_ctrlblk));
+       RCU_TRACE(check_cpu_stall_preempt());
+}
+
 #ifdef CONFIG_TINY_PREEMPT_RCU
 
 #include <linux/delay.h>
@@ -448,6 +496,7 @@ static void rcu_preempt_start_gp(void)
                /* Official start of GP. */
                rcu_preempt_ctrlblk.gpnum++;
                RCU_TRACE(rcu_preempt_ctrlblk.n_grace_periods++);
+               reset_cpu_stall_ticks(&rcu_preempt_ctrlblk.rcb);
 
                /* Any blocked RCU readers block new GP. */
                if (rcu_preempt_blocked_readers_any())
@@ -1054,4 +1103,11 @@ MODULE_AUTHOR("Paul E. McKenney");
 MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
 MODULE_LICENSE("GPL");
 
+static void check_cpu_stall_preempt(void)
+{
+#ifdef CONFIG_TINY_PREEMPT_RCU
+       check_cpu_stall(&rcu_preempt_ctrlblk.rcb);
+#endif /* #ifdef CONFIG_TINY_PREEMPT_RCU */
+}
+
 #endif /* #ifdef CONFIG_RCU_TRACE */
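
The tiny-RCU plugin above gains per-grace-period stall bookkeeping: a start
time, a tick counter, and a warning deadline that gets pushed out after each
report. A simplified sketch using wall-clock seconds in place of jiffies (all
names and the 21-second figure are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define STALL_TIMEOUT 21        /* seconds */

struct ctrlblk {
        time_t gp_start;
        time_t stall_deadline;
        unsigned long ticks_this_gp;
        bool callbacks_pending;
};

static void reset_stall_ticks(struct ctrlblk *rcp)
{
        rcp->ticks_this_gp = 0;
        rcp->gp_start = time(NULL);
        rcp->stall_deadline = rcp->gp_start + STALL_TIMEOUT;
}

static void check_stall(struct ctrlblk *rcp)
{
        rcp->ticks_this_gp++;
        if (rcp->callbacks_pending && time(NULL) >= rcp->stall_deadline) {
                fprintf(stderr, "INFO: stall, %lu ticks this GP\n",
                        rcp->ticks_this_gp);
                rcp->stall_deadline = time(NULL) + 3 * STALL_TIMEOUT;
        }
}

int main(void)
{
        struct ctrlblk rcb = { .callbacks_pending = true };

        reset_stall_ticks(&rcb);
        check_stall(&rcb);      /* quiet: deadline not yet reached */
        return 0;
}
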
index 31dea01c85fd50fb3297627dc81556a146f249c5..e1f3a8c96724374be64e1cf252a40f2e9eac5a70 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/stat.h>
 #include <linux/srcu.h>
 #include <linux/slab.h>
+#include <linux/trace_clock.h>
 #include <asm/byteorder.h>
 
 MODULE_LICENSE("GPL");
@@ -207,6 +208,20 @@ MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot");
 #define rcu_can_boost() 0
 #endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
 
+#ifdef CONFIG_RCU_TRACE
+static u64 notrace rcu_trace_clock_local(void)
+{
+       u64 ts = trace_clock_local();
+       unsigned long __maybe_unused ts_rem = do_div(ts, NSEC_PER_USEC);
+       return ts;
+}
+#else /* #ifdef CONFIG_RCU_TRACE */
+static u64 notrace rcu_trace_clock_local(void)
+{
+       return 0ULL;
+}
+#endif /* #else #ifdef CONFIG_RCU_TRACE */
+
 static unsigned long shutdown_time;    /* jiffies to system shutdown. */
 static unsigned long boost_starttime;  /* jiffies of next boost test start. */
 DEFINE_MUTEX(boost_mutex);             /* protect setting boost_starttime */
@@ -845,7 +860,7 @@ static int rcu_torture_boost(void *arg)
                /* Wait for the next test interval. */
                oldstarttime = boost_starttime;
                while (ULONG_CMP_LT(jiffies, oldstarttime)) {
-                       schedule_timeout_uninterruptible(1);
+                       schedule_timeout_interruptible(oldstarttime - jiffies);
                        rcu_stutter_wait("rcu_torture_boost");
                        if (kthread_should_stop() ||
                            fullstop != FULLSTOP_DONTSTOP)
@@ -1028,7 +1043,6 @@ void rcutorture_trace_dump(void)
                return;
        if (atomic_xchg(&beenhere, 1) != 0)
                return;
-       do_trace_rcu_torture_read(cur_ops->name, (struct rcu_head *)~0UL);
        ftrace_dump(DUMP_ALL);
 }
 
@@ -1042,13 +1056,16 @@ static void rcu_torture_timer(unsigned long unused)
 {
        int idx;
        int completed;
+       int completed_end;
        static DEFINE_RCU_RANDOM(rand);
        static DEFINE_SPINLOCK(rand_lock);
        struct rcu_torture *p;
        int pipe_count;
+       unsigned long long ts;
 
        idx = cur_ops->readlock();
        completed = cur_ops->completed();
+       ts = rcu_trace_clock_local();
        p = rcu_dereference_check(rcu_torture_current,
                                  rcu_read_lock_bh_held() ||
                                  rcu_read_lock_sched_held() ||
@@ -1058,7 +1075,6 @@ static void rcu_torture_timer(unsigned long unused)
                cur_ops->readunlock(idx);
                return;
        }
-       do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
        if (p->rtort_mbtest == 0)
                atomic_inc(&n_rcu_torture_mberror);
        spin_lock(&rand_lock);
@@ -1071,10 +1087,14 @@ static void rcu_torture_timer(unsigned long unused)
                /* Should not happen, but... */
                pipe_count = RCU_TORTURE_PIPE_LEN;
        }
-       if (pipe_count > 1)
+       completed_end = cur_ops->completed();
+       if (pipe_count > 1) {
+               do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
+                                         completed, completed_end);
                rcutorture_trace_dump();
+       }
        __this_cpu_inc(rcu_torture_count[pipe_count]);
-       completed = cur_ops->completed() - completed;
+       completed = completed_end - completed;
        if (completed > RCU_TORTURE_PIPE_LEN) {
                /* Should not happen, but... */
                completed = RCU_TORTURE_PIPE_LEN;
@@ -1094,11 +1114,13 @@ static int
 rcu_torture_reader(void *arg)
 {
        int completed;
+       int completed_end;
        int idx;
        DEFINE_RCU_RANDOM(rand);
        struct rcu_torture *p;
        int pipe_count;
        struct timer_list t;
+       unsigned long long ts;
 
        VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
        set_user_nice(current, 19);
@@ -1112,6 +1134,7 @@ rcu_torture_reader(void *arg)
                }
                idx = cur_ops->readlock();
                completed = cur_ops->completed();
+               ts = rcu_trace_clock_local();
                p = rcu_dereference_check(rcu_torture_current,
                                          rcu_read_lock_bh_held() ||
                                          rcu_read_lock_sched_held() ||
@@ -1122,7 +1145,6 @@ rcu_torture_reader(void *arg)
                        schedule_timeout_interruptible(HZ);
                        continue;
                }
-               do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
                if (p->rtort_mbtest == 0)
                        atomic_inc(&n_rcu_torture_mberror);
                cur_ops->read_delay(&rand);
@@ -1132,10 +1154,14 @@ rcu_torture_reader(void *arg)
                        /* Should not happen, but... */
                        pipe_count = RCU_TORTURE_PIPE_LEN;
                }
-               if (pipe_count > 1)
+               completed_end = cur_ops->completed();
+               if (pipe_count > 1) {
+                       do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
+                                                 ts, completed, completed_end);
                        rcutorture_trace_dump();
+               }
                __this_cpu_inc(rcu_torture_count[pipe_count]);
-               completed = cur_ops->completed() - completed;
+               completed = completed_end - completed;
                if (completed > RCU_TORTURE_PIPE_LEN) {
                        /* Should not happen, but... */
                        completed = RCU_TORTURE_PIPE_LEN;
@@ -1301,19 +1327,35 @@ static void rcu_torture_shuffle_tasks(void)
                                set_cpus_allowed_ptr(reader_tasks[i],
                                                     shuffle_tmp_mask);
        }
-
        if (fakewriter_tasks) {
                for (i = 0; i < nfakewriters; i++)
                        if (fakewriter_tasks[i])
                                set_cpus_allowed_ptr(fakewriter_tasks[i],
                                                     shuffle_tmp_mask);
        }
-
        if (writer_task)
                set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
-
        if (stats_task)
                set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
+       if (stutter_task)
+               set_cpus_allowed_ptr(stutter_task, shuffle_tmp_mask);
+       if (fqs_task)
+               set_cpus_allowed_ptr(fqs_task, shuffle_tmp_mask);
+       if (shutdown_task)
+               set_cpus_allowed_ptr(shutdown_task, shuffle_tmp_mask);
+#ifdef CONFIG_HOTPLUG_CPU
+       if (onoff_task)
+               set_cpus_allowed_ptr(onoff_task, shuffle_tmp_mask);
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+       if (stall_task)
+               set_cpus_allowed_ptr(stall_task, shuffle_tmp_mask);
+       if (barrier_cbs_tasks)
+               for (i = 0; i < n_barrier_cbs; i++)
+                       if (barrier_cbs_tasks[i])
+                               set_cpus_allowed_ptr(barrier_cbs_tasks[i],
+                                                    shuffle_tmp_mask);
+       if (barrier_task)
+               set_cpus_allowed_ptr(barrier_task, shuffle_tmp_mask);
 
        if (rcu_idle_cpu == -1)
                rcu_idle_cpu = num_online_cpus() - 1;
@@ -1749,7 +1791,7 @@ static int rcu_torture_barrier_init(void)
        barrier_cbs_wq =
                kzalloc(n_barrier_cbs * sizeof(barrier_cbs_wq[0]),
                        GFP_KERNEL);
-       if (barrier_cbs_tasks == NULL || barrier_cbs_wq == 0)
+       if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
                return -ENOMEM;
        for (i = 0; i < n_barrier_cbs; i++) {
                init_waitqueue_head(&barrier_cbs_wq[i]);
index e441b77b614e1eb74b1f764710dc13f2398de17a..5b8ad827fd86e74932a0b42b40c8f5c27e370228 100644 (file)
@@ -105,7 +105,7 @@ int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
  * The rcu_scheduler_active variable transitions from zero to one just
  * before the first task is spawned.  So when this variable is zero, RCU
  * can assume that there is but one task, allowing RCU to (for example)
- * optimized synchronize_sched() to a simple barrier().  When this variable
+ * optimize synchronize_sched() to a simple barrier().  When this variable
  * is one, RCU must actually do all the hard work required to detect real
  * grace periods.  This variable is also used to suppress boot-time false
  * positives from lockdep-RCU error checking.
@@ -217,12 +217,6 @@ module_param(blimit, long, 0444);
 module_param(qhimark, long, 0444);
 module_param(qlowmark, long, 0444);
 
-int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
-int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
-
-module_param(rcu_cpu_stall_suppress, int, 0644);
-module_param(rcu_cpu_stall_timeout, int, 0644);
-
 static ulong jiffies_till_first_fqs = RCU_JIFFIES_TILL_FORCE_QS;
 static ulong jiffies_till_next_fqs = RCU_JIFFIES_TILL_FORCE_QS;
 
@@ -305,17 +299,27 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
 }
 
 /*
- * Does the current CPU require a yet-as-unscheduled grace period?
+ * Does the current CPU require a not-yet-started grace period?
+ * The caller must have disabled interrupts to prevent races with
+ * normal callback registry.
  */
 static int
 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-       struct rcu_head **ntp;
+       int i;
 
-       ntp = rdp->nxttail[RCU_DONE_TAIL +
-                          (ACCESS_ONCE(rsp->completed) != rdp->completed)];
-       return rdp->nxttail[RCU_DONE_TAIL] && ntp && *ntp &&
-              !rcu_gp_in_progress(rsp);
+       if (rcu_gp_in_progress(rsp))
+               return 0;  /* No, a grace period is already in progress. */
+       if (!rdp->nxttail[RCU_NEXT_TAIL])
+               return 0;  /* No, this is a no-CBs (or offline) CPU. */
+       if (*rdp->nxttail[RCU_NEXT_READY_TAIL])
+               return 1;  /* Yes, this CPU has newly registered callbacks. */
+       for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
+               if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
+                   ULONG_CMP_LT(ACCESS_ONCE(rsp->completed),
+                                rdp->nxtcompleted[i]))
+                       return 1;  /* Yes, CBs for future grace period. */
+       return 0; /* No grace period needed. */
 }
 
 /*
@@ -336,7 +340,7 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
                                bool user)
 {
-       trace_rcu_dyntick("Start", oldval, 0);
+       trace_rcu_dyntick("Start", oldval, rdtp->dynticks_nesting);
        if (!user && !is_idle_task(current)) {
                struct task_struct *idle = idle_task(smp_processor_id());
 
@@ -727,7 +731,7 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
  * interrupt from idle, return true.  The caller must have at least
  * disabled preemption.
  */
-int rcu_is_cpu_rrupt_from_idle(void)
+static int rcu_is_cpu_rrupt_from_idle(void)
 {
        return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1;
 }
@@ -793,28 +797,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
        return 0;
 }
 
-static int jiffies_till_stall_check(void)
-{
-       int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);
-
-       /*
-        * Limit check must be consistent with the Kconfig limits
-        * for CONFIG_RCU_CPU_STALL_TIMEOUT.
-        */
-       if (till_stall_check < 3) {
-               ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
-               till_stall_check = 3;
-       } else if (till_stall_check > 300) {
-               ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
-               till_stall_check = 300;
-       }
-       return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
-}
-
 static void record_gp_stall_check_time(struct rcu_state *rsp)
 {
        rsp->gp_start = jiffies;
-       rsp->jiffies_stall = jiffies + jiffies_till_stall_check();
+       rsp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
 }
 
 /*
@@ -857,7 +843,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;
        }
-       rsp->jiffies_stall = jiffies + 3 * jiffies_till_stall_check() + 3;
+       rsp->jiffies_stall = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
        /*
@@ -935,7 +921,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
        raw_spin_lock_irqsave(&rnp->lock, flags);
        if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
                rsp->jiffies_stall = jiffies +
-                                    3 * jiffies_till_stall_check() + 3;
+                                    3 * rcu_jiffies_till_stall_check() + 3;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
        set_need_resched();  /* kick ourselves to get things going. */
@@ -966,12 +952,6 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
        }
 }
 
-static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
-{
-       rcu_cpu_stall_suppress = 1;
-       return NOTIFY_DONE;
-}
-
 /**
  * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
  *
@@ -989,15 +969,6 @@ void rcu_cpu_stall_reset(void)
                rsp->jiffies_stall = jiffies + ULONG_MAX / 2;
 }
 
-static struct notifier_block rcu_panic_block = {
-       .notifier_call = rcu_panic,
-};
-
-static void __init check_cpu_stall_init(void)
-{
-       atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
-}
-
 /*
  * Update CPU-local rcu_data state to record the newly noticed grace period.
  * This is used both when we started the grace period and when we notice
@@ -1070,6 +1041,145 @@ static void init_callback_list(struct rcu_data *rdp)
        init_nocb_callback_list(rdp);
 }
 
+/*
+ * Determine the value that ->completed will have at the end of the
+ * next subsequent grace period.  This is used to tag callbacks so that
+ * a CPU can invoke callbacks in a timely fashion even if that CPU has
+ * been dyntick-idle for an extended period with callbacks under the
+ * influence of RCU_FAST_NO_HZ.
+ *
+ * The caller must hold rnp->lock with interrupts disabled.
+ */
+static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
+                                      struct rcu_node *rnp)
+{
+       /*
+        * If RCU is idle, we just wait for the next grace period.
+        * But we can only be sure that RCU is idle if we are looking
+        * at the root rcu_node structure -- otherwise, a new grace
+        * period might have started, but just not yet gotten around
+        * to initializing the current non-root rcu_node structure.
+        */
+       if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
+               return rnp->completed + 1;
+
+       /*
+        * Otherwise, wait for a possible partial grace period and
+        * then the subsequent full grace period.
+        */
+       return rnp->completed + 2;
+}
+
+/*
+ * If there is room, assign a ->completed number to any callbacks on
+ * this CPU that have not already been assigned.  Also accelerate any
+ * callbacks that were previously assigned a ->completed number that has
+ * since proven to be too conservative, which can happen if callbacks get
+ * assigned a ->completed number while RCU is idle, but with reference to
+ * a non-root rcu_node structure.  This function is idempotent, so it does
+ * not hurt to call it repeatedly.
+ *
+ * The caller must hold rnp->lock with interrupts disabled.
+ */
+static void rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
+                              struct rcu_data *rdp)
+{
+       unsigned long c;
+       int i;
+
+       /* If the CPU has no callbacks, nothing to do. */
+       if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
+               return;
+
+       /*
+        * Starting from the sublist containing the callbacks most
+        * recently assigned a ->completed number and working down, find the
+        * first sublist that is not assignable to an upcoming grace period.
+        * Such a sublist has something in it (first two tests) and has
+        * a ->completed number assigned that will complete sooner than
+        * the ->completed number for newly arrived callbacks (last test).
+        *
+        * The key point is that any later sublist can be assigned the
+        * same ->completed number as the newly arrived callbacks, which
+        * means that the callbacks in any of these later sublists can be
+        * grouped into a single sublist, whether or not they have already
+        * been assigned a ->completed number.
+        */
+       c = rcu_cbs_completed(rsp, rnp);
+       for (i = RCU_NEXT_TAIL - 1; i > RCU_DONE_TAIL; i--)
+               if (rdp->nxttail[i] != rdp->nxttail[i - 1] &&
+                   !ULONG_CMP_GE(rdp->nxtcompleted[i], c))
+                       break;
+
+       /*
+        * If there is no sublist for unassigned callbacks, leave.
+        * At the same time, advance "i" one sublist, so that "i" will
+        * index the sublist into which all the remaining callbacks should
+        * be grouped.
+        */
+       if (++i >= RCU_NEXT_TAIL)
+               return;
+
+       /*
+        * Assign all subsequent callbacks' ->completed number to the next
+        * full grace period and group them all in the sublist initially
+        * indexed by "i".
+        */
+       for (; i <= RCU_NEXT_TAIL; i++) {
+               rdp->nxttail[i] = rdp->nxttail[RCU_NEXT_TAIL];
+               rdp->nxtcompleted[i] = c;
+       }
+
+       /* Trace depending on how much we were able to accelerate. */
+       if (!*rdp->nxttail[RCU_WAIT_TAIL])
+               trace_rcu_grace_period(rsp->name, rdp->gpnum, "AccWaitCB");
+       else
+               trace_rcu_grace_period(rsp->name, rdp->gpnum, "AccReadyCB");
+}
+
+/*
+ * Move any callbacks whose grace period has completed to the
+ * RCU_DONE_TAIL sublist, then compact the remaining sublists and
+ * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
+ * sublist.  This function is idempotent, so it does not hurt to
+ * invoke it repeatedly.  As long as it is not invoked -too- often...
+ *
+ * The caller must hold rnp->lock with interrupts disabled.
+ */
+static void rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
+                           struct rcu_data *rdp)
+{
+       int i, j;
+
+       /* If the CPU has no callbacks, nothing to do. */
+       if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
+               return;
+
+       /*
+        * Find all callbacks whose ->completed numbers indicate that they
+        * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
+        */
+       for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
+               if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i]))
+                       break;
+               rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[i];
+       }
+       /* Clean up any sublist tail pointers that were misordered above. */
+       for (j = RCU_WAIT_TAIL; j < i; j++)
+               rdp->nxttail[j] = rdp->nxttail[RCU_DONE_TAIL];
+
+       /* Copy down callbacks to fill in empty sublists. */
+       for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
+               if (rdp->nxttail[j] == rdp->nxttail[RCU_NEXT_TAIL])
+                       break;
+               rdp->nxttail[j] = rdp->nxttail[i];
+               rdp->nxtcompleted[j] = rdp->nxtcompleted[i];
+       }
+
+       /* Classify any remaining callbacks. */
+       rcu_accelerate_cbs(rsp, rnp, rdp);
+}
+
 /*
  * Advance this CPU's callbacks, but only if the current grace period
  * has ended.  This may be called only from the CPU to whom the rdp
@@ -1080,12 +1190,15 @@ static void
 __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
 {
        /* Did another grace period end? */
-       if (rdp->completed != rnp->completed) {
+       if (rdp->completed == rnp->completed) {
 
-               /* Advance callbacks.  No harm if list empty. */
-               rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
-               rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
-               rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+               /* No, so just accelerate recent callbacks. */
+               rcu_accelerate_cbs(rsp, rnp, rdp);
+
+       } else {
+
+               /* Advance callbacks. */
+               rcu_advance_cbs(rsp, rnp, rdp);
 
                /* Remember that we saw this grace-period completion. */
                rdp->completed = rnp->completed;
@@ -1392,17 +1505,10 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
        /*
         * Because there is no grace period in progress right now,
         * any callbacks we have up to this point will be satisfied
-        * by the next grace period.  So promote all callbacks to be
-        * handled after the end of the next grace period.  If the
-        * CPU is not yet aware of the end of the previous grace period,
-        * we need to allow for the callback advancement that will
-        * occur when it does become aware.  Deadlock prevents us from
-        * making it aware at this point: We cannot acquire a leaf
-        * rcu_node ->lock while holding the root rcu_node ->lock.
+        * by the next grace period.  So this is a good place to
+        * assign a grace period number to recently posted callbacks.
         */
-       rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
-       if (rdp->completed == rsp->completed)
-               rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+       rcu_accelerate_cbs(rsp, rnp, rdp);
 
        rsp->gp_flags = RCU_GP_FLAG_INIT;
        raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
@@ -1527,7 +1633,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
                 * This GP can't end until cpu checks in, so all of our
                 * callbacks can be processed during the next GP.
                 */
-               rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+               rcu_accelerate_cbs(rsp, rnp, rdp);
 
                rcu_report_qs_rnp(mask, rsp, rnp, flags); /* releases rnp->lock */
        }
@@ -1779,7 +1885,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
        long bl, count, count_lazy;
        int i;
 
-       /* If no callbacks are ready, just return.*/
+       /* If no callbacks are ready, just return. */
        if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
                trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
                trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
@@ -2008,19 +2114,19 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 
        WARN_ON_ONCE(rdp->beenonline == 0);
 
-       /*
-        * Advance callbacks in response to end of earlier grace
-        * period that some other CPU ended.
-        */
+       /* Handle the end of a grace period that some other CPU ended.  */
        rcu_process_gp_end(rsp, rdp);
 
        /* Update RCU state based on any recent quiescent states. */
        rcu_check_quiescent_state(rsp, rdp);
 
        /* Does this CPU require a not-yet-started grace period? */
+       local_irq_save(flags);
        if (cpu_needs_another_gp(rsp, rdp)) {
-               raw_spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags);
+               raw_spin_lock(&rcu_get_root(rsp)->lock); /* irqs disabled. */
                rcu_start_gp(rsp, flags);  /* releases above lock */
+       } else {
+               local_irq_restore(flags);
        }
 
        /* If there are callbacks ready, invoke them. */
@@ -2719,9 +2825,6 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
        rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
        WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
        WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
-#ifdef CONFIG_RCU_USER_QS
-       WARN_ON_ONCE(rdp->dynticks->in_user);
-#endif
        rdp->cpu = cpu;
        rdp->rsp = rsp;
        rcu_boot_init_nocb_percpu_data(rdp);
@@ -2938,6 +3041,10 @@ static void __init rcu_init_one(struct rcu_state *rsp,
 
        BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
 
+       /* Silence gcc 4.8 warning about array index out of range. */
+       if (rcu_num_lvls > RCU_NUM_LVLS)
+               panic("rcu_init_one: rcu_num_lvls overflow");
+
        /* Initialize the level-tracking arrays. */
 
        for (i = 0; i < rcu_num_lvls; i++)
@@ -3074,7 +3181,6 @@ void __init rcu_init(void)
        cpu_notifier(rcu_cpu_notify, 0);
        for_each_online_cpu(cpu)
                rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
-       check_cpu_stall_init();
 }
 
 #include "rcutree_plugin.h"
index 4b69291b093d7157d638da01d342c2ee3d1f2d16..c896b5045d9dcb5c67665b4caf1d222e103d56ff 100644 (file)
@@ -102,10 +102,6 @@ struct rcu_dynticks {
                                    /* idle-period nonlazy_posted snapshot. */
        int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
 #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
-#ifdef CONFIG_RCU_USER_QS
-       bool ignore_user_qs;        /* Treat userspace as extended QS or not */
-       bool in_user;               /* Is the CPU in userland from RCU POV? */
-#endif
 };
 
 /* RCU's kthread states for tracing. */
@@ -282,6 +278,8 @@ struct rcu_data {
         */
        struct rcu_head *nxtlist;
        struct rcu_head **nxttail[RCU_NEXT_SIZE];
+       unsigned long   nxtcompleted[RCU_NEXT_SIZE];
+                                       /* grace periods for sublists. */
        long            qlen_lazy;      /* # of lazy queued callbacks */
        long            qlen;           /* # of queued callbacks, incl lazy */
        long            qlen_last_fqs_check;
@@ -343,11 +341,6 @@ struct rcu_data {
 
 #define RCU_JIFFIES_TILL_FORCE_QS       3      /* for rsp->jiffies_force_qs */
 
-#ifdef CONFIG_PROVE_RCU
-#define RCU_STALL_DELAY_DELTA         (5 * HZ)
-#else
-#define RCU_STALL_DELAY_DELTA         0
-#endif
 #define RCU_STALL_RAT_DELAY            2       /* Allow other CPUs time */
                                                /*  to take at least one */
                                                /*  scheduling clock irq */
index f6e5ec2932b4aed0ff40016e8fdf77e5b9c1f366..c1cc7e17ff9d9dc29db596bca0468056bd3f0665 100644 (file)
@@ -40,8 +40,7 @@
 #ifdef CONFIG_RCU_NOCB_CPU
 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
 static bool have_rcu_nocb_mask;            /* Was rcu_nocb_mask allocated? */
-static bool rcu_nocb_poll;         /* Offload kthread are to poll. */
-module_param(rcu_nocb_poll, bool, 0444);
+static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
 static char __initdata nocb_buf[NR_CPUS * 5];
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
@@ -2159,6 +2158,13 @@ static int __init rcu_nocb_setup(char *str)
 }
 __setup("rcu_nocbs=", rcu_nocb_setup);
 
+static int __init parse_rcu_nocb_poll(char *arg)
+{
+       rcu_nocb_poll = 1;
+       return 0;
+}
+early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
+
 /* Is the specified CPU a no-CPUs CPU? */
 static bool is_nocb_cpu(int cpu)
 {
@@ -2366,10 +2372,11 @@ static int rcu_nocb_kthread(void *arg)
        for (;;) {
                /* If not polling, wait for next batch of callbacks. */
                if (!rcu_nocb_poll)
-                       wait_event(rdp->nocb_wq, rdp->nocb_head);
+                       wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
                list = ACCESS_ONCE(rdp->nocb_head);
                if (!list) {
                        schedule_timeout_interruptible(1);
+                       flush_signals(current);
                        continue;
                }
 
index 16502d3a71c8af4f726eb8ea3e5f9ea6e6e943c5..13b243a323fa0a5dfcfd965a0eb39f7d4e35fe45 100644 (file)
@@ -17,6 +17,7 @@
  * See rt.c in preempt-rt for proper credits and further information
  */
 #include <linux/sched.h>
+#include <linux/sched/rt.h>
 #include <linux/delay.h>
 #include <linux/export.h>
 #include <linux/spinlock.h>
index 98ec494754608a18b93625bf54f53fb2e0617bf0..7890b10084a71e4deff2a1ed652c7011358fc3b4 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/kthread.h>
 #include <linux/export.h>
 #include <linux/sched.h>
+#include <linux/sched/rt.h>
 #include <linux/spinlock.h>
 #include <linux/timer.h>
 #include <linux/freezer.h>
index a242e691c993038f35ace870d245487d44b5bc16..1e09308bf2a1e5f46c3f68d02bc4bcb346b42ec5 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/spinlock.h>
 #include <linux/export.h>
 #include <linux/sched.h>
+#include <linux/sched/rt.h>
 #include <linux/timer.h>
 
 #include "rtmutex_common.h"
index 6850f53e02d82e023954df56f5230b4033e80b73..b3c6c3fcd8474237a2e41ab4db87ee04c2435b7b 100644 (file)
@@ -116,6 +116,16 @@ void down_read_nested(struct rw_semaphore *sem, int subclass)
 
 EXPORT_SYMBOL(down_read_nested);
 
+void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
+{
+       might_sleep();
+       rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
+
+       LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
+}
+
+EXPORT_SYMBOL(_down_write_nest_lock);
+
 void down_write_nested(struct rw_semaphore *sem, int subclass)
 {
        might_sleep();
index 0984a21076a3ed8a37e56d5381bff6320d8fa5f2..64de5f8b0c9ed654ec475c2cae33fe539a1d43ca 100644 (file)
@@ -35,6 +35,7 @@ static inline void autogroup_destroy(struct kref *kref)
        ag->tg->rt_se = NULL;
        ag->tg->rt_rq = NULL;
 #endif
+       sched_offline_group(ag->tg);
        sched_destroy_group(ag->tg);
 }
 
@@ -76,6 +77,8 @@ static inline struct autogroup *autogroup_create(void)
        if (IS_ERR(tg))
                goto out_free;
 
+       sched_online_group(tg, &root_task_group);
+
        kref_init(&ag->kref);
        init_rwsem(&ag->lock);
        ag->id = atomic_inc_return(&autogroup_seq_nr);
index 257002c13bb02acad92c74347e3b38ca3bc881b1..3a673a3b0c6bb7ffbfcb2897220a2806b320b6d5 100644 (file)
@@ -83,7 +83,7 @@
 #endif
 
 #include "sched.h"
-#include "../workqueue_sched.h"
+#include "../workqueue_internal.h"
 #include "../smpboot.h"
 
 #define CREATE_TRACE_POINTS
@@ -1523,7 +1523,8 @@ out:
  */
 int wake_up_process(struct task_struct *p)
 {
-       return try_to_wake_up(p, TASK_ALL, 0);
+       WARN_ON(task_is_stopped_or_traced(p));
+       return try_to_wake_up(p, TASK_NORMAL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
@@ -4370,7 +4371,7 @@ bool __sched yield_to(struct task_struct *p, bool preempt)
        struct task_struct *curr = current;
        struct rq *rq, *p_rq;
        unsigned long flags;
-       bool yielded = 0;
+       int yielded = 0;
 
        local_irq_save(flags);
        rq = this_rq();
@@ -4666,6 +4667,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         */
        idle->sched_class = &idle_sched_class;
        ftrace_graph_init_idle_task(idle, cpu);
+       vtime_init_idle(idle);
 #if defined(CONFIG_SMP)
        sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
 #endif
@@ -7159,7 +7161,6 @@ static void free_sched_group(struct task_group *tg)
 struct task_group *sched_create_group(struct task_group *parent)
 {
        struct task_group *tg;
-       unsigned long flags;
 
        tg = kzalloc(sizeof(*tg), GFP_KERNEL);
        if (!tg)
@@ -7171,6 +7172,17 @@ struct task_group *sched_create_group(struct task_group *parent)
        if (!alloc_rt_sched_group(tg, parent))
                goto err;
 
+       return tg;
+
+err:
+       free_sched_group(tg);
+       return ERR_PTR(-ENOMEM);
+}
+
+void sched_online_group(struct task_group *tg, struct task_group *parent)
+{
+       unsigned long flags;
+
        spin_lock_irqsave(&task_group_lock, flags);
        list_add_rcu(&tg->list, &task_groups);
 
@@ -7180,12 +7192,6 @@ struct task_group *sched_create_group(struct task_group *parent)
        INIT_LIST_HEAD(&tg->children);
        list_add_rcu(&tg->siblings, &parent->children);
        spin_unlock_irqrestore(&task_group_lock, flags);
-
-       return tg;
-
-err:
-       free_sched_group(tg);
-       return ERR_PTR(-ENOMEM);
 }
 
 /* rcu callback to free various structures associated with a task group */
@@ -7197,6 +7203,12 @@ static void free_sched_group_rcu(struct rcu_head *rhp)
 
 /* Destroy runqueue etc associated with a task group */
 void sched_destroy_group(struct task_group *tg)
+{
+       /* wait for possible concurrent references to cfs_rqs to complete */
+       call_rcu(&tg->rcu, free_sched_group_rcu);
+}
+
+void sched_offline_group(struct task_group *tg)
 {
        unsigned long flags;
        int i;
@@ -7209,9 +7221,6 @@ void sched_destroy_group(struct task_group *tg)
        list_del_rcu(&tg->list);
        list_del_rcu(&tg->siblings);
        spin_unlock_irqrestore(&task_group_lock, flags);
-
-       /* wait for possible concurrent references to cfs_rqs complete */
-       call_rcu(&tg->rcu, free_sched_group_rcu);
 }
 
 /* change task's runqueue when it moves between groups.
@@ -7507,6 +7516,25 @@ static int sched_rt_global_constraints(void)
 }
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+int sched_rr_handler(struct ctl_table *table, int write,
+               void __user *buffer, size_t *lenp,
+               loff_t *ppos)
+{
+       int ret;
+       static DEFINE_MUTEX(mutex);
+
+       mutex_lock(&mutex);
+       ret = proc_dointvec(table, write, buffer, lenp, ppos);
+       /* make sure that internally we keep jiffies */
+       /* also, writing zero resets timeslice to default */
+       if (!ret && write) {
+               sched_rr_timeslice = sched_rr_timeslice <= 0 ?
+                       RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
+       }
+       mutex_unlock(&mutex);
+       return ret;
+}
+
 int sched_rt_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
@@ -7563,6 +7591,19 @@ static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
        return &tg->css;
 }
 
+static int cpu_cgroup_css_online(struct cgroup *cgrp)
+{
+       struct task_group *tg = cgroup_tg(cgrp);
+       struct task_group *parent;
+
+       if (!cgrp->parent)
+               return 0;
+
+       parent = cgroup_tg(cgrp->parent);
+       sched_online_group(tg, parent);
+       return 0;
+}
+
 static void cpu_cgroup_css_free(struct cgroup *cgrp)
 {
        struct task_group *tg = cgroup_tg(cgrp);
@@ -7570,6 +7611,13 @@ static void cpu_cgroup_css_free(struct cgroup *cgrp)
        sched_destroy_group(tg);
 }
 
+static void cpu_cgroup_css_offline(struct cgroup *cgrp)
+{
+       struct task_group *tg = cgroup_tg(cgrp);
+
+       sched_offline_group(tg);
+}
+
 static int cpu_cgroup_can_attach(struct cgroup *cgrp,
                                 struct cgroup_taskset *tset)
 {
@@ -7925,6 +7973,8 @@ struct cgroup_subsys cpu_cgroup_subsys = {
        .name           = "cpu",
        .css_alloc      = cpu_cgroup_css_alloc,
        .css_free       = cpu_cgroup_css_free,
+       .css_online     = cpu_cgroup_css_online,
+       .css_offline    = cpu_cgroup_css_offline,
        .can_attach     = cpu_cgroup_can_attach,
        .attach         = cpu_cgroup_attach,
        .exit           = cpu_cgroup_exit,
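
A small userspace sketch of the unit handling in sched_rr_handler() above: the
sysctl value is written in milliseconds, kept internally in jiffies, and a
non-positive write falls back to the default timeslice.  HZ, the default, and
the helper name below are assumptions for illustration only, not kernel symbols.

#include <stdio.h>

#define HZ            100                           /* assumed tick rate */
#define RR_TIMESLICE  (100 * HZ / 1000)             /* assumed default: 100 ms in jiffies */

static long msecs_to_jiffies_sketch(long ms)
{
        return ms * HZ / 1000;                      /* simplified conversion */
}

int main(void)
{
        long written_ms = 30;                       /* value written to sched_rr_timeslice_ms */
        long timeslice = written_ms <= 0 ? RR_TIMESLICE
                                         : msecs_to_jiffies_sketch(written_ms);

        printf("timeslice = %ld jiffies\n", timeslice);  /* prints 3 with HZ=100 */
        return 0;
}
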
index 23aa789c53ee5c6e061c6bf2b0f8181292695044..1095e878a46fdbe4a7224eb07a68cf1fc90c2530 100644 (file)
@@ -28,6 +28,8 @@
  */
 
 #include <linux/gfp.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
 #include "cpupri.h"
 
 /* Convert between a 140 based task->prio, and our 102 based cpupri */
index 293b202fcf79e1ba577d0672598ba59f4e8ebf56..9857329ed2804a6b3255bd402a73722b8ae33ecc 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/tsacct_kern.h>
 #include <linux/kernel_stat.h>
 #include <linux/static_key.h>
+#include <linux/context_tracking.h>
 #include "sched.h"
 
 
@@ -163,7 +164,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
        task_group_account_field(p, index, (__force u64) cputime);
 
        /* Account for user time used */
-       acct_update_integrals(p);
+       acct_account_cputime(p);
 }
 
 /*
@@ -213,7 +214,7 @@ void __account_system_time(struct task_struct *p, cputime_t cputime,
        task_group_account_field(p, index, (__force u64) cputime);
 
        /* Account for system time used */
-       acct_update_integrals(p);
+       acct_account_cputime(p);
 }
 
 /*
@@ -295,6 +296,7 @@ static __always_inline bool steal_account_process_tick(void)
 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 {
        struct signal_struct *sig = tsk->signal;
+       cputime_t utime, stime;
        struct task_struct *t;
 
        times->utime = sig->utime;
@@ -308,16 +310,15 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 
        t = tsk;
        do {
-               times->utime += t->utime;
-               times->stime += t->stime;
+               task_cputime(tsk, &utime, &stime);
+               times->utime += utime;
+               times->stime += stime;
                times->sum_exec_runtime += task_sched_runtime(t);
        } while_each_thread(tsk, t);
 out:
        rcu_read_unlock();
 }
 
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 /*
  * Account a tick to a process and cpustat
@@ -382,11 +383,12 @@ static void irqtime_account_idle_ticks(int ticks)
                irqtime_account_process_tick(current, 0, rq);
 }
 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
-static void irqtime_account_idle_ticks(int ticks) {}
-static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
+static inline void irqtime_account_idle_ticks(int ticks) {}
+static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                                struct rq *rq) {}
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 /*
  * Account a single tick of cpu time.
  * @p: the process that the cpu time gets accounted to
@@ -397,6 +399,9 @@ void account_process_tick(struct task_struct *p, int user_tick)
        cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
        struct rq *rq = this_rq();
 
+       if (vtime_accounting_enabled())
+               return;
+
        if (sched_clock_irqtime) {
                irqtime_account_process_tick(p, user_tick, rq);
                return;
@@ -438,8 +443,7 @@ void account_idle_ticks(unsigned long ticks)
 
        account_idle_time(jiffies_to_cputime(ticks));
 }
-
-#endif
+#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 /*
  * Use precise platform statistics if available:
@@ -461,25 +465,20 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime
        *st = cputime.stime;
 }
 
-void vtime_account_system_irqsafe(struct task_struct *tsk)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-       vtime_account_system(tsk);
-       local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(vtime_account_system_irqsafe);
-
 #ifndef __ARCH_HAS_VTIME_TASK_SWITCH
 void vtime_task_switch(struct task_struct *prev)
 {
+       if (!vtime_accounting_enabled())
+               return;
+
        if (is_idle_task(prev))
                vtime_account_idle(prev);
        else
                vtime_account_system(prev);
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        vtime_account_user(prev);
+#endif
        arch_vtime_task_switch(prev);
 }
 #endif
@@ -493,27 +492,40 @@ void vtime_task_switch(struct task_struct *prev)
  * vtime_account().
  */
 #ifndef __ARCH_HAS_VTIME_ACCOUNT
-void vtime_account(struct task_struct *tsk)
+void vtime_account_irq_enter(struct task_struct *tsk)
 {
-       if (in_interrupt() || !is_idle_task(tsk))
-               vtime_account_system(tsk);
-       else
-               vtime_account_idle(tsk);
+       if (!vtime_accounting_enabled())
+               return;
+
+       if (!in_interrupt()) {
+               /*
+                * If we interrupted user, context_tracking_in_user()
+                * is 1 because context tracking doesn't hook
+                * on irq entry/exit. This way we know if
+                * we need to flush user time on kernel entry.
+                */
+               if (context_tracking_in_user()) {
+                       vtime_account_user(tsk);
+                       return;
+               }
+
+               if (is_idle_task(tsk)) {
+                       vtime_account_idle(tsk);
+                       return;
+               }
+       }
+       vtime_account_system(tsk);
 }
-EXPORT_SYMBOL_GPL(vtime_account);
+EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
 #endif /* __ARCH_HAS_VTIME_ACCOUNT */
 
-#else
-
-#ifndef nsecs_to_cputime
-# define nsecs_to_cputime(__nsecs)     nsecs_to_jiffies(__nsecs)
-#endif
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
 
-static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
+static cputime_t scale_stime(cputime_t stime, cputime_t rtime, cputime_t total)
 {
        u64 temp = (__force u64) rtime;
 
-       temp *= (__force u64) utime;
+       temp *= (__force u64) stime;
 
        if (sizeof(cputime_t) == 4)
                temp = div_u64(temp, (__force u32) total);
@@ -531,10 +543,10 @@ static void cputime_adjust(struct task_cputime *curr,
                           struct cputime *prev,
                           cputime_t *ut, cputime_t *st)
 {
-       cputime_t rtime, utime, total;
+       cputime_t rtime, stime, total;
 
-       utime = curr->utime;
-       total = utime + curr->stime;
+       stime = curr->stime;
+       total = stime + curr->utime;
 
        /*
         * Tick based cputime accounting depends on random scheduling
@@ -549,17 +561,17 @@ static void cputime_adjust(struct task_cputime *curr,
        rtime = nsecs_to_cputime(curr->sum_exec_runtime);
 
        if (total)
-               utime = scale_utime(utime, rtime, total);
+               stime = scale_stime(stime, rtime, total);
        else
-               utime = rtime;
+               stime = rtime;
 
        /*
         * If the tick based count grows faster than the scheduler one,
         * the result of the scaling may go backward.
         * Let's enforce monotonicity.
         */
-       prev->utime = max(prev->utime, utime);
-       prev->stime = max(prev->stime, rtime - prev->utime);
+       prev->stime = max(prev->stime, stime);
+       prev->utime = max(prev->utime, rtime - prev->stime);
 
        *ut = prev->utime;
        *st = prev->stime;
@@ -568,11 +580,10 @@ static void cputime_adjust(struct task_cputime *curr,
 void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
        struct task_cputime cputime = {
-               .utime = p->utime,
-               .stime = p->stime,
                .sum_exec_runtime = p->se.sum_exec_runtime,
        };
 
+       task_cputime(p, &cputime.utime, &cputime.stime);
        cputime_adjust(&cputime, &p->prev_cputime, ut, st);
 }
 
@@ -586,4 +597,221 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime
        thread_group_cputime(p, &cputime);
        cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
 }
-#endif
+#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+static unsigned long long vtime_delta(struct task_struct *tsk)
+{
+       unsigned long long clock;
+
+       clock = sched_clock();
+       if (clock < tsk->vtime_snap)
+               return 0;
+
+       return clock - tsk->vtime_snap;
+}
+
+static cputime_t get_vtime_delta(struct task_struct *tsk)
+{
+       unsigned long long delta = vtime_delta(tsk);
+
+       WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
+       tsk->vtime_snap += delta;
+
+       /* CHECKME: always safe to convert nsecs to cputime? */
+       return nsecs_to_cputime(delta);
+}
+
+static void __vtime_account_system(struct task_struct *tsk)
+{
+       cputime_t delta_cpu = get_vtime_delta(tsk);
+
+       account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
+}
+
+void vtime_account_system(struct task_struct *tsk)
+{
+       if (!vtime_accounting_enabled())
+               return;
+
+       write_seqlock(&tsk->vtime_seqlock);
+       __vtime_account_system(tsk);
+       write_sequnlock(&tsk->vtime_seqlock);
+}
+
+void vtime_account_irq_exit(struct task_struct *tsk)
+{
+       if (!vtime_accounting_enabled())
+               return;
+
+       write_seqlock(&tsk->vtime_seqlock);
+       if (context_tracking_in_user())
+               tsk->vtime_snap_whence = VTIME_USER;
+       __vtime_account_system(tsk);
+       write_sequnlock(&tsk->vtime_seqlock);
+}
+
+void vtime_account_user(struct task_struct *tsk)
+{
+       cputime_t delta_cpu;
+
+       if (!vtime_accounting_enabled())
+               return;
+
+       delta_cpu = get_vtime_delta(tsk);
+
+       write_seqlock(&tsk->vtime_seqlock);
+       tsk->vtime_snap_whence = VTIME_SYS;
+       account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
+       write_sequnlock(&tsk->vtime_seqlock);
+}
+
+void vtime_user_enter(struct task_struct *tsk)
+{
+       if (!vtime_accounting_enabled())
+               return;
+
+       write_seqlock(&tsk->vtime_seqlock);
+       tsk->vtime_snap_whence = VTIME_USER;
+       __vtime_account_system(tsk);
+       write_sequnlock(&tsk->vtime_seqlock);
+}
+
+void vtime_guest_enter(struct task_struct *tsk)
+{
+       write_seqlock(&tsk->vtime_seqlock);
+       __vtime_account_system(tsk);
+       current->flags |= PF_VCPU;
+       write_sequnlock(&tsk->vtime_seqlock);
+}
+
+void vtime_guest_exit(struct task_struct *tsk)
+{
+       write_seqlock(&tsk->vtime_seqlock);
+       __vtime_account_system(tsk);
+       current->flags &= ~PF_VCPU;
+       write_sequnlock(&tsk->vtime_seqlock);
+}
+
+void vtime_account_idle(struct task_struct *tsk)
+{
+       cputime_t delta_cpu = get_vtime_delta(tsk);
+
+       account_idle_time(delta_cpu);
+}
+
+bool vtime_accounting_enabled(void)
+{
+       return context_tracking_active();
+}
+
+void arch_vtime_task_switch(struct task_struct *prev)
+{
+       write_seqlock(&prev->vtime_seqlock);
+       prev->vtime_snap_whence = VTIME_SLEEPING;
+       write_sequnlock(&prev->vtime_seqlock);
+
+       write_seqlock(&current->vtime_seqlock);
+       current->vtime_snap_whence = VTIME_SYS;
+       current->vtime_snap = sched_clock();
+       write_sequnlock(&current->vtime_seqlock);
+}
+
+void vtime_init_idle(struct task_struct *t)
+{
+       unsigned long flags;
+
+       write_seqlock_irqsave(&t->vtime_seqlock, flags);
+       t->vtime_snap_whence = VTIME_SYS;
+       t->vtime_snap = sched_clock();
+       write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
+}
+
+cputime_t task_gtime(struct task_struct *t)
+{
+       unsigned int seq;
+       cputime_t gtime;
+
+       do {
+               seq = read_seqbegin(&t->vtime_seqlock);
+
+               gtime = t->gtime;
+               if (t->flags & PF_VCPU)
+                       gtime += vtime_delta(t);
+
+       } while (read_seqretry(&t->vtime_seqlock, seq));
+
+       return gtime;
+}
+
+/*
+ * Fetch cputime raw values from fields of task_struct and
+ * add up the pending nohz execution time since the last
+ * cputime snapshot.
+ */
+static void
+fetch_task_cputime(struct task_struct *t,
+                  cputime_t *u_dst, cputime_t *s_dst,
+                  cputime_t *u_src, cputime_t *s_src,
+                  cputime_t *udelta, cputime_t *sdelta)
+{
+       unsigned int seq;
+       unsigned long long delta;
+
+       do {
+               *udelta = 0;
+               *sdelta = 0;
+
+               seq = read_seqbegin(&t->vtime_seqlock);
+
+               if (u_dst)
+                       *u_dst = *u_src;
+               if (s_dst)
+                       *s_dst = *s_src;
+
+               /* Task is sleeping, nothing to add */
+               if (t->vtime_snap_whence == VTIME_SLEEPING ||
+                   is_idle_task(t))
+                       continue;
+
+               delta = vtime_delta(t);
+
+               /*
+                * Task runs either in user or kernel space; add pending nohz time to
+                * the right place.
+                */
+               if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
+                       *udelta = delta;
+               } else {
+                       if (t->vtime_snap_whence == VTIME_SYS)
+                               *sdelta = delta;
+               }
+       } while (read_seqretry(&t->vtime_seqlock, seq));
+}
+
+
+void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
+{
+       cputime_t udelta, sdelta;
+
+       fetch_task_cputime(t, utime, stime, &t->utime,
+                          &t->stime, &udelta, &sdelta);
+       if (utime)
+               *utime += udelta;
+       if (stime)
+               *stime += sdelta;
+}
+
+void task_cputime_scaled(struct task_struct *t,
+                        cputime_t *utimescaled, cputime_t *stimescaled)
+{
+       cputime_t udelta, sdelta;
+
+       fetch_task_cputime(t, utimescaled, stimescaled,
+                          &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
+       if (utimescaled)
+               *utimescaled += cputime_to_scaled(udelta);
+       if (stimescaled)
+               *stimescaled += cputime_to_scaled(sdelta);
+}
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
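
A minimal userspace sketch of the arithmetic in scale_stime()/cputime_adjust()
above: the scheduler's precise runtime is split between user and system time in
proportion to the tick-based samples (the monotonicity clamp is omitted here).
Variable names and sample values are local to this sketch, not kernel symbols.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint64_t utime = 300, stime = 100;          /* tick-based samples */
        uint64_t rtime = 1000;                      /* precise sum_exec_runtime */
        uint64_t total = utime + stime;

        /* Scale stime by the precise runtime; fall back to rtime if no ticks. */
        uint64_t stime_scaled = total ? stime * rtime / total : rtime;
        uint64_t utime_scaled = rtime - stime_scaled;

        printf("stime=%" PRIu64 " utime=%" PRIu64 "\n", stime_scaled, utime_scaled);
        return 0;                                   /* prints stime=250 utime=750 */
}
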
index 2cd3c1b4e582857eefb87fa9a98fb9cad11509a6..557e7b53b323b93a9ad7f62a9c7137da31bb3779 100644 (file)
@@ -110,13 +110,6 @@ static char *task_group_path(struct task_group *tg)
        if (autogroup_path(tg, group_path, PATH_MAX))
                return group_path;
 
-       /*
-        * May be NULL if the underlying cgroup isn't fully-created yet
-        */
-       if (!tg->css.cgroup) {
-               group_path[0] = '\0';
-               return group_path;
-       }
        cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
        return group_path;
 }
@@ -222,8 +215,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
                        cfs_rq->runnable_load_avg);
        SEQ_printf(m, "  .%-30s: %lld\n", "blocked_load_avg",
                        cfs_rq->blocked_load_avg);
-       SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
-                       atomic64_read(&cfs_rq->tg->load_avg));
+       SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_avg",
+                       (unsigned long long)atomic64_read(&cfs_rq->tg->load_avg));
        SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_contrib",
                        cfs_rq->tg_load_contrib);
        SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
index 5eea8707234a15167eeaabbc5881f04cf0345ca1..7a33e5986fc522e8860a712a234b39774ee80938 100644 (file)
@@ -1680,9 +1680,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
        }
 
        /* ensure we never gain time by being placed backwards. */
-       vruntime = max_vruntime(se->vruntime, vruntime);
-
-       se->vruntime = vruntime;
+       se->vruntime = max_vruntime(se->vruntime, vruntime);
 }
 
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
@@ -2663,7 +2661,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
        hrtimer_cancel(&cfs_b->slack_timer);
 }
 
-static void unthrottle_offline_cfs_rqs(struct rq *rq)
+static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
 {
        struct cfs_rq *cfs_rq;
 
@@ -3254,25 +3252,18 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
  */
 static int select_idle_sibling(struct task_struct *p, int target)
 {
-       int cpu = smp_processor_id();
-       int prev_cpu = task_cpu(p);
        struct sched_domain *sd;
        struct sched_group *sg;
-       int i;
+       int i = task_cpu(p);
 
-       /*
-        * If the task is going to be woken-up on this cpu and if it is
-        * already idle, then it is the right target.
-        */
-       if (target == cpu && idle_cpu(cpu))
-               return cpu;
+       if (idle_cpu(target))
+               return target;
 
        /*
-        * If the task is going to be woken-up on the cpu where it previously
-        * ran and if it is currently idle, then it the right target.
+        * If the previous cpu is cache affine and idle, don't be stupid.
         */
-       if (target == prev_cpu && idle_cpu(prev_cpu))
-               return prev_cpu;
+       if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
+               return i;
 
        /*
         * Otherwise, iterate the domains and find an eligible idle cpu.
@@ -3286,7 +3277,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
                                goto next;
 
                        for_each_cpu(i, sched_group_cpus(sg)) {
-                               if (!idle_cpu(i))
+                               if (i == target || !idle_cpu(i))
                                        goto next;
                        }
 
@@ -6101,7 +6092,7 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
         * idle runqueue:
         */
        if (rq->cfs.load.weight)
-               rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
+               rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
 
        return rr_interval;
 }
index 418feb01344edb7e59f11643e153e2f1866cc4ed..127a2c4cf4ab4f176bbf554be2ad34f4d597c258 100644 (file)
@@ -7,6 +7,8 @@
 
 #include <linux/slab.h>
 
+int sched_rr_timeslice = RR_TIMESLICE;
+
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
 
 struct rt_bandwidth def_rt_bandwidth;
@@ -566,7 +568,7 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 static int do_balance_runtime(struct rt_rq *rt_rq)
 {
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
-       struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
+       struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
        int i, weight, more = 0;
        u64 rt_period;
 
@@ -925,8 +927,8 @@ static void update_curr_rt(struct rq *rq)
                return;
 
        delta_exec = rq->clock_task - curr->se.exec_start;
-       if (unlikely((s64)delta_exec < 0))
-               delta_exec = 0;
+       if (unlikely((s64)delta_exec <= 0))
+               return;
 
        schedstat_set(curr->se.statistics.exec_max,
                      max(curr->se.statistics.exec_max, delta_exec));
@@ -1427,8 +1429,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
        if (!task_running(rq, p) &&
-           (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
-           (p->nr_cpus_allowed > 1))
+           cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
                return 1;
        return 0;
 }
@@ -1889,8 +1890,11 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
         * we may need to handle the pulling of RT tasks
         * now.
         */
-       if (p->on_rq && !rq->rt.rt_nr_running)
-               pull_rt_task(rq);
+       if (!p->on_rq || rq->rt.rt_nr_running)
+               return;
+
+       if (pull_rt_task(rq))
+               resched_task(rq->curr);
 }
 
 void init_sched_rt_class(void)
@@ -1985,7 +1989,11 @@ static void watchdog(struct rq *rq, struct task_struct *p)
        if (soft != RLIM_INFINITY) {
                unsigned long next;
 
-               p->rt.timeout++;
+               if (p->rt.watchdog_stamp != jiffies) {
+                       p->rt.timeout++;
+                       p->rt.watchdog_stamp = jiffies;
+               }
+
                next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
                if (p->rt.timeout > next)
                        p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
@@ -2010,7 +2018,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
        if (--p->rt.time_slice)
                return;
 
-       p->rt.time_slice = RR_TIMESLICE;
+       p->rt.time_slice = sched_rr_timeslice;
 
        /*
         * Requeue to the end of queue if we (and all of our ancestors) are the
@@ -2041,7 +2049,7 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
         * Time slice is 0 for SCHED_FIFO tasks
         */
        if (task->policy == SCHED_RR)
-               return RR_TIMESLICE;
+               return sched_rr_timeslice;
        else
                return 0;
 }
index fc886441436ac29ec87cc95211e03ed32ec77e83..cc03cfdf469f6af1bda58d936539c9348b7e9501 100644 (file)
@@ -1,5 +1,7 @@
 
 #include <linux/sched.h>
+#include <linux/sched/sysctl.h>
+#include <linux/sched/rt.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/stop_machine.h>
index 372771e948c230141a767003fa0f2e18c0d44db2..7f82adbad4800a08a5ca5e1836da455de555856a 100644 (file)
@@ -680,23 +680,17 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
  * No need to set need_resched since signal event passing
  * goes through ->blocked
  */
-void signal_wake_up(struct task_struct *t, int resume)
+void signal_wake_up_state(struct task_struct *t, unsigned int state)
 {
-       unsigned int mask;
-
        set_tsk_thread_flag(t, TIF_SIGPENDING);
-
        /*
-        * For SIGKILL, we want to wake it up in the stopped/traced/killable
+        * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
         * case. We don't check t->state here because there is a race with it
         * executing on another processor and just now entering stopped state.
         * By using wake_up_state, we ensure the process will wake up and
         * handle its death signal.
         */
-       mask = TASK_INTERRUPTIBLE;
-       if (resume)
-               mask |= TASK_WAKEKILL;
-       if (!wake_up_state(t, mask))
+       if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
                kick_process(t);
 }
 
@@ -844,7 +838,7 @@ static void ptrace_trap_notify(struct task_struct *t)
        assert_spin_locked(&t->sighand->siglock);
 
        task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
-       signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
+       ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
 }
 
 /*
@@ -1638,6 +1632,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
        unsigned long flags;
        struct sighand_struct *psig;
        bool autoreap = false;
+       cputime_t utime, stime;
 
        BUG_ON(sig == -1);
 
@@ -1675,8 +1670,9 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
                                       task_uid(tsk));
        rcu_read_unlock();
 
-       info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime);
-       info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime);
+       task_cputime(tsk, &utime, &stime);
+       info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
+       info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);
 
        info.si_status = tsk->exit_code & 0x7f;
        if (tsk->exit_code & 0x80)
@@ -1740,6 +1736,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
        unsigned long flags;
        struct task_struct *parent;
        struct sighand_struct *sighand;
+       cputime_t utime, stime;
 
        if (for_ptracer) {
                parent = tsk->parent;
@@ -1758,8 +1755,9 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
        info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
        rcu_read_unlock();
 
-       info.si_utime = cputime_to_clock_t(tsk->utime);
-       info.si_stime = cputime_to_clock_t(tsk->stime);
+       task_cputime(tsk, &utime, &stime);
+       info.si_utime = cputime_to_clock_t(utime);
+       info.si_stime = cputime_to_clock_t(stime);
 
        info.si_code = why;
        switch (why) {
@@ -1800,6 +1798,10 @@ static inline int may_ptrace_stop(void)
         * If SIGKILL was already sent before the caller unlocked
         * ->siglock we must see ->core_state != NULL. Otherwise it
         * is safe to enter schedule().
+        *
+        * This is almost outdated: a task with a pending SIGKILL can't
+        * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
+        * after SIGKILL was already dequeued.
         */
        if (unlikely(current->mm->core_state) &&
            unlikely(current->mm == current->parent->mm))
@@ -1925,6 +1927,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
                if (gstop_done)
                        do_notify_parent_cldstop(current, false, why);
 
+               /* tasklist protects us from ptrace_freeze_traced() */
                __set_current_state(TASK_RUNNING);
                if (clear_code)
                        current->exit_code = 0;
@@ -3116,8 +3119,9 @@ int __save_altstack(stack_t __user *uss, unsigned long sp)
 
 #ifdef CONFIG_COMPAT
 #ifdef CONFIG_GENERIC_SIGALTSTACK
-asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
-                                      compat_stack_t __user *uoss_ptr)
+COMPAT_SYSCALL_DEFINE2(sigaltstack,
+                       const compat_stack_t __user *, uss_ptr,
+                       compat_stack_t __user *, uoss_ptr)
 {
        stack_t uss, uoss;
        int ret;
index 29dd40a9f2f403ab86f39c96ddb505851b1d36c2..69f38bd98b423a53ada73c7a6b7a2217e91a77b4 100644 (file)
@@ -33,6 +33,7 @@ struct call_function_data {
        struct call_single_data csd;
        atomic_t                refs;
        cpumask_var_t           cpumask;
+       cpumask_var_t           cpumask_ipi;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
@@ -56,6 +57,9 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
                if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                cpu_to_node(cpu)))
                        return notifier_from_errno(-ENOMEM);
+               if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
+                               cpu_to_node(cpu)))
+                       return notifier_from_errno(-ENOMEM);
                break;
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -65,6 +69,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                free_cpumask_var(cfd->cpumask);
+               free_cpumask_var(cfd->cpumask_ipi);
                break;
 #endif
        };
@@ -526,6 +531,12 @@ void smp_call_function_many(const struct cpumask *mask,
                return;
        }
 
+       /*
+        * After we put an entry into the list, data->cpumask
+        * may be cleared again when another CPU sends another IPI for
+        * an SMP function call, so data->cpumask will be zero.
+        */
+       cpumask_copy(data->cpumask_ipi, data->cpumask);
        raw_spin_lock_irqsave(&call_function.lock, flags);
        /*
         * Place entry at the _HEAD_ of the list, so that any cpu still
@@ -549,7 +560,7 @@ void smp_call_function_many(const struct cpumask *mask,
        smp_mb();
 
        /* Send a message to all CPUs in the map */
-       arch_send_call_function_ipi_mask(data->cpumask);
+       arch_send_call_function_ipi_mask(data->cpumask_ipi);
 
        /* Optionally wait for the CPUs to complete */
        if (wait)
index d6c5fc0542428dae4fffc7546e06b0f6dd4ac4f5..d4abac261779e037b2396b80f5b4fe3ec6efad71 100644 (file)
@@ -183,9 +183,10 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
                kfree(td);
                return PTR_ERR(tsk);
        }
-
        get_task_struct(tsk);
        *per_cpu_ptr(ht->store, cpu) = tsk;
+       if (ht->create)
+               ht->create(cpu);
        return 0;
 }
 
@@ -225,7 +226,7 @@ static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
 {
        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
 
-       if (tsk)
+       if (tsk && !ht->selfparking)
                kthread_park(tsk);
 }
 
index ed567babe789c10ef48a2db7b63e17e0a2192d65..f5cc25f147a646e210384d1cb98ae4a97240b3a7 100644 (file)
@@ -221,7 +221,7 @@ asmlinkage void __do_softirq(void)
        current->flags &= ~PF_MEMALLOC;
 
        pending = local_softirq_pending();
-       vtime_account_irq_enter(current);
+       account_irq_enter_time(current);
 
        __local_bh_disable((unsigned long)__builtin_return_address(0),
                                SOFTIRQ_OFFSET);
@@ -272,7 +272,7 @@ restart:
 
        lockdep_softirq_exit();
 
-       vtime_account_irq_exit(current);
+       account_irq_exit_time(current);
        __local_bh_enable(SOFTIRQ_OFFSET);
        tsk_restore_flags(current, old_flags, PF_MEMALLOC);
 }
@@ -341,7 +341,7 @@ static inline void invoke_softirq(void)
  */
 void irq_exit(void)
 {
-       vtime_account_irq_exit(current);
+       account_irq_exit_time(current);
        trace_hardirq_exit();
        sub_preempt_count(IRQ_EXIT_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
index 2b859828cdc327d811627fbf877318de8dbc5595..01d5ccb8bfe3d96cf3cba70d217a8bddf0ff5f8a 100644 (file)
@@ -282,12 +282,8 @@ static int srcu_readers_active(struct srcu_struct *sp)
  */
 void cleanup_srcu_struct(struct srcu_struct *sp)
 {
-       int sum;
-
-       sum = srcu_readers_active(sp);
-       WARN_ON(sum);  /* Leakage unless caller handles error. */
-       if (sum != 0)
-               return;
+       if (WARN_ON(srcu_readers_active(sp)))
+               return; /* Leakage unless caller handles error. */
        free_percpu(sp->per_cpu_ref);
        sp->per_cpu_ref = NULL;
 }
@@ -302,9 +298,8 @@ int __srcu_read_lock(struct srcu_struct *sp)
 {
        int idx;
 
+       idx = ACCESS_ONCE(sp->completed) & 0x1;
        preempt_disable();
-       idx = rcu_dereference_index_check(sp->completed,
-                                         rcu_read_lock_sched_held()) & 0x1;
        ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
        smp_mb(); /* B */  /* Avoid leaking the critical section. */
        ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
@@ -321,10 +316,8 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
  */
 void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 {
-       preempt_disable();
        smp_mb(); /* C */  /* Avoid leaking the critical section. */
-       ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
-       preempt_enable();
+       this_cpu_dec(sp->per_cpu_ref->c[idx]);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
@@ -423,6 +416,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
                           !lock_is_held(&rcu_sched_lock_map),
                           "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");
 
+       might_sleep();
        init_completion(&rcu.completion);
 
        head->next = NULL;
@@ -455,10 +449,12 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
  * synchronize_srcu - wait for prior SRCU read-side critical-section completion
  * @sp: srcu_struct with which to synchronize.
  *
- * Flip the completed counter, and wait for the old count to drain to zero.
- * As with classic RCU, the updater must use some separate means of
- * synchronizing concurrent updates.  Can block; must be called from
- * process context.
+ * Wait for the counts of both indexes to drain to zero.  To avoid possible
+ * starvation of synchronize_srcu(), it first waits for the count of
+ * index ((->completed & 1) ^ 1) to drain to zero, then flips ->completed
+ * and waits for the count of the other index.
+ *
+ * Can block; must be called from process context.
  *
  * Note that it is illegal to call synchronize_srcu() from the corresponding
  * SRCU read-side critical section; doing so will result in deadlock.
@@ -480,12 +476,11 @@ EXPORT_SYMBOL_GPL(synchronize_srcu);
  * Wait for an SRCU grace period to elapse, but be more aggressive about
  * spinning rather than blocking when waiting.
  *
- * Note that it is illegal to call this function while holding any lock
- * that is acquired by a CPU-hotplug notifier.  It is also illegal to call
- * synchronize_srcu_expedited() from the corresponding SRCU read-side
- * critical section; doing so will result in deadlock.  However, it is
- * perfectly legal to call synchronize_srcu_expedited() on one srcu_struct
- * from some other srcu_struct's read-side critical section, as long as
+ * Note that it is also illegal to call synchronize_srcu_expedited()
+ * from the corresponding SRCU read-side critical section;
+ * doing so will result in deadlock.  However, it is perfectly legal
+ * to call synchronize_srcu_expedited() on one srcu_struct from some
+ * other srcu_struct's read-side critical section, as long as
  * the resulting graph of srcu_structs is acyclic.
  */
 void synchronize_srcu_expedited(struct srcu_struct *sp)
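
A minimal userspace model of the flip-and-wait order that the reworked
synchronize_srcu() comment above describes: first drain the index that new
readers are not entering, then flip ->completed and drain the other index, so a
steady stream of readers on one index cannot starve the updater.  The structure,
the helper, and the instant "drain" below are simplifications, not kernel code.

#include <stdio.h>

struct srcu_model {
        unsigned long completed;
        long readers[2];                        /* stand-in for the per-CPU c[] counters */
};

static void wait_drained(struct srcu_model *sp, int idx)
{
        while (sp->readers[idx])                /* the kernel blocks here; the model */
                sp->readers[idx]--;             /*  simply counts the readers out    */
}

static void synchronize_model(struct srcu_model *sp)
{
        int idx = (sp->completed & 1) ^ 1;      /* index new readers are not entering yet */

        wait_drained(sp, idx);                  /* phase 1: drain the inactive index */
        sp->completed++;                        /* flip: new readers switch to idx */
        wait_drained(sp, idx ^ 1);              /* phase 2: drain the previously active index */
}

int main(void)
{
        struct srcu_model sp = { .completed = 0, .readers = { 2, 1 } };

        synchronize_model(&sp);
        printf("completed=%lu\n", sp.completed);    /* prints completed=1 */
        return 0;
}
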
index 2f194e965715183786f2f8b640ddcbbf1b456dc4..95d178c62d5a8537c18fa2d6f1d947aed1a93448 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/stop_machine.h>
 #include <linux/interrupt.h>
 #include <linux/kallsyms.h>
-
+#include <linux/smpboot.h>
 #include <linux/atomic.h>
 
 /*
@@ -37,10 +37,10 @@ struct cpu_stopper {
        spinlock_t              lock;
        bool                    enabled;        /* is this stopper enabled? */
        struct list_head        works;          /* list of pending works */
-       struct task_struct      *thread;        /* stopper thread */
 };
 
 static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
+static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task);
 static bool stop_machine_initialized = false;
 
 static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
@@ -62,16 +62,18 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
 }
 
 /* queue @work to @stopper.  if offline, @work is completed immediately */
-static void cpu_stop_queue_work(struct cpu_stopper *stopper,
-                               struct cpu_stop_work *work)
+static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 {
+       struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+       struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
+
        unsigned long flags;
 
        spin_lock_irqsave(&stopper->lock, flags);
 
        if (stopper->enabled) {
                list_add_tail(&work->list, &stopper->works);
-               wake_up_process(stopper->thread);
+               wake_up_process(p);
        } else
                cpu_stop_signal_done(work->done, false);
 
@@ -108,7 +110,7 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
        struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };
 
        cpu_stop_init_done(&done, 1);
-       cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work);
+       cpu_stop_queue_work(cpu, &work);
        wait_for_completion(&done.completion);
        return done.executed ? done.ret : -ENOENT;
 }
@@ -130,7 +132,7 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
                        struct cpu_stop_work *work_buf)
 {
        *work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
-       cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), work_buf);
+       cpu_stop_queue_work(cpu, work_buf);
 }
 
 /* static data for stop_cpus */
@@ -159,8 +161,7 @@ static void queue_stop_cpus_work(const struct cpumask *cpumask,
         */
        preempt_disable();
        for_each_cpu(cpu, cpumask)
-               cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
-                                   &per_cpu(stop_cpus_work, cpu));
+               cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu));
        preempt_enable();
 }
 
@@ -244,20 +245,25 @@ int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
        return ret;
 }
 
-static int cpu_stopper_thread(void *data)
+static int cpu_stop_should_run(unsigned int cpu)
+{
+       struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+       unsigned long flags;
+       int run;
+
+       spin_lock_irqsave(&stopper->lock, flags);
+       run = !list_empty(&stopper->works);
+       spin_unlock_irqrestore(&stopper->lock, flags);
+       return run;
+}
+
+static void cpu_stopper_thread(unsigned int cpu)
 {
-       struct cpu_stopper *stopper = data;
+       struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        struct cpu_stop_work *work;
        int ret;
 
 repeat:
-       set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */
-
-       if (kthread_should_stop()) {
-               __set_current_state(TASK_RUNNING);
-               return 0;
-       }
-
        work = NULL;
        spin_lock_irq(&stopper->lock);
        if (!list_empty(&stopper->works)) {
@@ -273,8 +279,6 @@ repeat:
                struct cpu_stop_done *done = work->done;
                char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
 
-               __set_current_state(TASK_RUNNING);
-
                /* cpu stop callbacks are not allowed to sleep */
                preempt_disable();
 
@@ -290,88 +294,55 @@ repeat:
                                          ksym_buf), arg);
 
                cpu_stop_signal_done(done, true);
-       } else
-               schedule();
-
-       goto repeat;
+               goto repeat;
+       }
 }
 
 extern void sched_set_stop_task(int cpu, struct task_struct *stop);
 
-/* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */
-static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
-                                          unsigned long action, void *hcpu)
+static void cpu_stop_create(unsigned int cpu)
+{
+       sched_set_stop_task(cpu, per_cpu(cpu_stopper_task, cpu));
+}
+
+static void cpu_stop_park(unsigned int cpu)
 {
-       unsigned int cpu = (unsigned long)hcpu;
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-       struct task_struct *p;
-
-       switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_UP_PREPARE:
-               BUG_ON(stopper->thread || stopper->enabled ||
-                      !list_empty(&stopper->works));
-               p = kthread_create_on_node(cpu_stopper_thread,
-                                          stopper,
-                                          cpu_to_node(cpu),
-                                          "migration/%d", cpu);
-               if (IS_ERR(p))
-                       return notifier_from_errno(PTR_ERR(p));
-               get_task_struct(p);
-               kthread_bind(p, cpu);
-               sched_set_stop_task(cpu, p);
-               stopper->thread = p;
-               break;
-
-       case CPU_ONLINE:
-               /* strictly unnecessary, as first user will wake it */
-               wake_up_process(stopper->thread);
-               /* mark enabled */
-               spin_lock_irq(&stopper->lock);
-               stopper->enabled = true;
-               spin_unlock_irq(&stopper->lock);
-               break;
-
-#ifdef CONFIG_HOTPLUG_CPU
-       case CPU_UP_CANCELED:
-       case CPU_POST_DEAD:
-       {
-               struct cpu_stop_work *work;
-
-               sched_set_stop_task(cpu, NULL);
-               /* kill the stopper */
-               kthread_stop(stopper->thread);
-               /* drain remaining works */
-               spin_lock_irq(&stopper->lock);
-               list_for_each_entry(work, &stopper->works, list)
-                       cpu_stop_signal_done(work->done, false);
-               stopper->enabled = false;
-               spin_unlock_irq(&stopper->lock);
-               /* release the stopper */
-               put_task_struct(stopper->thread);
-               stopper->thread = NULL;
-               break;
-       }
-#endif
-       }
+       struct cpu_stop_work *work;
+       unsigned long flags;
 
-       return NOTIFY_OK;
+       /* drain remaining works */
+       spin_lock_irqsave(&stopper->lock, flags);
+       list_for_each_entry(work, &stopper->works, list)
+               cpu_stop_signal_done(work->done, false);
+       stopper->enabled = false;
+       spin_unlock_irqrestore(&stopper->lock, flags);
 }
 
-/*
- * Give it a higher priority so that cpu stopper is available to other
- * cpu notifiers.  It currently shares the same priority as sched
- * migration_notifier.
- */
-static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
-       .notifier_call  = cpu_stop_cpu_callback,
-       .priority       = 10,
+static void cpu_stop_unpark(unsigned int cpu)
+{
+       struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+
+       spin_lock_irq(&stopper->lock);
+       stopper->enabled = true;
+       spin_unlock_irq(&stopper->lock);
+}
+
+static struct smp_hotplug_thread cpu_stop_threads = {
+       .store                  = &cpu_stopper_task,
+       .thread_should_run      = cpu_stop_should_run,
+       .thread_fn              = cpu_stopper_thread,
+       .thread_comm            = "migration/%u",
+       .create                 = cpu_stop_create,
+       .setup                  = cpu_stop_unpark,
+       .park                   = cpu_stop_park,
+       .unpark                 = cpu_stop_unpark,
+       .selfparking            = true,
 };
 
 static int __init cpu_stop_init(void)
 {
-       void *bcpu = (void *)(long)smp_processor_id();
        unsigned int cpu;
-       int err;
 
        for_each_possible_cpu(cpu) {
                struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
@@ -380,15 +351,8 @@ static int __init cpu_stop_init(void)
                INIT_LIST_HEAD(&stopper->works);
        }
 
-       /* start one for the boot cpu */
-       err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE,
-                                   bcpu);
-       BUG_ON(err != NOTIFY_OK);
-       cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu);
-       register_cpu_notifier(&cpu_stop_cpu_notifier);
-
+       BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
        stop_machine_initialized = true;
-
        return 0;
 }
 early_initcall(cpu_stop_init);
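The hunk above converts the per-CPU stopper threads to the generic smpboot helper instead of an open-coded CPU notifier. As an illustrative sketch of how that helper is used in general (the demo_* names, the pending flag and the kick helper are invented for this note, not part of the patch), a client registers one structure and gets a bound, hotplug-managed kthread per CPU:

#include <linux/smpboot.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/printk.h>
#include <linux/init.h>

static DEFINE_PER_CPU(struct task_struct *, demo_task);	/* hypothetical */
static DEFINE_PER_CPU(bool, demo_pending);

/* Called with preemption disabled; non-zero means "run demo_thread_fn now". */
static int demo_should_run(unsigned int cpu)
{
	return per_cpu(demo_pending, cpu);
}

/* Runs in the bound per-CPU kthread whenever demo_should_run() said yes. */
static void demo_thread_fn(unsigned int cpu)
{
	per_cpu(demo_pending, cpu) = false;
	pr_info("demo work on cpu %u\n", cpu);
}

/* Hand work to the current CPU's thread; call with preemption disabled. */
static void demo_kick(void)
{
	__this_cpu_write(demo_pending, true);
	wake_up_process(__this_cpu_read(demo_task));
}

static struct smp_hotplug_thread demo_threads = {
	.store			= &demo_task,
	.thread_should_run	= demo_should_run,
	.thread_fn		= demo_thread_fn,
	.thread_comm		= "demo/%u",
};

static int __init demo_init(void)
{
	/* One kthread per possible CPU, parked/unparked across hotplug. */
	return smpboot_register_percpu_thread(&demo_threads);
}
early_initcall(demo_init);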
index c88878db491e44bd2b5a5ea7f381fc7242187727..4fc9be955c712ca4b68c25b18d1ac9599df2a3ae 100644 (file)
@@ -61,6 +61,7 @@
 #include <linux/kmod.h>
 #include <linux/capability.h>
 #include <linux/binfmts.h>
+#include <linux/sched/sysctl.h>
 
 #include <asm/uaccess.h>
 #include <asm/processor.h>
@@ -403,6 +404,13 @@ static struct ctl_table kern_table[] = {
                .mode           = 0644,
                .proc_handler   = sched_rt_handler,
        },
+       {
+               .procname       = "sched_rr_timeslice_ms",
+               .data           = &sched_rr_timeslice,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = sched_rr_handler,
+       },
 #ifdef CONFIG_SCHED_AUTOGROUP
        {
                .procname       = "sched_autogroup_enabled",
index d226c6a3fd28933d02edd485a9c8353db2212c02..c2a27dd93142545369abefa1d2ee2a41a5eb8f09 100644 (file)
@@ -114,6 +114,12 @@ SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
        return 0;
 }
 
+/*
+ * Indicates if there is an offset between the system clock and the hardware
+ * clock/persistent clock/rtc.
+ */
+int persistent_clock_is_local;
+
 /*
  * Adjust the time obtained from the CMOS to be UTC time instead of
  * local time.
@@ -135,6 +141,8 @@ static inline void warp_clock(void)
        struct timespec adjust;
 
        adjust = current_kernel_time();
+       if (sys_tz.tz_minuteswest != 0)
+               persistent_clock_is_local = 1;
        adjust.tv_sec += sys_tz.tz_minuteswest * 60;
        do_settimeofday(&adjust);
 }
index 8601f0db12612a1e18a3a11968b9444630f94ad5..24510d84efd760b287f532727dfc20f0d8e57405 100644 (file)
@@ -12,6 +12,11 @@ config CLOCKSOURCE_WATCHDOG
 config ARCH_CLOCKSOURCE_DATA
        bool
 
+# Platforms have a persistent clock
+config ALWAYS_USE_PERSISTENT_CLOCK
+       bool
+       default n
+
 # Timekeeping vsyscall support
 config GENERIC_TIME_VSYSCALL
        bool
@@ -38,6 +43,10 @@ config GENERIC_CLOCKEVENTS_BUILD
        default y
        depends on GENERIC_CLOCKEVENTS
 
+# Architecture can handle broadcast in a driver-agnostic way
+config ARCH_HAS_TICK_BROADCAST
+       bool
+
 # Clockevents broadcasting infrastructure
 config GENERIC_CLOCKEVENTS_BROADCAST
        bool
index 24174b4d669b1280c632d4ed0126dc2f1bb297ef..b10a42bb0165a40a9398fd639a2da2dc0c5acd4e 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/time.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/rtc.h>
 
 #include "tick-internal.h"
 
@@ -483,8 +484,7 @@ out:
        return leap;
 }
 
-#ifdef CONFIG_GENERIC_CMOS_UPDATE
-
+#if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC)
 static void sync_cmos_clock(struct work_struct *work);
 
 static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
@@ -510,14 +510,26 @@ static void sync_cmos_clock(struct work_struct *work)
        }
 
        getnstimeofday(&now);
-       if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
-               fail = update_persistent_clock(now);
+       if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) {
+               struct timespec adjust = now;
+
+               fail = -ENODEV;
+               if (persistent_clock_is_local)
+                       adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
+#ifdef CONFIG_GENERIC_CMOS_UPDATE
+               fail = update_persistent_clock(adjust);
+#endif
+#ifdef CONFIG_RTC_SYSTOHC
+               if (fail == -ENODEV)
+                       fail = rtc_set_ntp_time(adjust);
+#endif
+       }
 
        next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec - (TICK_NSEC / 2);
        if (next.tv_nsec <= 0)
                next.tv_nsec += NSEC_PER_SEC;
 
-       if (!fail)
+       if (!fail || fail == -ENODEV)
                next.tv_sec = 659;
        else
                next.tv_sec = 0;
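The fallback above also compensates for a hardware clock kept in local time by subtracting sys_tz.tz_minuteswest * 60 before writing the time back. A tiny worked example of that arithmetic, as plain userspace C with invented numbers (UTC-5, i.e. tz_minuteswest = 300):

#include <stdio.h>

int main(void)
{
	long tz_minuteswest = 300;		/* example: New York in winter */
	long utc = 12 * 3600;			/* 12:00 UTC, seconds of day */
	long local = utc - tz_minuteswest * 60;	/* value written to the RTC */

	/* prints "RTC gets 07:00 local for 12:00 UTC" */
	printf("RTC gets %02ld:00 local for %02ld:00 UTC\n",
	       local / 3600, utc / 3600);
	return 0;
}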
index f113755695e2351ad9f32b7e38a808e98c0ad5f4..2fb8cb88df8d0296e655f320bc7d5cbce1a858c8 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
+#include <linux/smp.h>
 
 #include "tick-internal.h"
 
@@ -86,6 +87,22 @@ int tick_is_broadcast_device(struct clock_event_device *dev)
        return (dev && tick_broadcast_device.evtdev == dev);
 }
 
+static void err_broadcast(const struct cpumask *mask)
+{
+       pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
+}
+
+static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
+{
+       if (!dev->broadcast)
+               dev->broadcast = tick_broadcast;
+       if (!dev->broadcast) {
+               pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
+                            dev->name);
+               dev->broadcast = err_broadcast;
+       }
+}
+
 /*
 * Check if the device is dysfunctional and a placeholder, which
  * needs to be handled by the broadcast device.
@@ -105,6 +122,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
         */
        if (!tick_device_is_functional(dev)) {
                dev->event_handler = tick_handle_periodic;
+               tick_device_setup_broadcast_func(dev);
                cpumask_set_cpu(cpu, tick_get_broadcast_mask());
                tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
                ret = 1;
@@ -116,15 +134,33 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
                 */
                if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
                        int cpu = smp_processor_id();
-
                        cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
                        tick_broadcast_clear_oneshot(cpu);
+               } else {
+                       tick_device_setup_broadcast_func(dev);
                }
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
        return ret;
 }
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+int tick_receive_broadcast(void)
+{
+       struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+       struct clock_event_device *evt = td->evtdev;
+
+       if (!evt)
+               return -ENODEV;
+
+       if (!evt->event_handler)
+               return -EINVAL;
+
+       evt->event_handler(evt);
+       return 0;
+}
+#endif
+
 /*
  * Broadcast the event to the cpus, which are set in the mask (mangled).
  */
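tick_receive_broadcast() above gives architectures a driver-agnostic way to deliver a broadcast tick to the local CPU. A hedged sketch of the intended caller, an architecture's broadcast-IPI handler (handle_timer_ipi() and the message are hypothetical; the declaration is assumed to live in <linux/clockchips.h> as added elsewhere in this series):

#include <linux/clockchips.h>
#include <linux/printk.h>

/* Hypothetical arch hook run when the timer-broadcast IPI arrives. */
static void handle_timer_ipi(void)
{
	int err = tick_receive_broadcast();

	/* -ENODEV/-EINVAL: the local tick device is not set up yet. */
	if (err)
		pr_warn_once("ignoring broadcast tick, err=%d\n", err);
}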
index d58e552d9fd154b39fffaa012a6028838b9955fd..314b9ee07edf076dcba956f064075ef9768e0422 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/profile.h>
 #include <linux/sched.h>
 #include <linux/module.h>
+#include <linux/irq_work.h>
 
 #include <asm/irq_regs.h>
 
@@ -28,7 +29,7 @@
 /*
  * Per cpu nohz control structure
  */
-static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
+DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
 
 /*
  * The time, when the last jiffy update happened. Protected by jiffies_lock.
@@ -331,8 +332,8 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
                time_delta = timekeeping_max_deferment();
        } while (read_seqretry(&jiffies_lock, seq));
 
-       if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
-           arch_needs_cpu(cpu)) {
+       if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
+           arch_needs_cpu(cpu) || irq_work_needs_cpu()) {
                next_jiffies = last_jiffies + 1;
                delta_jiffies = 1;
        } else {
@@ -631,8 +632,11 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
 
 static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
 {
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        unsigned long ticks;
+
+       if (vtime_accounting_enabled())
+               return;
        /*
         * We stopped the tick in idle. Update process times would miss the
         * time we slept as update_process_times does only a 1 tick
index cbc6acb0db3fafbaa4830da34cdb22e538892764..1e35515a875e86966e08a47737c6dfb070956309 100644 (file)
@@ -29,6 +29,9 @@ static struct timekeeper timekeeper;
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
 
+/* Flag for if there is a persistent clock on this platform */
+bool __read_mostly persistent_clock_exist = false;
+
 static inline void tk_normalize_xtime(struct timekeeper *tk)
 {
        while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
@@ -264,19 +267,18 @@ static void timekeeping_forward_now(struct timekeeper *tk)
 }
 
 /**
- * getnstimeofday - Returns the time of day in a timespec
+ * __getnstimeofday - Returns the time of day in a timespec.
  * @ts:                pointer to the timespec to be set
  *
- * Returns the time of day in a timespec.
+ * Updates the time of day in the timespec.
+ * Returns 0 on success, or a negative error when suspended (timespec will be undefined).
  */
-void getnstimeofday(struct timespec *ts)
+int __getnstimeofday(struct timespec *ts)
 {
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        s64 nsecs = 0;
 
-       WARN_ON(timekeeping_suspended);
-
        do {
                seq = read_seqbegin(&tk->lock);
 
@@ -287,6 +289,26 @@ void getnstimeofday(struct timespec *ts)
 
        ts->tv_nsec = 0;
        timespec_add_ns(ts, nsecs);
+
+       /*
+        * Do not bail out early, in case there were callers still using
+        * the value, even in the face of the WARN_ON.
+        */
+       if (unlikely(timekeeping_suspended))
+               return -EAGAIN;
+       return 0;
+}
+EXPORT_SYMBOL(__getnstimeofday);
+
+/**
+ * getnstimeofday - Returns the time of day in a timespec.
+ * @ts:                pointer to the timespec to be set
+ *
+ * Returns the time of day in a timespec (WARN if suspended).
+ */
+void getnstimeofday(struct timespec *ts)
+{
+       WARN_ON(__getnstimeofday(ts));
 }
 EXPORT_SYMBOL(getnstimeofday);
 
@@ -640,12 +662,14 @@ void __init timekeeping_init(void)
        struct timespec now, boot, tmp;
 
        read_persistent_clock(&now);
+
        if (!timespec_valid_strict(&now)) {
                pr_warn("WARNING: Persistent clock returned invalid value!\n"
                        "         Check your CMOS/BIOS settings.\n");
                now.tv_sec = 0;
                now.tv_nsec = 0;
-       }
+       } else if (now.tv_sec || now.tv_nsec)
+               persistent_clock_exist = true;
 
        read_boot_clock(&boot);
        if (!timespec_valid_strict(&boot)) {
@@ -718,11 +742,12 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
 {
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;
-       struct timespec ts;
 
-       /* Make sure we don't set the clock twice */
-       read_persistent_clock(&ts);
-       if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
+       /*
+        * Make sure we don't set the clock twice, as timekeeping_resume()
+        * already did it
+        */
+       if (has_persistent_clock())
                return;
 
        write_seqlock_irqsave(&tk->lock, flags);
index eb51d76e058a401477776e279fba95c95a315a4a..3f42652a6a3749ca2b17dbaf397e483a3f2e6667 100644 (file)
@@ -369,10 +369,8 @@ if ($hz eq '--can') {
                die "Usage: $0 HZ\n";
        }
 
-       @val = @{$canned_values{$hz}};
-       if (!defined(@val)) {
-               @val = compute_values($hz);
-       }
+       $cv = $canned_values{$hz};
+       @val = defined($cv) ? @$cv : compute_values($hz);
        output($hz, @val);
 }
 exit 0;
index 367d008584823a6fe01ed013cda8c3693fcfd761..dbf7a78a1ef14a0200a1a28835ebb292be2aae4c 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/kallsyms.h>
 #include <linux/irq_work.h>
 #include <linux/sched.h>
+#include <linux/sched/sysctl.h>
 #include <linux/slab.h>
 
 #include <asm/uaccess.h>
@@ -1351,7 +1352,6 @@ void update_process_times(int user_tick)
        account_process_tick(p, user_tick);
        run_local_timers();
        rcu_check_callbacks(cpu, user_tick);
-       printk_tick();
 #ifdef CONFIG_IRQ_WORK
        if (in_irq())
                irq_work_run();
index 5d89335a485f7480ea4fa3924261e5d659e79ad0..36567564e221195d1a8489207fed82f27c36b85e 100644 (file)
@@ -39,6 +39,9 @@ config HAVE_DYNAMIC_FTRACE
        help
          See Documentation/trace/ftrace-design.txt
 
+config HAVE_DYNAMIC_FTRACE_WITH_REGS
+       bool
+
 config HAVE_FTRACE_MCOUNT_RECORD
        bool
        help
@@ -250,6 +253,16 @@ config FTRACE_SYSCALLS
        help
          Basic tracer to catch the syscall entry and exit events.
 
+config TRACER_SNAPSHOT
+       bool "Create a snapshot trace buffer"
+       select TRACER_MAX_TRACE
+       help
+         Allow tracing users to take a snapshot of the current buffer using the
+         ftrace interface, e.g.:
+
+             echo 1 > /sys/kernel/debug/tracing/snapshot
+             cat snapshot
+
 config TRACE_BRANCH_PROFILING
        bool
        select GENERIC_TRACER
@@ -434,6 +447,11 @@ config DYNAMIC_FTRACE
          were made. If so, it runs stop_machine (stops all CPUS)
          and modifies the code to jump over the call to ftrace.
 
+config DYNAMIC_FTRACE_WITH_REGS
+       def_bool y
+       depends on DYNAMIC_FTRACE
+       depends on HAVE_DYNAMIC_FTRACE_WITH_REGS
+
 config FUNCTION_PROFILER
        bool "Kernel function profiler"
        depends on FUNCTION_TRACER
index c0bd0308741ca1a343a9cbc0b551b4a12438ddd7..71259e2b6b6167985fe0550e8c47de33a770e603 100644 (file)
@@ -147,7 +147,7 @@ void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
                return;
 
        local_irq_save(flags);
-       buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
+       buf = this_cpu_ptr(bt->msg_data);
        va_start(args, fmt);
        n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
        va_end(args);
index 3ffe4c5ad3f37353d45d828407fb484ab811e441..ce8c3d68292f701ac7813f4e20a19af79896fa4b 100644 (file)
@@ -111,6 +111,26 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
 #endif
 
+/*
+ * Traverse the ftrace_global_list, invoking all entries.  The reason that we
+ * can use rcu_dereference_raw() is that elements removed from this list
+ * are simply leaked, so there is no need to interact with a grace-period
+ * mechanism.  The rcu_dereference_raw() calls are needed to handle
+ * concurrent insertions into the ftrace_global_list.
+ *
+ * Silly Alpha and silly pointer-speculation compiler optimizations!
+ */
+#define do_for_each_ftrace_op(op, list)                        \
+       op = rcu_dereference_raw(list);                 \
+       do
+
+/*
+ * Optimized for just a single item in the list (as that is the normal case).
+ */
+#define while_for_each_ftrace_op(op)                           \
+       while (likely(op = rcu_dereference_raw((op)->next)) &&  \
+              unlikely((op) != &ftrace_list_end))
+
 /**
  * ftrace_nr_registered_ops - return number of ops registered
  *
@@ -132,29 +152,21 @@ int ftrace_nr_registered_ops(void)
        return cnt;
 }
 
-/*
- * Traverse the ftrace_global_list, invoking all entries.  The reason that we
- * can use rcu_dereference_raw() is that elements removed from this list
- * are simply leaked, so there is no need to interact with a grace-period
- * mechanism.  The rcu_dereference_raw() calls are needed to handle
- * concurrent insertions into the ftrace_global_list.
- *
- * Silly Alpha and silly pointer-speculation compiler optimizations!
- */
 static void
 ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
                        struct ftrace_ops *op, struct pt_regs *regs)
 {
-       if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
+       int bit;
+
+       bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
+       if (bit < 0)
                return;
 
-       trace_recursion_set(TRACE_GLOBAL_BIT);
-       op = rcu_dereference_raw(ftrace_global_list); /*see above*/
-       while (op != &ftrace_list_end) {
+       do_for_each_ftrace_op(op, ftrace_global_list) {
                op->func(ip, parent_ip, op, regs);
-               op = rcu_dereference_raw(op->next); /*see above*/
-       };
-       trace_recursion_clear(TRACE_GLOBAL_BIT);
+       } while_for_each_ftrace_op(op);
+
+       trace_clear_recursion(bit);
 }
 
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
@@ -221,10 +233,24 @@ static void update_global_ops(void)
         * registered callers.
         */
        if (ftrace_global_list == &ftrace_list_end ||
-           ftrace_global_list->next == &ftrace_list_end)
+           ftrace_global_list->next == &ftrace_list_end) {
                func = ftrace_global_list->func;
-       else
+               /*
+                * As we are calling the function directly,
+                * if it does not have recursion protection,
+                * the function_trace_op needs to be updated
+                * accordingly.
+                */
+               if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
+                       global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
+               else
+                       global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
+       } else {
                func = ftrace_global_list_func;
+               /* The list has its own recursion protection. */
+               global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
+       }
+
 
        /* If we filter on pids, update to use the pid function */
        if (!list_empty(&ftrace_pids)) {
@@ -337,7 +363,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
        if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
                return -EINVAL;
 
-#ifndef ARCH_SUPPORTS_FTRACE_SAVE_REGS
+#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        /*
         * If the ftrace_ops specifies SAVE_REGS, then it only can be used
         * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
@@ -3998,7 +4024,7 @@ static int ftrace_module_notify(struct notifier_block *self,
 
 struct notifier_block ftrace_module_nb = {
        .notifier_call = ftrace_module_notify,
-       .priority = 0,
+       .priority = INT_MAX,    /* Run before anything that can use kprobes */
 };
 
 extern unsigned long __start_mcount_loc[];
@@ -4090,14 +4116,11 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
         */
        preempt_disable_notrace();
        trace_recursion_set(TRACE_CONTROL_BIT);
-       op = rcu_dereference_raw(ftrace_control_list);
-       while (op != &ftrace_list_end) {
+       do_for_each_ftrace_op(op, ftrace_control_list) {
                if (!ftrace_function_local_disabled(op) &&
                    ftrace_ops_test(op, ip))
                        op->func(ip, parent_ip, op, regs);
-
-               op = rcu_dereference_raw(op->next);
-       };
+       } while_for_each_ftrace_op(op);
        trace_recursion_clear(TRACE_CONTROL_BIT);
        preempt_enable_notrace();
 }
@@ -4112,27 +4135,26 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                       struct ftrace_ops *ignored, struct pt_regs *regs)
 {
        struct ftrace_ops *op;
+       int bit;
 
        if (function_trace_stop)
                return;
 
-       if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
+       bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
+       if (bit < 0)
                return;
 
-       trace_recursion_set(TRACE_INTERNAL_BIT);
        /*
         * Some of the ops may be dynamically allocated,
         * they must be freed after a synchronize_sched().
         */
        preempt_disable_notrace();
-       op = rcu_dereference_raw(ftrace_ops_list);
-       while (op != &ftrace_list_end) {
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
                if (ftrace_ops_test(op, ip))
                        op->func(ip, parent_ip, op, regs);
-               op = rcu_dereference_raw(op->next);
-       };
+       } while_for_each_ftrace_op(op);
        preempt_enable_notrace();
-       trace_recursion_clear(TRACE_INTERNAL_BIT);
+       trace_clear_recursion(bit);
 }
 
 /*
@@ -4143,8 +4165,8 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
  * Archs are to support both the regs and ftrace_ops at the same time.
  * If they support ftrace_ops, it is assumed they support regs.
  * If call backs want to use regs, they must either check for regs
- * being NULL, or ARCH_SUPPORTS_FTRACE_SAVE_REGS.
- * Note, ARCH_SUPPORT_SAVE_REGS expects a full regs to be saved.
+ * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
+ * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
  * An architecture can pass partial regs with ftrace_ops and still
 * set the ARCH_SUPPORTS_FTRACE_OPS.
  */
index ce8514feedcdf29c181dda4972eb7a53cd0df04e..7244acde77b0a9a5670d401fc505c64c5d8b06c0 100644 (file)
@@ -3,8 +3,10 @@
  *
  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
  */
+#include <linux/ftrace_event.h>
 #include <linux/ring_buffer.h>
 #include <linux/trace_clock.h>
+#include <linux/trace_seq.h>
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
@@ -21,7 +23,6 @@
 #include <linux/fs.h>
 
 #include <asm/local.h>
-#include "trace.h"
 
 static void update_pages_handler(struct work_struct *work);
 
@@ -2432,41 +2433,76 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 
 #ifdef CONFIG_TRACING
 
-#define TRACE_RECURSIVE_DEPTH 16
+/*
+ * The lock and unlock are done within a preempt disable section.
+ * The current_context per_cpu variable can only be modified
+ * by the current task between lock and unlock. But it can
+ * be modified more than once via an interrupt. To pass this
+ * information from the lock to the unlock without having to
+ * access the 'in_interrupt()' functions again (which do show
+ * a bit of overhead in something as critical as function tracing),
+ * we use a bitmask trick.
+ *
+ *  bit 0 =  NMI context
+ *  bit 1 =  IRQ context
+ *  bit 2 =  SoftIRQ context
+ *  bit 3 =  normal context.
+ *
+ * This works because this is the order of contexts that can
+ * preempt other contexts. A SoftIRQ never preempts an IRQ
+ * context.
+ *
+ * When the context is determined, the corresponding bit is
+ * checked and set (if it was set, then a recursion of that context
+ * happened).
+ *
+ * On unlock, we need to clear this bit. To do so, just subtract
+ * 1 from the current_context and AND it to itself.
+ *
+ * (binary)
+ *  101 - 1 = 100
+ *  101 & 100 = 100 (clearing bit zero)
+ *
+ *  1010 - 1 = 1001
+ *  1010 & 1001 = 1000 (clearing bit 1)
+ *
+ * The least significant bit can be cleared this way, and it
+ * just so happens that it is the same bit corresponding to
+ * the current context.
+ */
+static DEFINE_PER_CPU(unsigned int, current_context);
 
-/* Keep this code out of the fast path cache */
-static noinline void trace_recursive_fail(void)
+static __always_inline int trace_recursive_lock(void)
 {
-       /* Disable all tracing before we do anything else */
-       tracing_off_permanent();
-
-       printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
-                   "HC[%lu]:SC[%lu]:NMI[%lu]\n",
-                   trace_recursion_buffer(),
-                   hardirq_count() >> HARDIRQ_SHIFT,
-                   softirq_count() >> SOFTIRQ_SHIFT,
-                   in_nmi());
-
-       WARN_ON_ONCE(1);
-}
+       unsigned int val = this_cpu_read(current_context);
+       int bit;
 
-static inline int trace_recursive_lock(void)
-{
-       trace_recursion_inc();
+       if (in_interrupt()) {
+               if (in_nmi())
+                       bit = 0;
+               else if (in_irq())
+                       bit = 1;
+               else
+                       bit = 2;
+       } else
+               bit = 3;
 
-       if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH))
-               return 0;
+       if (unlikely(val & (1 << bit)))
+               return 1;
 
-       trace_recursive_fail();
+       val |= (1 << bit);
+       this_cpu_write(current_context, val);
 
-       return -1;
+       return 0;
 }
 
-static inline void trace_recursive_unlock(void)
+static __always_inline void trace_recursive_unlock(void)
 {
-       WARN_ON_ONCE(!trace_recursion_buffer());
+       unsigned int val = this_cpu_read(current_context);
 
-       trace_recursion_dec();
+       val--;
+       val &= this_cpu_read(current_context);
+       this_cpu_write(current_context, val);
 }
 
 #else
@@ -3066,6 +3102,24 @@ ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
 
+/**
+ * ring_buffer_read_events_cpu - get the number of events successfully read
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to get the number of events read
+ */
+unsigned long
+ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
+{
+       struct ring_buffer_per_cpu *cpu_buffer;
+
+       if (!cpumask_test_cpu(cpu, buffer->cpumask))
+               return 0;
+
+       cpu_buffer = buffer->buffers[cpu];
+       return cpu_buffer->read;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
+
 /**
  * ring_buffer_entries - get the number of entries in a buffer
  * @buffer: The ring buffer
@@ -3425,7 +3479,7 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
        /* check for end of page padding */
        if ((iter->head >= rb_page_size(iter->head_page)) &&
            (iter->head_page != cpu_buffer->commit_page))
-               rb_advance_iter(iter);
+               rb_inc_iter(iter);
 }
 
 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
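The current_context comment in the ring_buffer.c hunk above relies on the fact that subtracting 1 and ANDing with the old value clears only the lowest set bit, which is always the innermost context. A standalone userspace demo of that identity (nothing here is kernel code; it compiles and runs on its own):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int ctx = 0;

	ctx |= 1u << 3;			/* normal context takes bit 3 */
	ctx |= 1u << 1;			/* an IRQ interrupts and takes bit 1 */

	ctx = (ctx - 1) & ctx;		/* IRQ unlock: clears bit 1 only */
	assert(ctx == 1u << 3);

	ctx = (ctx - 1) & ctx;		/* normal-context unlock: clears bit 3 */
	assert(ctx == 0);

	printf("clear-lowest-set-bit trick ok\n");
	return 0;
}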
index e5125677efa007a8ff58d5cd14b70766117dbc92..c2e2c23103742e3a3e53cf7b318aa0c804f5e1a8 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/poll.h>
 #include <linux/nmi.h>
 #include <linux/fs.h>
+#include <linux/sched/rt.h>
 
 #include "trace.h"
 #include "trace_output.h"
@@ -249,7 +250,7 @@ static unsigned long                trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
 static struct tracer           *trace_types __read_mostly;
 
 /* current_trace points to the tracer that is currently active */
-static struct tracer           *current_trace __read_mostly;
+static struct tracer           *current_trace __read_mostly = &nop_trace;
 
 /*
  * trace_types_lock is used to protect the trace_types list.
@@ -709,10 +710,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
                return;
 
        WARN_ON_ONCE(!irqs_disabled());
-       if (!current_trace->use_max_tr) {
-               WARN_ON_ONCE(1);
+
+       if (!current_trace->allocated_snapshot) {
+               /* Only the nop tracer should hit this when disabling */
+               WARN_ON_ONCE(current_trace != &nop_trace);
                return;
        }
+
        arch_spin_lock(&ftrace_max_lock);
 
        tr->buffer = max_tr.buffer;
@@ -739,10 +743,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
                return;
 
        WARN_ON_ONCE(!irqs_disabled());
-       if (!current_trace->use_max_tr) {
-               WARN_ON_ONCE(1);
+       if (WARN_ON_ONCE(!current_trace->allocated_snapshot))
                return;
-       }
 
        arch_spin_lock(&ftrace_max_lock);
 
@@ -862,10 +864,13 @@ int register_tracer(struct tracer *type)
 
                current_trace = type;
 
-               /* If we expanded the buffers, make sure the max is expanded too */
-               if (ring_buffer_expanded && type->use_max_tr)
-                       ring_buffer_resize(max_tr.buffer, trace_buf_size,
-                                               RING_BUFFER_ALL_CPUS);
+               if (type->use_max_tr) {
+                       /* If we expanded the buffers, make sure the max is expanded too */
+                       if (ring_buffer_expanded)
+                               ring_buffer_resize(max_tr.buffer, trace_buf_size,
+                                                  RING_BUFFER_ALL_CPUS);
+                       type->allocated_snapshot = true;
+               }
 
                /* the test is responsible for initializing and enabling */
                pr_info("Testing tracer %s: ", type->name);
@@ -881,10 +886,14 @@ int register_tracer(struct tracer *type)
                /* Only reset on passing, to avoid touching corrupted buffers */
                tracing_reset_online_cpus(tr);
 
-               /* Shrink the max buffer again */
-               if (ring_buffer_expanded && type->use_max_tr)
-                       ring_buffer_resize(max_tr.buffer, 1,
-                                               RING_BUFFER_ALL_CPUS);
+               if (type->use_max_tr) {
+                       type->allocated_snapshot = false;
+
+                       /* Shrink the max buffer again */
+                       if (ring_buffer_expanded)
+                               ring_buffer_resize(max_tr.buffer, 1,
+                                                  RING_BUFFER_ALL_CPUS);
+               }
 
                printk(KERN_CONT "PASSED\n");
        }
@@ -922,6 +931,9 @@ void tracing_reset(struct trace_array *tr, int cpu)
 {
        struct ring_buffer *buffer = tr->buffer;
 
+       if (!buffer)
+               return;
+
        ring_buffer_record_disable(buffer);
 
        /* Make sure all commits have finished */
@@ -936,6 +948,9 @@ void tracing_reset_online_cpus(struct trace_array *tr)
        struct ring_buffer *buffer = tr->buffer;
        int cpu;
 
+       if (!buffer)
+               return;
+
        ring_buffer_record_disable(buffer);
 
        /* Make sure all commits have finished */
@@ -1167,7 +1182,6 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 
        entry->preempt_count            = pc & 0xff;
        entry->pid                      = (tsk) ? tsk->pid : 0;
-       entry->padding                  = 0;
        entry->flags =
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
                (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -1335,7 +1349,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
         */
        preempt_disable_notrace();
 
-       use_stack = ++__get_cpu_var(ftrace_stack_reserve);
+       use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
        /*
         * We don't need any atomic variables, just a barrier.
         * If an interrupt comes in, we don't care, because it would
@@ -1389,7 +1403,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
  out:
        /* Again, don't let gcc optimize things here */
        barrier();
-       __get_cpu_var(ftrace_stack_reserve)--;
+       __this_cpu_dec(ftrace_stack_reserve);
        preempt_enable_notrace();
 
 }
@@ -1517,7 +1531,6 @@ static struct trace_buffer_struct *trace_percpu_nmi_buffer;
 static char *get_trace_buf(void)
 {
        struct trace_buffer_struct *percpu_buffer;
-       struct trace_buffer_struct *buffer;
 
        /*
         * If we have allocated per cpu buffers, then we do not
@@ -1535,9 +1548,7 @@ static char *get_trace_buf(void)
        if (!percpu_buffer)
                return NULL;
 
-       buffer = per_cpu_ptr(percpu_buffer, smp_processor_id());
-
-       return buffer->buffer;
+       return this_cpu_ptr(&percpu_buffer->buffer[0]);
 }
 
 static int alloc_percpu_trace_buffer(void)
@@ -1942,21 +1953,27 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 static void *s_start(struct seq_file *m, loff_t *pos)
 {
        struct trace_iterator *iter = m->private;
-       static struct tracer *old_tracer;
        int cpu_file = iter->cpu_file;
        void *p = NULL;
        loff_t l = 0;
        int cpu;
 
-       /* copy the tracer to avoid using a global lock all around */
+       /*
+        * copy the tracer to avoid using a global lock all around.
+        * iter->trace is a copy of current_trace; the pointer to the
+        * name may be used instead of a strcmp(), as iter->trace->name
+        * will point to the same string as current_trace->name.
+        */
        mutex_lock(&trace_types_lock);
-       if (unlikely(old_tracer != current_trace && current_trace)) {
-               old_tracer = current_trace;
+       if (unlikely(current_trace && iter->trace->name != current_trace->name))
                *iter->trace = *current_trace;
-       }
        mutex_unlock(&trace_types_lock);
 
-       atomic_inc(&trace_record_cmdline_disabled);
+       if (iter->snapshot && iter->trace->use_max_tr)
+               return ERR_PTR(-EBUSY);
+
+       if (!iter->snapshot)
+               atomic_inc(&trace_record_cmdline_disabled);
 
        if (*pos != iter->pos) {
                iter->ent = NULL;
@@ -1995,7 +2012,11 @@ static void s_stop(struct seq_file *m, void *p)
 {
        struct trace_iterator *iter = m->private;
 
-       atomic_dec(&trace_record_cmdline_disabled);
+       if (iter->snapshot && iter->trace->use_max_tr)
+               return;
+
+       if (!iter->snapshot)
+               atomic_dec(&trace_record_cmdline_disabled);
        trace_access_unlock(iter->cpu_file);
        trace_event_read_unlock();
 }
@@ -2080,8 +2101,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
        unsigned long total;
        const char *name = "preemption";
 
-       if (type)
-               name = type->name;
+       name = type->name;
 
        get_total_entries(tr, &total, &entries);
 
@@ -2430,7 +2450,7 @@ static const struct seq_operations tracer_seq_ops = {
 };
 
 static struct trace_iterator *
-__tracing_open(struct inode *inode, struct file *file)
+__tracing_open(struct inode *inode, struct file *file, bool snapshot)
 {
        long cpu_file = (long) inode->i_private;
        struct trace_iterator *iter;
@@ -2457,16 +2477,16 @@ __tracing_open(struct inode *inode, struct file *file)
        if (!iter->trace)
                goto fail;
 
-       if (current_trace)
-               *iter->trace = *current_trace;
+       *iter->trace = *current_trace;
 
        if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
                goto fail;
 
-       if (current_trace && current_trace->print_max)
+       if (current_trace->print_max || snapshot)
                iter->tr = &max_tr;
        else
                iter->tr = &global_trace;
+       iter->snapshot = snapshot;
        iter->pos = -1;
        mutex_init(&iter->mutex);
        iter->cpu_file = cpu_file;
@@ -2483,8 +2503,9 @@ __tracing_open(struct inode *inode, struct file *file)
        if (trace_clocks[trace_clock_id].in_ns)
                iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 
-       /* stop the trace while dumping */
-       tracing_stop();
+       /* stop the trace while dumping if we are not opening "snapshot" */
+       if (!iter->snapshot)
+               tracing_stop();
 
        if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
                for_each_tracing_cpu(cpu) {
@@ -2547,8 +2568,9 @@ static int tracing_release(struct inode *inode, struct file *file)
        if (iter->trace && iter->trace->close)
                iter->trace->close(iter);
 
-       /* reenable tracing if it was previously enabled */
-       tracing_start();
+       if (!iter->snapshot)
+               /* reenable tracing if it was previously enabled */
+               tracing_start();
        mutex_unlock(&trace_types_lock);
 
        mutex_destroy(&iter->mutex);
@@ -2576,7 +2598,7 @@ static int tracing_open(struct inode *inode, struct file *file)
        }
 
        if (file->f_mode & FMODE_READ) {
-               iter = __tracing_open(inode, file);
+               iter = __tracing_open(inode, file, false);
                if (IS_ERR(iter))
                        ret = PTR_ERR(iter);
                else if (trace_flags & TRACE_ITER_LATENCY_FMT)
@@ -2899,6 +2921,8 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;
 
+       buf[cnt] = 0;
+
        trace_set_options(buf);
 
        *ppos += cnt;
@@ -3012,10 +3036,7 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf,
        int r;
 
        mutex_lock(&trace_types_lock);
-       if (current_trace)
-               r = sprintf(buf, "%s\n", current_trace->name);
-       else
-               r = sprintf(buf, "\n");
+       r = sprintf(buf, "%s\n", current_trace->name);
        mutex_unlock(&trace_types_lock);
 
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
@@ -3181,6 +3202,7 @@ static int tracing_set_tracer(const char *buf)
        static struct trace_option_dentry *topts;
        struct trace_array *tr = &global_trace;
        struct tracer *t;
+       bool had_max_tr;
        int ret = 0;
 
        mutex_lock(&trace_types_lock);
@@ -3205,9 +3227,21 @@ static int tracing_set_tracer(const char *buf)
                goto out;
 
        trace_branch_disable();
-       if (current_trace && current_trace->reset)
+       if (current_trace->reset)
                current_trace->reset(tr);
-       if (current_trace && current_trace->use_max_tr) {
+
+       had_max_tr = current_trace->allocated_snapshot;
+       current_trace = &nop_trace;
+
+       if (had_max_tr && !t->use_max_tr) {
+               /*
+                * We need to make sure that the update_max_tr sees that
+                * current_trace changed to nop_trace to keep it from
+                * swapping the buffers after we resize it.
+                * The update_max_tr is called with interrupts disabled
+                * so a synchronize_sched() is sufficient.
+                */
+               synchronize_sched();
                /*
                 * We don't free the ring buffer; instead, resize it because
                 * the max_tr ring buffer has some state (e.g. ring->clock) and
@@ -3215,18 +3249,19 @@ static int tracing_set_tracer(const char *buf)
                 */
                ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
                set_buffer_entries(&max_tr, 1);
+               tracing_reset_online_cpus(&max_tr);
+               current_trace->allocated_snapshot = false;
        }
        destroy_trace_option_files(topts);
 
-       current_trace = &nop_trace;
-
        topts = create_trace_option_files(t);
-       if (t->use_max_tr) {
+       if (t->use_max_tr && !had_max_tr) {
                /* we need to make per cpu buffer sizes equivalent */
                ret = resize_buffer_duplicate_size(&max_tr, &global_trace,
                                                   RING_BUFFER_ALL_CPUS);
                if (ret < 0)
                        goto out;
+               t->allocated_snapshot = true;
        }
 
        if (t->init) {
@@ -3334,8 +3369,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
                ret = -ENOMEM;
                goto fail;
        }
-       if (current_trace)
-               *iter->trace = *current_trace;
+       *iter->trace = *current_trace;
 
        if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
                ret = -ENOMEM;
@@ -3452,7 +3486,7 @@ static int tracing_wait_pipe(struct file *filp)
                        return -EINTR;
 
                /*
-                * We block until we read something and tracing is enabled.
+                * We block until we read something and tracing is disabled.
                 * We still block if tracing is disabled, but we have never
                 * read anything. This allows a user to cat this file, and
                 * then enable tracing. But after we have read something,
@@ -3460,7 +3494,7 @@ static int tracing_wait_pipe(struct file *filp)
                 *
                 * iter->pos will be 0 if we haven't read anything.
                 */
-               if (tracing_is_enabled() && iter->pos)
+               if (!tracing_is_enabled() && iter->pos)
                        break;
        }
 
@@ -3475,7 +3509,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
                  size_t cnt, loff_t *ppos)
 {
        struct trace_iterator *iter = filp->private_data;
-       static struct tracer *old_tracer;
        ssize_t sret;
 
        /* return any leftover data */
@@ -3487,10 +3520,8 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 
        /* copy the tracer to avoid using a global lock all around */
        mutex_lock(&trace_types_lock);
-       if (unlikely(old_tracer != current_trace && current_trace)) {
-               old_tracer = current_trace;
+       if (unlikely(iter->trace->name != current_trace->name))
                *iter->trace = *current_trace;
-       }
        mutex_unlock(&trace_types_lock);
 
        /*
@@ -3646,7 +3677,6 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
                .ops            = &tracing_pipe_buf_ops,
                .spd_release    = tracing_spd_release_pipe,
        };
-       static struct tracer *old_tracer;
        ssize_t ret;
        size_t rem;
        unsigned int i;
@@ -3656,10 +3686,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
        /* copy the tracer to avoid using a global lock all around */
        mutex_lock(&trace_types_lock);
-       if (unlikely(old_tracer != current_trace && current_trace)) {
-               old_tracer = current_trace;
+       if (unlikely(iter->trace->name != current_trace->name))
                *iter->trace = *current_trace;
-       }
        mutex_unlock(&trace_types_lock);
 
        mutex_lock(&iter->mutex);
@@ -4035,8 +4063,7 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
         * Reset the buffer so that it doesn't have incomparable timestamps.
         */
        tracing_reset_online_cpus(&global_trace);
-       if (max_tr.buffer)
-               tracing_reset_online_cpus(&max_tr);
+       tracing_reset_online_cpus(&max_tr);
 
        mutex_unlock(&trace_types_lock);
 
@@ -4052,6 +4079,87 @@ static int tracing_clock_open(struct inode *inode, struct file *file)
        return single_open(file, tracing_clock_show, NULL);
 }
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+static int tracing_snapshot_open(struct inode *inode, struct file *file)
+{
+       struct trace_iterator *iter;
+       int ret = 0;
+
+       if (file->f_mode & FMODE_READ) {
+               iter = __tracing_open(inode, file, true);
+               if (IS_ERR(iter))
+                       ret = PTR_ERR(iter);
+       }
+       return ret;
+}
+
+static ssize_t
+tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+                      loff_t *ppos)
+{
+       unsigned long val;
+       int ret;
+
+       ret = tracing_update_buffers();
+       if (ret < 0)
+               return ret;
+
+       ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+       if (ret)
+               return ret;
+
+       mutex_lock(&trace_types_lock);
+
+       if (current_trace->use_max_tr) {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       switch (val) {
+       case 0:
+               if (current_trace->allocated_snapshot) {
+                       /* free spare buffer */
+                       ring_buffer_resize(max_tr.buffer, 1,
+                                          RING_BUFFER_ALL_CPUS);
+                       set_buffer_entries(&max_tr, 1);
+                       tracing_reset_online_cpus(&max_tr);
+                       current_trace->allocated_snapshot = false;
+               }
+               break;
+       case 1:
+               if (!current_trace->allocated_snapshot) {
+                       /* allocate spare buffer */
+                       ret = resize_buffer_duplicate_size(&max_tr,
+                                       &global_trace, RING_BUFFER_ALL_CPUS);
+                       if (ret < 0)
+                               break;
+                       current_trace->allocated_snapshot = true;
+               }
+
+               local_irq_disable();
+               /* Now, we're going to swap */
+               update_max_tr(&global_trace, current, smp_processor_id());
+               local_irq_enable();
+               break;
+       default:
+               if (current_trace->allocated_snapshot)
+                       tracing_reset_online_cpus(&max_tr);
+               else
+                       ret = -EINVAL;
+               break;
+       }
+
+       if (ret >= 0) {
+               *ppos += cnt;
+               ret = cnt;
+       }
+out:
+       mutex_unlock(&trace_types_lock);
+       return ret;
+}
+#endif /* CONFIG_TRACER_SNAPSHOT */
+
+
 static const struct file_operations tracing_max_lat_fops = {
        .open           = tracing_open_generic,
        .read           = tracing_max_lat_read,
@@ -4108,6 +4216,16 @@ static const struct file_operations trace_clock_fops = {
        .write          = tracing_clock_write,
 };
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+static const struct file_operations snapshot_fops = {
+       .open           = tracing_snapshot_open,
+       .read           = seq_read,
+       .write          = tracing_snapshot_write,
+       .llseek         = tracing_seek,
+       .release        = tracing_release,
+};
+#endif /* CONFIG_TRACER_SNAPSHOT */
+
 struct ftrace_buffer_info {
        struct trace_array      *tr;
        void                    *spare;
@@ -4412,6 +4530,9 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
        cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);
        trace_seq_printf(s, "dropped events: %ld\n", cnt);
 
+       cnt = ring_buffer_read_events_cpu(tr->buffer, cpu);
+       trace_seq_printf(s, "read events: %ld\n", cnt);
+
        count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
 
        kfree(s);
@@ -4488,7 +4609,7 @@ struct dentry *tracing_init_dentry(void)
 
 static struct dentry *d_percpu;
 
-struct dentry *tracing_dentry_percpu(void)
+static struct dentry *tracing_dentry_percpu(void)
 {
        static int once;
        struct dentry *d_tracer;
@@ -4815,10 +4936,17 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
                return ret;
 
        if (buffer) {
-               if (val)
+               mutex_lock(&trace_types_lock);
+               if (val) {
                        ring_buffer_record_on(buffer);
-               else
+                       if (current_trace->start)
+                               current_trace->start(tr);
+               } else {
                        ring_buffer_record_off(buffer);
+                       if (current_trace->stop)
+                               current_trace->stop(tr);
+               }
+               mutex_unlock(&trace_types_lock);
        }
 
        (*ppos)++;
@@ -4897,6 +5025,11 @@ static __init int tracer_init_debugfs(void)
                        &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
 #endif
 
+#ifdef CONFIG_TRACER_SNAPSHOT
+       trace_create_file("snapshot", 0644, d_tracer,
+                         (void *) TRACE_PIPE_ALL_CPU, &snapshot_fops);
+#endif
+
        create_trace_options_dir();
 
        for_each_tracing_cpu(cpu)
@@ -5005,6 +5138,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
        if (disable_tracing)
                ftrace_kill();
 
+       /* Simulate the iterator */
        trace_init_global_iter(&iter);
 
        for_each_tracing_cpu(cpu) {
@@ -5016,10 +5150,6 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
        /* don't look at user memory in panic mode */
        trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
-       /* Simulate the iterator */
-       iter.tr = &global_trace;
-       iter.trace = current_trace;
-
        switch (oops_dump_mode) {
        case DUMP_ALL:
                iter.cpu_file = TRACE_PIPE_ALL_CPU;
@@ -5164,7 +5294,7 @@ __init static int tracer_alloc_buffers(void)
        init_irq_work(&trace_work_wakeup, trace_wake_up);
 
        register_tracer(&nop_trace);
-       current_trace = &nop_trace;
+
        /* All seems OK, enable tracing */
        tracing_disabled = 0;
 
index c75d7988902caa5b7ce38d3cdad888d9ade9f105..57d7e5397d56da86f667e3139fb14703a99e8fcf 100644 (file)
@@ -287,20 +287,62 @@ struct tracer {
        struct tracer_flags     *flags;
        bool                    print_max;
        bool                    use_max_tr;
+       bool                    allocated_snapshot;
 };
 
 
 /* Only current can touch trace_recursion */
-#define trace_recursion_inc() do { (current)->trace_recursion++; } while (0)
-#define trace_recursion_dec() do { (current)->trace_recursion--; } while (0)
 
-/* Ring buffer has the 10 LSB bits to count */
-#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff)
-
-/* for function tracing recursion */
-#define TRACE_INTERNAL_BIT             (1<<11)
-#define TRACE_GLOBAL_BIT               (1<<12)
-#define TRACE_CONTROL_BIT              (1<<13)
+/*
+ * For function tracing recursion:
+ *  The order of these bits is important.
+ *
+ *  When function tracing occurs, the following steps are made:
+ *   If arch does not support a ftrace feature:
+ *    call internal function (uses INTERNAL bits) which calls...
+ *   If callback is registered to the "global" list, the list
+ *    function is called and recursion checks the GLOBAL bits.
+ *    then this function calls...
+ *   The function callback, which can use the FTRACE bits to
+ *    check for recursion.
+ *
+ * Now if the arch does not support a feature, and it calls
+ * the global list function which calls the ftrace callback,
+ * all three of these steps will do recursion protection.
+ * There's no reason to do one if the previous caller already
+ * did. The recursion that we are protecting against will
+ * go through the same steps again.
+ *
+ * To prevent the multiple recursion checks, if a recursion
+ * bit is set that is higher than the MAX bit of the current
+ * check, then we know that the check was made by the previous
+ * caller, and we can skip the current check.
+ */
+enum {
+       TRACE_BUFFER_BIT,
+       TRACE_BUFFER_NMI_BIT,
+       TRACE_BUFFER_IRQ_BIT,
+       TRACE_BUFFER_SIRQ_BIT,
+
+       /* Start of function recursion bits */
+       TRACE_FTRACE_BIT,
+       TRACE_FTRACE_NMI_BIT,
+       TRACE_FTRACE_IRQ_BIT,
+       TRACE_FTRACE_SIRQ_BIT,
+
+       /* GLOBAL_BITs must be greater than FTRACE_BITs */
+       TRACE_GLOBAL_BIT,
+       TRACE_GLOBAL_NMI_BIT,
+       TRACE_GLOBAL_IRQ_BIT,
+       TRACE_GLOBAL_SIRQ_BIT,
+
+       /* INTERNAL_BITs must be greater than GLOBAL_BITs */
+       TRACE_INTERNAL_BIT,
+       TRACE_INTERNAL_NMI_BIT,
+       TRACE_INTERNAL_IRQ_BIT,
+       TRACE_INTERNAL_SIRQ_BIT,
+
+       TRACE_CONTROL_BIT,
 
 /*
  * Abuse of the trace_recursion.
@@ -309,11 +351,77 @@ struct tracer {
  * was called in irq context but we have irq tracing off. Since this
  * can only be modified by current, we can reuse trace_recursion.
  */
-#define TRACE_IRQ_BIT                  (1<<13)
+       TRACE_IRQ_BIT,
+};
+
+#define trace_recursion_set(bit)       do { (current)->trace_recursion |= (1<<(bit)); } while (0)
+#define trace_recursion_clear(bit)     do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
+#define trace_recursion_test(bit)      ((current)->trace_recursion & (1<<(bit)))
+
+#define TRACE_CONTEXT_BITS     4
+
+#define TRACE_FTRACE_START     TRACE_FTRACE_BIT
+#define TRACE_FTRACE_MAX       ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
+
+#define TRACE_GLOBAL_START     TRACE_GLOBAL_BIT
+#define TRACE_GLOBAL_MAX       ((1 << (TRACE_GLOBAL_START + TRACE_CONTEXT_BITS)) - 1)
+
+#define TRACE_LIST_START       TRACE_INTERNAL_BIT
+#define TRACE_LIST_MAX         ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
+
+#define TRACE_CONTEXT_MASK     TRACE_LIST_MAX
+
+static __always_inline int trace_get_context_bit(void)
+{
+       int bit;
 
-#define trace_recursion_set(bit)       do { (current)->trace_recursion |= (bit); } while (0)
-#define trace_recursion_clear(bit)     do { (current)->trace_recursion &= ~(bit); } while (0)
-#define trace_recursion_test(bit)      ((current)->trace_recursion & (bit))
+       if (in_interrupt()) {
+               if (in_nmi())
+                       bit = 0;
+
+               else if (in_irq())
+                       bit = 1;
+               else
+                       bit = 2;
+       } else
+               bit = 3;
+
+       return bit;
+}
+
+static __always_inline int trace_test_and_set_recursion(int start, int max)
+{
+       unsigned int val = current->trace_recursion;
+       int bit;
+
+       /* A previous recursion check was made */
+       if ((val & TRACE_CONTEXT_MASK) > max)
+               return 0;
+
+       bit = trace_get_context_bit() + start;
+       if (unlikely(val & (1 << bit)))
+               return -1;
+
+       val |= 1 << bit;
+       current->trace_recursion = val;
+       barrier();
+
+       return bit;
+}
+
+static __always_inline void trace_clear_recursion(int bit)
+{
+       unsigned int val = current->trace_recursion;
+
+       if (!bit)
+               return;
+
+       bit = 1 << bit;
+       val &= ~bit;
+
+       barrier();
+       current->trace_recursion = val;
+}
 
 #define TRACE_PIPE_ALL_CPU     -1
 
index 394783531cbb76d9f7d9ab34ae0b6963d5d14dff..aa8f5f48dae610566f4b777119e1b3735b43170f 100644 (file)
@@ -21,8 +21,6 @@
 #include <linux/ktime.h>
 #include <linux/trace_clock.h>
 
-#include "trace.h"
-
 /*
  * trace_clock_local(): the simplest and least coherent tracing clock.
  *
@@ -44,6 +42,7 @@ u64 notrace trace_clock_local(void)
 
        return clock;
 }
+EXPORT_SYMBOL_GPL(trace_clock_local);
 
 /*
  * trace_clock(): 'between' trace clock. Not completely serialized,
@@ -86,7 +85,7 @@ u64 notrace trace_clock_global(void)
        local_irq_save(flags);
 
        this_cpu = raw_smp_processor_id();
-       now = cpu_clock(this_cpu);
+       now = sched_clock_cpu(this_cpu);
        /*
         * If in an NMI context then dont risk lockups and return the
         * cpu_clock() time:
index 880073d0b946fc58865b426511b038ca24271004..57e9b284250c4ea30d7ee11692066644099acc32 100644 (file)
@@ -116,7 +116,6 @@ static int trace_define_common_fields(void)
        __common_field(unsigned char, flags);
        __common_field(unsigned char, preempt_count);
        __common_field(int, pid);
-       __common_field(int, padding);
 
        return ret;
 }
index 8e3ad8082ab7074fde4ea9da403961afd2eadc45..60115252332668f2b39c4c868304b9187017b879 100644 (file)
@@ -47,34 +47,6 @@ static void function_trace_start(struct trace_array *tr)
        tracing_reset_online_cpus(tr);
 }
 
-static void
-function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
-                                struct ftrace_ops *op, struct pt_regs *pt_regs)
-{
-       struct trace_array *tr = func_trace;
-       struct trace_array_cpu *data;
-       unsigned long flags;
-       long disabled;
-       int cpu;
-       int pc;
-
-       if (unlikely(!ftrace_function_enabled))
-               return;
-
-       pc = preempt_count();
-       preempt_disable_notrace();
-       local_save_flags(flags);
-       cpu = raw_smp_processor_id();
-       data = tr->data[cpu];
-       disabled = atomic_inc_return(&data->disabled);
-
-       if (likely(disabled == 1))
-               trace_function(tr, ip, parent_ip, flags, pc);
-
-       atomic_dec(&data->disabled);
-       preempt_enable_notrace();
-}
-
 /* Our option */
 enum {
        TRACE_FUNC_OPT_STACK    = 0x1,
@@ -85,34 +57,34 @@ static struct tracer_flags func_flags;
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
-
 {
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
-       long disabled;
+       int bit;
        int cpu;
        int pc;
 
        if (unlikely(!ftrace_function_enabled))
                return;
 
-       /*
-        * Need to use raw, since this must be called before the
-        * recursive protection is performed.
-        */
-       local_irq_save(flags);
-       cpu = raw_smp_processor_id();
-       data = tr->data[cpu];
-       disabled = atomic_inc_return(&data->disabled);
+       pc = preempt_count();
+       preempt_disable_notrace();
 
-       if (likely(disabled == 1)) {
-               pc = preempt_count();
+       bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
+       if (bit < 0)
+               goto out;
+
+       cpu = smp_processor_id();
+       data = tr->data[cpu];
+       if (!atomic_read(&data->disabled)) {
+               local_save_flags(flags);
                trace_function(tr, ip, parent_ip, flags, pc);
        }
+       trace_clear_recursion(bit);
 
-       atomic_dec(&data->disabled);
-       local_irq_restore(flags);
+ out:
+       preempt_enable_notrace();
 }
 
 static void
@@ -185,11 +157,6 @@ static void tracing_start_function_trace(void)
 {
        ftrace_function_enabled = 0;
 
-       if (trace_flags & TRACE_ITER_PREEMPTONLY)
-               trace_ops.func = function_trace_call_preempt_only;
-       else
-               trace_ops.func = function_trace_call;
-
        if (func_flags.val & TRACE_FUNC_OPT_STACK)
                register_ftrace_function(&trace_stack_ops);
        else
index 4edb4b74eb7e75ba171240882b20ee14fbb41099..39ada66389ccce96a92a17d5eba63bbb7fe37e63 100644 (file)
@@ -47,6 +47,8 @@ struct fgraph_data {
 #define TRACE_GRAPH_PRINT_ABS_TIME     0x20
 #define TRACE_GRAPH_PRINT_IRQS         0x40
 
+static unsigned int max_depth;
+
 static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debug purpose) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
@@ -189,10 +191,16 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 
        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
        trace.rettime = trace_clock_local();
-       ftrace_graph_return(&trace);
        barrier();
        current->curr_ret_stack--;
 
+       /*
+        * The trace should run after decrementing the ret counter
+        * in case an interrupt comes in. We don't want to
+        * lose the interrupt if max_depth is set.
+        */
+       ftrace_graph_return(&trace);
+
        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
@@ -250,8 +258,9 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
                return 0;
 
        /* trace it when it is-nested-in or is a function enabled. */
-       if (!(trace->depth || ftrace_graph_addr(trace->func)) ||
-             ftrace_graph_ignore_irqs())
+       if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
+            ftrace_graph_ignore_irqs()) ||
+           (max_depth && trace->depth >= max_depth))
                return 0;
 
        local_irq_save(flags);
@@ -1457,6 +1466,59 @@ static struct tracer graph_trace __read_mostly = {
 #endif
 };
 
+
+static ssize_t
+graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
+                 loff_t *ppos)
+{
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+       if (ret)
+               return ret;
+
+       max_depth = val;
+
+       *ppos += cnt;
+
+       return cnt;
+}
+
+static ssize_t
+graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
+                loff_t *ppos)
+{
+       char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
+       int n;
+
+       n = sprintf(buf, "%d\n", max_depth);
+
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
+}
+
+static const struct file_operations graph_depth_fops = {
+       .open           = tracing_open_generic,
+       .write          = graph_depth_write,
+       .read           = graph_depth_read,
+       .llseek         = generic_file_llseek,
+};
+
+static __init int init_graph_debugfs(void)
+{
+       struct dentry *d_tracer;
+
+       d_tracer = tracing_init_dentry();
+       if (!d_tracer)
+               return 0;
+
+       trace_create_file("max_graph_depth", 0644, d_tracer,
+                         NULL, &graph_depth_fops);
+
+       return 0;
+}
+fs_initcall(init_graph_debugfs);
+
 static __init int init_graph_trace(void)
 {
        max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
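
The new max_graph_depth control file limits how deep the function graph tracer follows call chains; trace_graph_entry() bails out once trace->depth reaches the limit, and writing 0 removes the limit. A small usage sketch (assuming debugfs is mounted at /sys/kernel/debug; adjust the path for your system):

    /* Write and read back the graph tracer depth limit (path is an assumption). */
    #include <stdio.h>

    #define DEPTH_FILE "/sys/kernel/debug/tracing/max_graph_depth"

    int main(void)
    {
            char buf[32];
            FILE *f = fopen(DEPTH_FILE, "w");

            if (!f) {
                    perror(DEPTH_FILE);
                    return 1;
            }
            fprintf(f, "3\n");                    /* trace at most three call levels deep */
            fclose(f);

            f = fopen(DEPTH_FILE, "r");
            if (f && fgets(buf, sizeof(buf), f))
                    printf("max_graph_depth = %s", buf);
            if (f)
                    fclose(f);
            return 0;
    }
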
index 933708677814573c5c133b96574a919e842963ec..5c7e09d10d74d54638a0d845ca0d5c5f1a328f12 100644 (file)
@@ -66,7 +66,6 @@
 #define TP_FLAG_TRACE          1
 #define TP_FLAG_PROFILE                2
 #define TP_FLAG_REGISTERED     4
-#define TP_FLAG_UPROBE         8
 
 
 /* data_rloc: data relative location, compatible with u32 */
index 9fe45fcefca084b804b37d8a4f782fdafa87f1bb..75aa97fbe1a11120d93ed241e00ddbac5a51a55d 100644 (file)
@@ -15,8 +15,8 @@
 #include <linux/kallsyms.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
+#include <linux/sched/rt.h>
 #include <trace/events/sched.h>
-
 #include "trace.h"
 
 static struct trace_array      *wakeup_trace;
index 47623169a815def678e012083aa70a21fd7c8ec9..51c819c12c2916c8e93226c79d3fc464dc190ec3 100644 (file)
@@ -415,7 +415,8 @@ static void trace_selftest_test_recursion_func(unsigned long ip,
         * The ftrace infrastructure should provide the recursion
         * protection. If not, this will crash the kernel!
         */
-       trace_selftest_recursion_cnt++;
+       if (trace_selftest_recursion_cnt++ > 10)
+               return;
        DYN_FTRACE_TEST_NAME();
 }
 
@@ -452,7 +453,6 @@ trace_selftest_function_recursion(void)
        char *func_name;
        int len;
        int ret;
-       int cnt;
 
        /* The previous test PASSED */
        pr_cont("PASSED\n");
@@ -510,19 +510,10 @@ trace_selftest_function_recursion(void)
 
        unregister_ftrace_function(&test_recsafe_probe);
 
-       /*
-        * If arch supports all ftrace features, and no other task
-        * was on the list, we should be fine.
-        */
-       if (!ftrace_nr_registered_ops() && !FTRACE_FORCE_LIST_FUNC)
-               cnt = 2; /* Should have recursed */
-       else
-               cnt = 1;
-
        ret = -1;
-       if (trace_selftest_recursion_cnt != cnt) {
-               pr_cont("*callback not called expected %d times (%d)* ",
-                       cnt, trace_selftest_recursion_cnt);
+       if (trace_selftest_recursion_cnt != 2) {
+               pr_cont("*callback not called expected 2 times (%d)* ",
+                       trace_selftest_recursion_cnt);
                goto out;
        }
 
@@ -568,7 +559,7 @@ trace_selftest_function_regs(void)
        int ret;
        int supported = 0;
 
-#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        supported = 1;
 #endif
 
index 7609dd6714c2acc1a8862a47fbb014ef4f50e1c2..5329e13e74a13816969771b421852916f8bc3a50 100644 (file)
@@ -77,7 +77,7 @@ static struct syscall_metadata *syscall_nr_to_meta(int nr)
        return syscalls_metadata[nr];
 }
 
-enum print_line_t
+static enum print_line_t
 print_syscall_enter(struct trace_iterator *iter, int flags,
                    struct trace_event *event)
 {
@@ -130,7 +130,7 @@ end:
        return TRACE_TYPE_HANDLED;
 }
 
-enum print_line_t
+static enum print_line_t
 print_syscall_exit(struct trace_iterator *iter, int flags,
                   struct trace_event *event)
 {
@@ -270,7 +270,7 @@ static int syscall_exit_define_fields(struct ftrace_event_call *call)
        return ret;
 }
 
-void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
+static void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 {
        struct syscall_trace_enter *entry;
        struct syscall_metadata *sys_data;
@@ -305,7 +305,7 @@ void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
                trace_current_buffer_unlock_commit(buffer, event, 0, 0);
 }
 
-void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
+static void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 {
        struct syscall_trace_exit *entry;
        struct syscall_metadata *sys_data;
@@ -337,7 +337,7 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
                trace_current_buffer_unlock_commit(buffer, event, 0, 0);
 }
 
-int reg_event_syscall_enter(struct ftrace_event_call *call)
+static int reg_event_syscall_enter(struct ftrace_event_call *call)
 {
        int ret = 0;
        int num;
@@ -356,7 +356,7 @@ int reg_event_syscall_enter(struct ftrace_event_call *call)
        return ret;
 }
 
-void unreg_event_syscall_enter(struct ftrace_event_call *call)
+static void unreg_event_syscall_enter(struct ftrace_event_call *call)
 {
        int num;
 
@@ -371,7 +371,7 @@ void unreg_event_syscall_enter(struct ftrace_event_call *call)
        mutex_unlock(&syscall_trace_lock);
 }
 
-int reg_event_syscall_exit(struct ftrace_event_call *call)
+static int reg_event_syscall_exit(struct ftrace_event_call *call)
 {
        int ret = 0;
        int num;
@@ -390,7 +390,7 @@ int reg_event_syscall_exit(struct ftrace_event_call *call)
        return ret;
 }
 
-void unreg_event_syscall_exit(struct ftrace_event_call *call)
+static void unreg_event_syscall_exit(struct ftrace_event_call *call)
 {
        int num;
 
@@ -459,7 +459,7 @@ unsigned long __init __weak arch_syscall_addr(int nr)
        return (unsigned long)sys_call_table[nr];
 }
 
-int __init init_ftrace_syscalls(void)
+static int __init init_ftrace_syscalls(void)
 {
        struct syscall_metadata *meta;
        unsigned long addr;
index c86e6d4f67fbfac4a81c6c58d7194afa7603f7e5..8dad2a92dee9c9fff297d62edfe72147a1b371e4 100644 (file)
 
 #define UPROBE_EVENT_SYSTEM    "uprobes"
 
+struct trace_uprobe_filter {
+       rwlock_t                rwlock;
+       int                     nr_systemwide;
+       struct list_head        perf_events;
+};
+
 /*
  * uprobe event core functions
  */
-struct trace_uprobe;
-struct uprobe_trace_consumer {
-       struct uprobe_consumer          cons;
-       struct trace_uprobe             *tu;
-};
-
 struct trace_uprobe {
        struct list_head                list;
        struct ftrace_event_class       class;
        struct ftrace_event_call        call;
-       struct uprobe_trace_consumer    *consumer;
+       struct trace_uprobe_filter      filter;
+       struct uprobe_consumer          consumer;
        struct inode                    *inode;
        char                            *filename;
        unsigned long                   offset;
@@ -64,6 +65,18 @@ static LIST_HEAD(uprobe_list);
 
 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
 
+static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
+{
+       rwlock_init(&filter->rwlock);
+       filter->nr_systemwide = 0;
+       INIT_LIST_HEAD(&filter->perf_events);
+}
+
+static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
+{
+       return !filter->nr_systemwide && list_empty(&filter->perf_events);
+}
+
 /*
  * Allocate new trace_uprobe and initialize it (including uprobes).
  */
@@ -92,6 +105,8 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs)
                goto error;
 
        INIT_LIST_HEAD(&tu->list);
+       tu->consumer.handler = uprobe_dispatcher;
+       init_trace_uprobe_filter(&tu->filter);
        return tu;
 
 error:
@@ -253,12 +268,18 @@ static int create_trace_uprobe(int argc, char **argv)
        if (ret)
                goto fail_address_parse;
 
+       inode = igrab(path.dentry->d_inode);
+       path_put(&path);
+
+       if (!inode || !S_ISREG(inode->i_mode)) {
+               ret = -EINVAL;
+               goto fail_address_parse;
+       }
+
        ret = kstrtoul(arg, 0, &offset);
        if (ret)
                goto fail_address_parse;
 
-       inode = igrab(path.dentry->d_inode);
-
        argc -= 2;
        argv += 2;
 
@@ -356,7 +377,7 @@ fail_address_parse:
        if (inode)
                iput(inode);
 
-       pr_info("Failed to parse address.\n");
+       pr_info("Failed to parse address or file.\n");
 
        return ret;
 }
@@ -465,7 +486,7 @@ static const struct file_operations uprobe_profile_ops = {
 };
 
 /* uprobe handler */
-static void uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs)
+static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs)
 {
        struct uprobe_trace_entry_head *entry;
        struct ring_buffer_event *event;
@@ -475,8 +496,6 @@ static void uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs)
        unsigned long irq_flags;
        struct ftrace_event_call *call = &tu->call;
 
-       tu->nhit++;
-
        local_save_flags(irq_flags);
        pc = preempt_count();
 
@@ -485,16 +504,18 @@ static void uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs)
        event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
                                                  size, irq_flags, pc);
        if (!event)
-               return;
+               return 0;
 
        entry = ring_buffer_event_data(event);
-       entry->ip = uprobe_get_swbp_addr(task_pt_regs(current));
+       entry->ip = instruction_pointer(task_pt_regs(current));
        data = (u8 *)&entry[1];
        for (i = 0; i < tu->nr_args; i++)
                call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);
 
        if (!filter_current_check_discard(buffer, call, entry, event))
                trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
+
+       return 0;
 }
 
 /* Event entry printers */
@@ -533,42 +554,43 @@ partial:
        return TRACE_TYPE_PARTIAL_LINE;
 }
 
-static int probe_event_enable(struct trace_uprobe *tu, int flag)
+static inline bool is_trace_uprobe_enabled(struct trace_uprobe *tu)
 {
-       struct uprobe_trace_consumer *utc;
-       int ret = 0;
+       return tu->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE);
+}
 
-       if (!tu->inode || tu->consumer)
-               return -EINTR;
+typedef bool (*filter_func_t)(struct uprobe_consumer *self,
+                               enum uprobe_filter_ctx ctx,
+                               struct mm_struct *mm);
 
-       utc = kzalloc(sizeof(struct uprobe_trace_consumer), GFP_KERNEL);
-       if (!utc)
+static int
+probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter)
+{
+       int ret = 0;
+
+       if (is_trace_uprobe_enabled(tu))
                return -EINTR;
 
-       utc->cons.handler = uprobe_dispatcher;
-       utc->cons.filter = NULL;
-       ret = uprobe_register(tu->inode, tu->offset, &utc->cons);
-       if (ret) {
-               kfree(utc);
-               return ret;
-       }
+       WARN_ON(!uprobe_filter_is_empty(&tu->filter));
 
        tu->flags |= flag;
-       utc->tu = tu;
-       tu->consumer = utc;
+       tu->consumer.filter = filter;
+       ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
+       if (ret)
+               tu->flags &= ~flag;
 
-       return 0;
+       return ret;
 }
 
 static void probe_event_disable(struct trace_uprobe *tu, int flag)
 {
-       if (!tu->inode || !tu->consumer)
+       if (!is_trace_uprobe_enabled(tu))
                return;
 
-       uprobe_unregister(tu->inode, tu->offset, &tu->consumer->cons);
+       WARN_ON(!uprobe_filter_is_empty(&tu->filter));
+
+       uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
        tu->flags &= ~flag;
-       kfree(tu->consumer);
-       tu->consumer = NULL;
 }
 
 static int uprobe_event_define_fields(struct ftrace_event_call *event_call)
@@ -642,8 +664,96 @@ static int set_print_fmt(struct trace_uprobe *tu)
 }
 
 #ifdef CONFIG_PERF_EVENTS
+static bool
+__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
+{
+       struct perf_event *event;
+
+       if (filter->nr_systemwide)
+               return true;
+
+       list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
+               if (event->hw.tp_target->mm == mm)
+                       return true;
+       }
+
+       return false;
+}
+
+static inline bool
+uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
+{
+       return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm);
+}
+
+static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
+{
+       bool done;
+
+       write_lock(&tu->filter.rwlock);
+       if (event->hw.tp_target) {
+               /*
+                * event->parent != NULL means copy_process(), we can avoid
+                * uprobe_apply(). current->mm must be probed and we can rely
+                * on dup_mmap() which preserves the already installed bp's.
+                *
+                * attr.enable_on_exec means that exec/mmap will install the
+                * breakpoints we need.
+                */
+               done = tu->filter.nr_systemwide ||
+                       event->parent || event->attr.enable_on_exec ||
+                       uprobe_filter_event(tu, event);
+               list_add(&event->hw.tp_list, &tu->filter.perf_events);
+       } else {
+               done = tu->filter.nr_systemwide;
+               tu->filter.nr_systemwide++;
+       }
+       write_unlock(&tu->filter.rwlock);
+
+       if (!done)
+               uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
+
+       return 0;
+}
+
+static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
+{
+       bool done;
+
+       write_lock(&tu->filter.rwlock);
+       if (event->hw.tp_target) {
+               list_del(&event->hw.tp_list);
+               done = tu->filter.nr_systemwide ||
+                       (event->hw.tp_target->flags & PF_EXITING) ||
+                       uprobe_filter_event(tu, event);
+       } else {
+               tu->filter.nr_systemwide--;
+               done = tu->filter.nr_systemwide;
+       }
+       write_unlock(&tu->filter.rwlock);
+
+       if (!done)
+               uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
+
+       return 0;
+}
+
+static bool uprobe_perf_filter(struct uprobe_consumer *uc,
+                               enum uprobe_filter_ctx ctx, struct mm_struct *mm)
+{
+       struct trace_uprobe *tu;
+       int ret;
+
+       tu = container_of(uc, struct trace_uprobe, consumer);
+       read_lock(&tu->filter.rwlock);
+       ret = __uprobe_perf_filter(&tu->filter, mm);
+       read_unlock(&tu->filter.rwlock);
+
+       return ret;
+}
+
 /* uprobe profile handler */
-static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
+static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
 {
        struct ftrace_event_call *call = &tu->call;
        struct uprobe_trace_entry_head *entry;
@@ -652,11 +762,14 @@ static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
        int size, __size, i;
        int rctx;
 
+       if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
+               return UPROBE_HANDLER_REMOVE;
+
        __size = sizeof(*entry) + tu->size;
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);
        if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
-               return;
+               return 0;
 
        preempt_disable();
 
@@ -664,7 +777,7 @@ static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
        if (!entry)
                goto out;
 
-       entry->ip = uprobe_get_swbp_addr(task_pt_regs(current));
+       entry->ip = instruction_pointer(task_pt_regs(current));
        data = (u8 *)&entry[1];
        for (i = 0; i < tu->nr_args; i++)
                call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);
@@ -674,6 +787,7 @@ static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
 
  out:
        preempt_enable();
+       return 0;
 }
 #endif /* CONFIG_PERF_EVENTS */
 
@@ -684,7 +798,7 @@ int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type,
 
        switch (type) {
        case TRACE_REG_REGISTER:
-               return probe_event_enable(tu, TP_FLAG_TRACE);
+               return probe_event_enable(tu, TP_FLAG_TRACE, NULL);
 
        case TRACE_REG_UNREGISTER:
                probe_event_disable(tu, TP_FLAG_TRACE);
@@ -692,11 +806,18 @@ int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type,
 
 #ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
-               return probe_event_enable(tu, TP_FLAG_PROFILE);
+               return probe_event_enable(tu, TP_FLAG_PROFILE, uprobe_perf_filter);
 
        case TRACE_REG_PERF_UNREGISTER:
                probe_event_disable(tu, TP_FLAG_PROFILE);
                return 0;
+
+       case TRACE_REG_PERF_OPEN:
+               return uprobe_perf_open(tu, data);
+
+       case TRACE_REG_PERF_CLOSE:
+               return uprobe_perf_close(tu, data);
+
 #endif
        default:
                return 0;
@@ -706,22 +827,20 @@ int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type,
 
 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
 {
-       struct uprobe_trace_consumer *utc;
        struct trace_uprobe *tu;
+       int ret = 0;
 
-       utc = container_of(con, struct uprobe_trace_consumer, cons);
-       tu = utc->tu;
-       if (!tu || tu->consumer != utc)
-               return 0;
+       tu = container_of(con, struct trace_uprobe, consumer);
+       tu->nhit++;
 
        if (tu->flags & TP_FLAG_TRACE)
-               uprobe_trace_func(tu, regs);
+               ret |= uprobe_trace_func(tu, regs);
 
 #ifdef CONFIG_PERF_EVENTS
        if (tu->flags & TP_FLAG_PROFILE)
-               uprobe_perf_func(tu, regs);
+               ret |= uprobe_perf_func(tu, regs);
 #endif
-       return 0;
+       return ret;
 }
 
 static struct trace_event_functions uprobe_funcs = {
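
The per-consumer filter added above decides whether a probe's breakpoint must stay installed in a given address space: any system-wide counter keeps it installed everywhere, otherwise only the mms targeted by an attached per-task event qualify. A userspace model of that decision (a sketch with illustrative types; the kernel walks perf_events keyed by event->hw.tp_target->mm under filter.rwlock):

    /* Sketch of the uprobe perf filter decision (illustrative types and names). */
    #include <stdbool.h>
    #include <stdio.h>

    struct pevent {                               /* stand-in for a per-task perf_event */
            unsigned long target_mm;              /* mm of the traced task */
            struct pevent *next;
    };

    struct uprobe_filter {
            int nr_systemwide;                    /* counters with no target task */
            struct pevent *events;                /* per-task events attached to this probe */
    };

    /* Should the breakpoint stay installed in address space @mm? */
    static bool filter_mm(const struct uprobe_filter *f, unsigned long mm)
    {
            const struct pevent *e;

            if (f->nr_systemwide)
                    return true;                  /* a system-wide counter probes every mm */
            for (e = f->events; e; e = e->next)
                    if (e->target_mm == mm)
                            return true;          /* some event targets a task using this mm */
            return false;
    }

    int main(void)
    {
            struct pevent e = { .target_mm = 0x1000, .next = NULL };
            struct uprobe_filter f = { .nr_systemwide = 0, .events = &e };

            printf("mm 0x1000: %d, mm 0x2000: %d\n",
                   filter_mm(&f, 0x1000), filter_mm(&f, 0x2000));
            return 0;
    }
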
index 625df0b44690a67a1ca6d5f6e7f264fc1ac28fb5..a1dd9a1b1327e9121265cb681b4b483a375b163a 100644 (file)
@@ -32,6 +32,7 @@ void bacct_add_tsk(struct user_namespace *user_ns,
 {
        const struct cred *tcred;
        struct timespec uptime, ts;
+       cputime_t utime, stime, utimescaled, stimescaled;
        u64 ac_etime;
 
        BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN);
@@ -65,10 +66,15 @@ void bacct_add_tsk(struct user_namespace *user_ns,
        stats->ac_ppid   = pid_alive(tsk) ?
                task_tgid_nr_ns(rcu_dereference(tsk->real_parent), pid_ns) : 0;
        rcu_read_unlock();
-       stats->ac_utime = cputime_to_usecs(tsk->utime);
-       stats->ac_stime = cputime_to_usecs(tsk->stime);
-       stats->ac_utimescaled = cputime_to_usecs(tsk->utimescaled);
-       stats->ac_stimescaled = cputime_to_usecs(tsk->stimescaled);
+
+       task_cputime(tsk, &utime, &stime);
+       stats->ac_utime = cputime_to_usecs(utime);
+       stats->ac_stime = cputime_to_usecs(stime);
+
+       task_cputime_scaled(tsk, &utimescaled, &stimescaled);
+       stats->ac_utimescaled = cputime_to_usecs(utimescaled);
+       stats->ac_stimescaled = cputime_to_usecs(stimescaled);
+
        stats->ac_minflt = tsk->min_flt;
        stats->ac_majflt = tsk->maj_flt;
 
@@ -115,11 +121,8 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
 #undef KB
 #undef MB
 
-/**
- * acct_update_integrals - update mm integral fields in task_struct
- * @tsk: task_struct for accounting
- */
-void acct_update_integrals(struct task_struct *tsk)
+static void __acct_update_integrals(struct task_struct *tsk,
+                                   cputime_t utime, cputime_t stime)
 {
        if (likely(tsk->mm)) {
                cputime_t time, dtime;
@@ -128,7 +131,7 @@ void acct_update_integrals(struct task_struct *tsk)
                u64 delta;
 
                local_irq_save(flags);
-               time = tsk->stime + tsk->utime;
+               time = stime + utime;
                dtime = time - tsk->acct_timexpd;
                jiffies_to_timeval(cputime_to_jiffies(dtime), &value);
                delta = value.tv_sec;
@@ -144,6 +147,27 @@ void acct_update_integrals(struct task_struct *tsk)
        }
 }
 
+/**
+ * acct_update_integrals - update mm integral fields in task_struct
+ * @tsk: task_struct for accounting
+ */
+void acct_update_integrals(struct task_struct *tsk)
+{
+       cputime_t utime, stime;
+
+       task_cputime(tsk, &utime, &stime);
+       __acct_update_integrals(tsk, utime, stime);
+}
+
+/**
+ * acct_account_cputime - update mm integral after cputime update
+ * @tsk: task_struct for accounting
+ */
+void acct_account_cputime(struct task_struct *tsk)
+{
+       __acct_update_integrals(tsk, tsk->utime, tsk->stime);
+}
+
 /**
  * acct_clear_integrals - clear the mm integral fields in task_struct
  * @tsk: task_struct whose accounting fields are cleared
index 75a2ab3d0b0208dfa51e40339ffd00206622732e..27689422aa9284274f3514286dfafc6da04b2880 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/module.h>
 #include <linux/sysctl.h>
 #include <linux/smpboot.h>
+#include <linux/sched/rt.h>
 
 #include <asm/irq_regs.h>
 #include <linux/kvm_para.h>
index fbc6576a83c3e6837df8f9912963aadad2860a06..f4feacad3812e38e44d99a095d4eaa73185c5d10 100644 (file)
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
 #include <linux/idr.h>
+#include <linux/hashtable.h>
 
-#include "workqueue_sched.h"
+#include "workqueue_internal.h"
 
 enum {
        /*
-        * global_cwq flags
+        * worker_pool flags
         *
-        * A bound gcwq is either associated or disassociated with its CPU.
+        * A bound pool is either associated or disassociated with its CPU.
         * While associated (!DISASSOCIATED), all workers are bound to the
         * CPU and none has %WORKER_UNBOUND set and concurrency management
         * is in effect.
         *
         * While DISASSOCIATED, the cpu may be offline and all workers have
         * %WORKER_UNBOUND set and concurrency management disabled, and may
-        * be executing on any CPU.  The gcwq behaves as an unbound one.
+        * be executing on any CPU.  The pool behaves as an unbound one.
         *
         * Note that DISASSOCIATED can be flipped only while holding
-        * assoc_mutex of all pools on the gcwq to avoid changing binding
-        * state while create_worker() is in progress.
+        * assoc_mutex to avoid changing binding state while
+        * create_worker() is in progress.
         */
-       GCWQ_DISASSOCIATED      = 1 << 0,       /* cpu can't serve workers */
-       GCWQ_FREEZING           = 1 << 1,       /* freeze in progress */
-
-       /* pool flags */
        POOL_MANAGE_WORKERS     = 1 << 0,       /* need to manage workers */
        POOL_MANAGING_WORKERS   = 1 << 1,       /* managing workers */
+       POOL_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */
+       POOL_FREEZING           = 1 << 3,       /* freeze in progress */
 
        /* worker flags */
        WORKER_STARTED          = 1 << 0,       /* started */
@@ -79,11 +78,9 @@ enum {
        WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_UNBOUND |
                                  WORKER_CPU_INTENSIVE,
 
-       NR_WORKER_POOLS         = 2,            /* # worker pools per gcwq */
+       NR_STD_WORKER_POOLS     = 2,            /* # standard pools per cpu */
 
        BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */
-       BUSY_WORKER_HASH_SIZE   = 1 << BUSY_WORKER_HASH_ORDER,
-       BUSY_WORKER_HASH_MASK   = BUSY_WORKER_HASH_SIZE - 1,
 
        MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
        IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */
@@ -111,48 +108,24 @@ enum {
  * P: Preemption protected.  Disabling preemption is enough and should
  *    only be modified and accessed from the local cpu.
  *
- * L: gcwq->lock protected.  Access with gcwq->lock held.
+ * L: pool->lock protected.  Access with pool->lock held.
  *
- * X: During normal operation, modification requires gcwq->lock and
- *    should be done only from local cpu.  Either disabling preemption
- *    on local cpu or grabbing gcwq->lock is enough for read access.
- *    If GCWQ_DISASSOCIATED is set, it's identical to L.
+ * X: During normal operation, modification requires pool->lock and should
+ *    be done only from local cpu.  Either disabling preemption on local
+ *    cpu or grabbing pool->lock is enough for read access.  If
+ *    POOL_DISASSOCIATED is set, it's identical to L.
  *
  * F: wq->flush_mutex protected.
  *
  * W: workqueue_lock protected.
  */
 
-struct global_cwq;
-struct worker_pool;
-
-/*
- * The poor guys doing the actual heavy lifting.  All on-duty workers
- * are either serving the manager role, on idle list or on busy hash.
- */
-struct worker {
-       /* on idle list while idle, on busy hash table while busy */
-       union {
-               struct list_head        entry;  /* L: while idle */
-               struct hlist_node       hentry; /* L: while busy */
-       };
-
-       struct work_struct      *current_work;  /* L: work being processed */
-       struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
-       struct list_head        scheduled;      /* L: scheduled works */
-       struct task_struct      *task;          /* I: worker task */
-       struct worker_pool      *pool;          /* I: the associated pool */
-       /* 64 bytes boundary on 64bit, 32 on 32bit */
-       unsigned long           last_active;    /* L: last active timestamp */
-       unsigned int            flags;          /* X: flags */
-       int                     id;             /* I: worker id */
-
-       /* for rebinding worker to CPU */
-       struct work_struct      rebind_work;    /* L: for busy worker */
-};
+/* struct worker is defined in workqueue_internal.h */
 
 struct worker_pool {
-       struct global_cwq       *gcwq;          /* I: the owning gcwq */
+       spinlock_t              lock;           /* the pool lock */
+       unsigned int            cpu;            /* I: the associated cpu */
+       int                     id;             /* I: pool ID */
        unsigned int            flags;          /* X: flags */
 
        struct list_head        worklist;       /* L: list of pending works */
@@ -165,34 +138,28 @@ struct worker_pool {
        struct timer_list       idle_timer;     /* L: worker idle timeout */
        struct timer_list       mayday_timer;   /* L: SOS timer for workers */
 
-       struct mutex            assoc_mutex;    /* protect GCWQ_DISASSOCIATED */
-       struct ida              worker_ida;     /* L: for worker IDs */
-};
-
-/*
- * Global per-cpu workqueue.  There's one and only one for each cpu
- * and all works are queued and processed here regardless of their
- * target workqueues.
- */
-struct global_cwq {
-       spinlock_t              lock;           /* the gcwq lock */
-       unsigned int            cpu;            /* I: the associated cpu */
-       unsigned int            flags;          /* L: GCWQ_* flags */
-
-       /* workers are chained either in busy_hash or pool idle_list */
-       struct hlist_head       busy_hash[BUSY_WORKER_HASH_SIZE];
+       /* workers are chained either in busy_hash or idle_list */
+       DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
                                                /* L: hash of busy workers */
 
-       struct worker_pool      pools[NR_WORKER_POOLS];
-                                               /* normal and highpri pools */
+       struct mutex            assoc_mutex;    /* protect POOL_DISASSOCIATED */
+       struct ida              worker_ida;     /* L: for worker IDs */
+
+       /*
+        * The current concurrency level.  As it's likely to be accessed
+        * from other CPUs during try_to_wake_up(), put it in a separate
+        * cacheline.
+        */
+       atomic_t                nr_running ____cacheline_aligned_in_smp;
 } ____cacheline_aligned_in_smp;
 
 /*
- * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
- * work_struct->data are used for flags and thus cwqs need to be
- * aligned at two's power of the number of flag bits.
+ * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
+ * of work_struct->data are used for flags and the remaining high bits
+ * point to the pwq; thus, pwqs need to be aligned at two's power of the
+ * number of flag bits.
  */
-struct cpu_workqueue_struct {
+struct pool_workqueue {
        struct worker_pool      *pool;          /* I: the associated pool */
        struct workqueue_struct *wq;            /* I: the owning workqueue */
        int                     work_color;     /* L: current color */
@@ -241,16 +208,16 @@ typedef unsigned long mayday_mask_t;
 struct workqueue_struct {
        unsigned int            flags;          /* W: WQ_* flags */
        union {
-               struct cpu_workqueue_struct __percpu    *pcpu;
-               struct cpu_workqueue_struct             *single;
+               struct pool_workqueue __percpu          *pcpu;
+               struct pool_workqueue                   *single;
                unsigned long                           v;
-       } cpu_wq;                               /* I: cwq's */
+       } pool_wq;                              /* I: pwq's */
        struct list_head        list;           /* W: list of all workqueues */
 
        struct mutex            flush_mutex;    /* protects wq flushing */
        int                     work_color;     /* F: current work color */
        int                     flush_color;    /* F: current flush color */
-       atomic_t                nr_cwqs_to_flush; /* flush in progress */
+       atomic_t                nr_pwqs_to_flush; /* flush in progress */
        struct wq_flusher       *first_flusher; /* F: first flusher */
        struct list_head        flusher_queue;  /* F: flush waiters */
        struct list_head        flusher_overflow; /* F: flush overflow list */
@@ -259,7 +226,7 @@ struct workqueue_struct {
        struct worker           *rescuer;       /* I: rescue worker */
 
        int                     nr_drainers;    /* W: drain in progress */
-       int                     saved_max_active; /* W: saved cwq max_active */
+       int                     saved_max_active; /* W: saved pwq max_active */
 #ifdef CONFIG_LOCKDEP
        struct lockdep_map      lockdep_map;
 #endif
@@ -280,16 +247,15 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
 
-#define for_each_worker_pool(pool, gcwq)                               \
-       for ((pool) = &(gcwq)->pools[0];                                \
-            (pool) < &(gcwq)->pools[NR_WORKER_POOLS]; (pool)++)
+#define for_each_std_worker_pool(pool, cpu)                            \
+       for ((pool) = &std_worker_pools(cpu)[0];                        \
+            (pool) < &std_worker_pools(cpu)[NR_STD_WORKER_POOLS]; (pool)++)
 
-#define for_each_busy_worker(worker, i, pos, gcwq)                     \
-       for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)                     \
-               hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
+#define for_each_busy_worker(worker, i, pos, pool)                     \
+       hash_for_each(pool->busy_hash, i, pos, worker, hentry)
 
-static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
-                                 unsigned int sw)
+static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
+                               unsigned int sw)
 {
        if (cpu < nr_cpu_ids) {
                if (sw & 1) {
@@ -300,42 +266,42 @@ static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
                if (sw & 2)
                        return WORK_CPU_UNBOUND;
        }
-       return WORK_CPU_NONE;
+       return WORK_CPU_END;
 }
 
-static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
-                               struct workqueue_struct *wq)
+static inline int __next_pwq_cpu(int cpu, const struct cpumask *mask,
+                                struct workqueue_struct *wq)
 {
-       return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
+       return __next_wq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
 }
 
 /*
  * CPU iterators
  *
- * An extra gcwq is defined for an invalid cpu number
+ * An extra cpu number is defined using an invalid cpu number
  * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
- * specific CPU.  The following iterators are similar to
- * for_each_*_cpu() iterators but also considers the unbound gcwq.
+ * specific CPU.  The following iterators are similar to for_each_*_cpu()
+ * iterators but also considers the unbound CPU.
  *
- * for_each_gcwq_cpu()         : possible CPUs + WORK_CPU_UNBOUND
- * for_each_online_gcwq_cpu()  : online CPUs + WORK_CPU_UNBOUND
- * for_each_cwq_cpu()          : possible CPUs for bound workqueues,
+ * for_each_wq_cpu()           : possible CPUs + WORK_CPU_UNBOUND
+ * for_each_online_wq_cpu()    : online CPUs + WORK_CPU_UNBOUND
+ * for_each_pwq_cpu()          : possible CPUs for bound workqueues,
  *                               WORK_CPU_UNBOUND for unbound workqueues
  */
-#define for_each_gcwq_cpu(cpu)                                         \
-       for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);         \
-            (cpu) < WORK_CPU_NONE;                                     \
-            (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
+#define for_each_wq_cpu(cpu)                                           \
+       for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, 3);           \
+            (cpu) < WORK_CPU_END;                                      \
+            (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, 3))
 
-#define for_each_online_gcwq_cpu(cpu)                                  \
-       for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);           \
-            (cpu) < WORK_CPU_NONE;                                     \
-            (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
+#define for_each_online_wq_cpu(cpu)                                    \
+       for ((cpu) = __next_wq_cpu(-1, cpu_online_mask, 3);             \
+            (cpu) < WORK_CPU_END;                                      \
+            (cpu) = __next_wq_cpu((cpu), cpu_online_mask, 3))
 
-#define for_each_cwq_cpu(cpu, wq)                                      \
-       for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));        \
-            (cpu) < WORK_CPU_NONE;                                     \
-            (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
+#define for_each_pwq_cpu(cpu, wq)                                      \
+       for ((cpu) = __next_pwq_cpu(-1, cpu_possible_mask, (wq));       \
+            (cpu) < WORK_CPU_END;                                      \
+            (cpu) = __next_pwq_cpu((cpu), cpu_possible_mask, (wq)))
 
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 
@@ -459,57 +425,69 @@ static LIST_HEAD(workqueues);
 static bool workqueue_freezing;                /* W: have wqs started freezing? */
 
 /*
- * The almighty global cpu workqueues.  nr_running is the only field
- * which is expected to be used frequently by other cpus via
- * try_to_wake_up().  Put it in a separate cacheline.
+ * The CPU and unbound standard worker pools.  The unbound ones have
+ * POOL_DISASSOCIATED set, and their workers have WORKER_UNBOUND set.
  */
-static DEFINE_PER_CPU(struct global_cwq, global_cwq);
-static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_WORKER_POOLS]);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
+                                    cpu_std_worker_pools);
+static struct worker_pool unbound_std_worker_pools[NR_STD_WORKER_POOLS];
 
-/*
- * Global cpu workqueue and nr_running counter for unbound gcwq.  The
- * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
- * workers have WORKER_UNBOUND set.
- */
-static struct global_cwq unbound_global_cwq;
-static atomic_t unbound_pool_nr_running[NR_WORKER_POOLS] = {
-       [0 ... NR_WORKER_POOLS - 1]     = ATOMIC_INIT(0),       /* always 0 */
-};
+/* idr of all pools */
+static DEFINE_MUTEX(worker_pool_idr_mutex);
+static DEFINE_IDR(worker_pool_idr);
 
 static int worker_thread(void *__worker);
 
-static int worker_pool_pri(struct worker_pool *pool)
+static struct worker_pool *std_worker_pools(int cpu)
 {
-       return pool - pool->gcwq->pools;
+       if (cpu != WORK_CPU_UNBOUND)
+               return per_cpu(cpu_std_worker_pools, cpu);
+       else
+               return unbound_std_worker_pools;
 }
 
-static struct global_cwq *get_gcwq(unsigned int cpu)
+static int std_worker_pool_pri(struct worker_pool *pool)
 {
-       if (cpu != WORK_CPU_UNBOUND)
-               return &per_cpu(global_cwq, cpu);
-       else
-               return &unbound_global_cwq;
+       return pool - std_worker_pools(pool->cpu);
 }
 
-static atomic_t *get_pool_nr_running(struct worker_pool *pool)
+/* allocate ID and assign it to @pool */
+static int worker_pool_assign_id(struct worker_pool *pool)
 {
-       int cpu = pool->gcwq->cpu;
-       int idx = worker_pool_pri(pool);
+       int ret;
 
-       if (cpu != WORK_CPU_UNBOUND)
-               return &per_cpu(pool_nr_running, cpu)[idx];
-       else
-               return &unbound_pool_nr_running[idx];
+       mutex_lock(&worker_pool_idr_mutex);
+       idr_pre_get(&worker_pool_idr, GFP_KERNEL);
+       ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
+       mutex_unlock(&worker_pool_idr_mutex);
+
+       return ret;
 }
 
-static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
-                                           struct workqueue_struct *wq)
+/*
+ * Lookup worker_pool by id.  The idr currently is built during boot and
+ * never modified.  Don't worry about locking for now.
+ */
+static struct worker_pool *worker_pool_by_id(int pool_id)
+{
+       return idr_find(&worker_pool_idr, pool_id);
+}
+
+static struct worker_pool *get_std_worker_pool(int cpu, bool highpri)
+{
+       struct worker_pool *pools = std_worker_pools(cpu);
+
+       return &pools[highpri];
+}
+
+static struct pool_workqueue *get_pwq(unsigned int cpu,
+                                     struct workqueue_struct *wq)
 {
        if (!(wq->flags & WQ_UNBOUND)) {
                if (likely(cpu < nr_cpu_ids))
-                       return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
+                       return per_cpu_ptr(wq->pool_wq.pcpu, cpu);
        } else if (likely(cpu == WORK_CPU_UNBOUND))
-               return wq->cpu_wq.single;
+               return wq->pool_wq.single;
        return NULL;
 }
 
@@ -530,19 +508,19 @@ static int work_next_color(int color)
 }
 
 /*
- * While queued, %WORK_STRUCT_CWQ is set and non flag bits of a work's data
- * contain the pointer to the queued cwq.  Once execution starts, the flag
- * is cleared and the high bits contain OFFQ flags and CPU number.
+ * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
+ * contain the pointer to the queued pwq.  Once execution starts, the flag
+ * is cleared and the high bits contain OFFQ flags and pool ID.
  *
- * set_work_cwq(), set_work_cpu_and_clear_pending(), mark_work_canceling()
- * and clear_work_data() can be used to set the cwq, cpu or clear
+ * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
+ * and clear_work_data() can be used to set the pwq, pool or clear
  * work->data.  These functions should only be called while the work is
  * owned - ie. while the PENDING bit is set.
  *
- * get_work_[g]cwq() can be used to obtain the gcwq or cwq corresponding to
- * a work.  gcwq is available once the work has been queued anywhere after
- * initialization until it is sync canceled.  cwq is available only while
- * the work item is queued.
+ * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
+ * corresponding to a work.  Pool is available once the work has been
+ * queued anywhere after initialization until it is sync canceled.  pwq is
+ * available only while the work item is queued.
  *
  * %WORK_OFFQ_CANCELING is used to mark a work item which is being
  * canceled.  While being canceled, a work item may have its PENDING set
@@ -556,16 +534,22 @@ static inline void set_work_data(struct work_struct *work, unsigned long data,
        atomic_long_set(&work->data, data | flags | work_static(work));
 }
 
-static void set_work_cwq(struct work_struct *work,
-                        struct cpu_workqueue_struct *cwq,
+static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
                         unsigned long extra_flags)
 {
-       set_work_data(work, (unsigned long)cwq,
-                     WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
+       set_work_data(work, (unsigned long)pwq,
+                     WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
 }
 
-static void set_work_cpu_and_clear_pending(struct work_struct *work,
-                                          unsigned int cpu)
+static void set_work_pool_and_keep_pending(struct work_struct *work,
+                                          int pool_id)
+{
+       set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
+                     WORK_STRUCT_PENDING);
+}
+
+static void set_work_pool_and_clear_pending(struct work_struct *work,
+                                           int pool_id)
 {
        /*
         * The following wmb is paired with the implied mb in
@@ -574,67 +558,92 @@ static void set_work_cpu_and_clear_pending(struct work_struct *work,
         * owner.
         */
        smp_wmb();
-       set_work_data(work, (unsigned long)cpu << WORK_OFFQ_CPU_SHIFT, 0);
+       set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
 }
 
 static void clear_work_data(struct work_struct *work)
 {
-       smp_wmb();      /* see set_work_cpu_and_clear_pending() */
-       set_work_data(work, WORK_STRUCT_NO_CPU, 0);
+       smp_wmb();      /* see set_work_pool_and_clear_pending() */
+       set_work_data(work, WORK_STRUCT_NO_POOL, 0);
 }
 
-static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
+static struct pool_workqueue *get_work_pwq(struct work_struct *work)
 {
        unsigned long data = atomic_long_read(&work->data);
 
-       if (data & WORK_STRUCT_CWQ)
+       if (data & WORK_STRUCT_PWQ)
                return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
        else
                return NULL;
 }
 
-static struct global_cwq *get_work_gcwq(struct work_struct *work)
+/**
+ * get_work_pool - return the worker_pool a given work was associated with
+ * @work: the work item of interest
+ *
+ * Return the worker_pool @work was last associated with.  %NULL if none.
+ */
+static struct worker_pool *get_work_pool(struct work_struct *work)
 {
        unsigned long data = atomic_long_read(&work->data);
-       unsigned int cpu;
+       struct worker_pool *pool;
+       int pool_id;
 
-       if (data & WORK_STRUCT_CWQ)
-               return ((struct cpu_workqueue_struct *)
-                       (data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq;
+       if (data & WORK_STRUCT_PWQ)
+               return ((struct pool_workqueue *)
+                       (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
 
-       cpu = data >> WORK_OFFQ_CPU_SHIFT;
-       if (cpu == WORK_CPU_NONE)
+       pool_id = data >> WORK_OFFQ_POOL_SHIFT;
+       if (pool_id == WORK_OFFQ_POOL_NONE)
                return NULL;
 
-       BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
-       return get_gcwq(cpu);
+       pool = worker_pool_by_id(pool_id);
+       WARN_ON_ONCE(!pool);
+       return pool;
+}
+
+/**
+ * get_work_pool_id - return the worker pool ID a given work is associated with
+ * @work: the work item of interest
+ *
+ * Return the worker_pool ID @work was last associated with.
+ * %WORK_OFFQ_POOL_NONE if none.
+ */
+static int get_work_pool_id(struct work_struct *work)
+{
+       unsigned long data = atomic_long_read(&work->data);
+
+       if (data & WORK_STRUCT_PWQ)
+               return ((struct pool_workqueue *)
+                       (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
+
+       return data >> WORK_OFFQ_POOL_SHIFT;
 }
 
 static void mark_work_canceling(struct work_struct *work)
 {
-       struct global_cwq *gcwq = get_work_gcwq(work);
-       unsigned long cpu = gcwq ? gcwq->cpu : WORK_CPU_NONE;
+       unsigned long pool_id = get_work_pool_id(work);
 
-       set_work_data(work, (cpu << WORK_OFFQ_CPU_SHIFT) | WORK_OFFQ_CANCELING,
-                     WORK_STRUCT_PENDING);
+       pool_id <<= WORK_OFFQ_POOL_SHIFT;
+       set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
 }
 
 static bool work_is_canceling(struct work_struct *work)
 {
        unsigned long data = atomic_long_read(&work->data);
 
-       return !(data & WORK_STRUCT_CWQ) && (data & WORK_OFFQ_CANCELING);
+       return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
 }
 
 /*
  * Policy functions.  These define the policies on how the global worker
  * pools are managed.  Unless noted otherwise, these functions assume that
- * they're being called with gcwq->lock held.
+ * they're being called with pool->lock held.
  */
 
 static bool __need_more_worker(struct worker_pool *pool)
 {
-       return !atomic_read(get_pool_nr_running(pool));
+       return !atomic_read(&pool->nr_running);
 }
 
 /*
@@ -642,7 +651,7 @@ static bool __need_more_worker(struct worker_pool *pool)
  * running workers.
  *
  * Note that, because unbound workers never contribute to nr_running, this
- * function will always return %true for unbound gcwq as long as the
+ * function will always return %true for unbound pools as long as the
  * worklist isn't empty.
  */
 static bool need_more_worker(struct worker_pool *pool)
@@ -659,9 +668,8 @@ static bool may_start_working(struct worker_pool *pool)
 /* Do I need to keep working?  Called from currently running workers. */
 static bool keep_working(struct worker_pool *pool)
 {
-       atomic_t *nr_running = get_pool_nr_running(pool);
-
-       return !list_empty(&pool->worklist) && atomic_read(nr_running) <= 1;
+       return !list_empty(&pool->worklist) &&
+               atomic_read(&pool->nr_running) <= 1;
 }
 
 /* Do we need a new worker?  Called from manager. */
@@ -714,7 +722,7 @@ static struct worker *first_worker(struct worker_pool *pool)
  * Wake up the first idle worker of @pool.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
 static void wake_up_worker(struct worker_pool *pool)
 {
@@ -740,8 +748,8 @@ void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
        struct worker *worker = kthread_data(task);
 
        if (!(worker->flags & WORKER_NOT_RUNNING)) {
-               WARN_ON_ONCE(worker->pool->gcwq->cpu != cpu);
-               atomic_inc(get_pool_nr_running(worker->pool));
+               WARN_ON_ONCE(worker->pool->cpu != cpu);
+               atomic_inc(&worker->pool->nr_running);
        }
 }
 
@@ -764,12 +772,18 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
                                       unsigned int cpu)
 {
        struct worker *worker = kthread_data(task), *to_wakeup = NULL;
-       struct worker_pool *pool = worker->pool;
-       atomic_t *nr_running = get_pool_nr_running(pool);
+       struct worker_pool *pool;
 
+       /*
+        * Rescuers, which may not have all the fields set up like normal
+        * workers, also reach here; let's not access anything before
+        * checking NOT_RUNNING.
+        */
        if (worker->flags & WORKER_NOT_RUNNING)
                return NULL;
 
+       pool = worker->pool;
+
        /* this can only happen on the local cpu */
        BUG_ON(cpu != raw_smp_processor_id());
 
@@ -781,10 +795,11 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
         * NOT_RUNNING is clear.  This means that we're bound to and
         * running on the local cpu w/ rq lock held and preemption
         * disabled, which in turn means that no one else could be
-        * manipulating idle_list, so dereferencing idle_list without gcwq
+        * manipulating idle_list, so dereferencing idle_list without pool
         * lock is safe.
         */
-       if (atomic_dec_and_test(nr_running) && !list_empty(&pool->worklist))
+       if (atomic_dec_and_test(&pool->nr_running) &&
+           !list_empty(&pool->worklist))
                to_wakeup = first_worker(pool);
        return to_wakeup ? to_wakeup->task : NULL;
 }
@@ -800,7 +815,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
  * woken up.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock)
+ * spin_lock_irq(pool->lock)
  */
 static inline void worker_set_flags(struct worker *worker, unsigned int flags,
                                    bool wakeup)
@@ -816,14 +831,12 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags,
         */
        if ((flags & WORKER_NOT_RUNNING) &&
            !(worker->flags & WORKER_NOT_RUNNING)) {
-               atomic_t *nr_running = get_pool_nr_running(pool);
-
                if (wakeup) {
-                       if (atomic_dec_and_test(nr_running) &&
+                       if (atomic_dec_and_test(&pool->nr_running) &&
                            !list_empty(&pool->worklist))
                                wake_up_worker(pool);
                } else
-                       atomic_dec(nr_running);
+                       atomic_dec(&pool->nr_running);
        }
 
        worker->flags |= flags;
@@ -837,7 +850,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags,
  * Clear @flags in @worker->flags and adjust nr_running accordingly.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock)
+ * spin_lock_irq(pool->lock)
  */
 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 {
@@ -855,87 +868,56 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
         */
        if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
                if (!(worker->flags & WORKER_NOT_RUNNING))
-                       atomic_inc(get_pool_nr_running(pool));
+                       atomic_inc(&pool->nr_running);
 }
 
 /**
- * busy_worker_head - return the busy hash head for a work
- * @gcwq: gcwq of interest
- * @work: work to be hashed
- *
- * Return hash head of @gcwq for @work.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- *
- * RETURNS:
- * Pointer to the hash head.
- */
-static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
-                                          struct work_struct *work)
-{
-       const int base_shift = ilog2(sizeof(struct work_struct));
-       unsigned long v = (unsigned long)work;
-
-       /* simple shift and fold hash, do we need something better? */
-       v >>= base_shift;
-       v += v >> BUSY_WORKER_HASH_ORDER;
-       v &= BUSY_WORKER_HASH_MASK;
-
-       return &gcwq->busy_hash[v];
-}
-
-/**
- * __find_worker_executing_work - find worker which is executing a work
- * @gcwq: gcwq of interest
- * @bwh: hash head as returned by busy_worker_head()
+ * find_worker_executing_work - find worker which is executing a work
+ * @pool: pool of interest
  * @work: work to find worker for
  *
- * Find a worker which is executing @work on @gcwq.  @bwh should be
- * the hash head obtained by calling busy_worker_head() with the same
- * work.
+ * Find a worker which is executing @work on @pool by searching
+ * @pool->busy_hash which is keyed by the address of @work.  For a worker
+ * to match, its current execution should match the address of @work and
+ * its work function.  This is to avoid unwanted dependency between
+ * unrelated work executions through a work item being recycled while still
+ * being executed.
+ *
+ * This is a bit tricky.  A work item may be freed once its execution
+ * starts and nothing prevents the freed area from being recycled for
+ * another work item.  If the same work item address ends up being reused
+ * before the original execution finishes, workqueue will identify the
+ * recycled work item as currently executing and make it wait until the
+ * current execution finishes, introducing an unwanted dependency.
+ *
+ * This function checks the work item address, work function and workqueue
+ * to avoid false positives.  Note that this isn't complete as one may
+ * construct a work function which can introduce dependency onto itself
+ * through a recycled work item.  Well, if somebody wants to shoot oneself
+ * in the foot that badly, there's only so much we can do, and if such
+ * deadlock actually occurs, it should be easy to locate the culprit work
+ * function.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  *
  * RETURNS:
  * Pointer to worker which is executing @work if found, NULL
  * otherwise.
  */
-static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
-                                                  struct hlist_head *bwh,
-                                                  struct work_struct *work)
+static struct worker *find_worker_executing_work(struct worker_pool *pool,
+                                                struct work_struct *work)
 {
        struct worker *worker;
        struct hlist_node *tmp;
 
-       hlist_for_each_entry(worker, tmp, bwh, hentry)
-               if (worker->current_work == work)
+       hash_for_each_possible(pool->busy_hash, worker, tmp, hentry,
+                              (unsigned long)work)
+               if (worker->current_work == work &&
+                   worker->current_func == work->func)
                        return worker;
-       return NULL;
-}
 
-/**
- * find_worker_executing_work - find worker which is executing a work
- * @gcwq: gcwq of interest
- * @work: work to find worker for
- *
- * Find a worker which is executing @work on @gcwq.  This function is
- * identical to __find_worker_executing_work() except that this
- * function calculates @bwh itself.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- *
- * RETURNS:
- * Pointer to worker which is executing @work if found, NULL
- * otherwise.
- */
-static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
-                                                struct work_struct *work)
-{
-       return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
-                                           work);
+       return NULL;
 }
 
 /**
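
The recycling hazard described in the comment above is easy to reproduce in miniature. The snippet below is a hedged userspace model, not kernel code: fake_work, fake_worker, flush_logs and resize_cache are invented names that only mimic how an address-only comparison would mistake a recycled work item for the one still running, while the added function-pointer check does not.

#include <stdio.h>

/* Stand-ins for work_struct and worker; illustration only, not kernel code. */
typedef void (*work_func_t)(void *);

struct fake_work {
	work_func_t func;
};

struct fake_worker {
	struct fake_work *current_work;	/* address captured at dispatch */
	work_func_t current_func;	/* function captured at dispatch */
};

static void flush_logs(void *unused)   { (void)unused; }
static void resize_cache(void *unused) { (void)unused; }

/* Same comparison find_worker_executing_work() performs above. */
static int is_executing(const struct fake_worker *wkr, const struct fake_work *w)
{
	return wkr->current_work == w && wkr->current_func == w->func;
}

int main(void)
{
	static struct fake_work slot;	/* one memory location reused twice */
	struct fake_worker wkr;
	struct fake_work *a = &slot;

	a->func = flush_logs;
	wkr.current_work = a;		/* worker starts executing @a */
	wkr.current_func = a->func;

	/* @a is freed and the same memory is recycled for an unrelated item */
	struct fake_work *b = &slot;
	b->func = resize_cache;

	printf("address-only match:       %d\n", wkr.current_work == b);
	printf("address + function match: %d\n", is_executing(&wkr, b));
	return 0;
}

The first line prints 1 (false positive on the recycled address), the second prints 0, which is why the hash lookup compares both fields.
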
@@ -953,7 +935,7 @@ static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
  * nested inside outer list_for_each_entry_safe().
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
 static void move_linked_works(struct work_struct *work, struct list_head *head,
                              struct work_struct **nextp)
@@ -979,67 +961,67 @@ static void move_linked_works(struct work_struct *work, struct list_head *head,
                *nextp = n;
 }
 
-static void cwq_activate_delayed_work(struct work_struct *work)
+static void pwq_activate_delayed_work(struct work_struct *work)
 {
-       struct cpu_workqueue_struct *cwq = get_work_cwq(work);
+       struct pool_workqueue *pwq = get_work_pwq(work);
 
        trace_workqueue_activate_work(work);
-       move_linked_works(work, &cwq->pool->worklist, NULL);
+       move_linked_works(work, &pwq->pool->worklist, NULL);
        __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
-       cwq->nr_active++;
+       pwq->nr_active++;
 }
 
-static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
+static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
 {
-       struct work_struct *work = list_first_entry(&cwq->delayed_works,
+       struct work_struct *work = list_first_entry(&pwq->delayed_works,
                                                    struct work_struct, entry);
 
-       cwq_activate_delayed_work(work);
+       pwq_activate_delayed_work(work);
 }
 
 /**
- * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
- * @cwq: cwq of interest
+ * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
+ * @pwq: pwq of interest
  * @color: color of work which left the queue
  *
  * A work either has completed or is removed from pending queue,
- * decrement nr_in_flight of its cwq and handle workqueue flushing.
+ * decrement nr_in_flight of its pwq and handle workqueue flushing.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
-static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
+static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
 {
        /* ignore uncolored works */
        if (color == WORK_NO_COLOR)
                return;
 
-       cwq->nr_in_flight[color]--;
+       pwq->nr_in_flight[color]--;
 
-       cwq->nr_active--;
-       if (!list_empty(&cwq->delayed_works)) {
+       pwq->nr_active--;
+       if (!list_empty(&pwq->delayed_works)) {
                /* one down, submit a delayed one */
-               if (cwq->nr_active < cwq->max_active)
-                       cwq_activate_first_delayed(cwq);
+               if (pwq->nr_active < pwq->max_active)
+                       pwq_activate_first_delayed(pwq);
        }
 
        /* is flush in progress and are we at the flushing tip? */
-       if (likely(cwq->flush_color != color))
+       if (likely(pwq->flush_color != color))
                return;
 
        /* are there still in-flight works? */
-       if (cwq->nr_in_flight[color])
+       if (pwq->nr_in_flight[color])
                return;
 
-       /* this cwq is done, clear flush_color */
-       cwq->flush_color = -1;
+       /* this pwq is done, clear flush_color */
+       pwq->flush_color = -1;
 
        /*
-        * If this was the last cwq, wake up the first flusher.  It
+        * If this was the last pwq, wake up the first flusher.  It
         * will handle the rest.
         */
-       if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
-               complete(&cwq->wq->first_flusher->done);
+       if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
+               complete(&pwq->wq->first_flusher->done);
 }
 
 /**
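
The nr_active/max_active bookkeeping that this helper shares with the queueing path boils down to a couple of counters. Below is a minimal userspace sketch under invented names (pwq_model, queue_item, complete_item); it only demonstrates that a work item parked on the delayed list is promoted exactly when an active one retires and headroom under max_active reappears.

#include <stdio.h>

/* Toy model of a pool_workqueue's activity accounting; illustration only. */
struct pwq_model {
	int nr_active;
	int max_active;
	int nr_delayed;	/* items parked on delayed_works */
};

static void queue_item(struct pwq_model *pwq)
{
	if (pwq->nr_active < pwq->max_active)
		pwq->nr_active++;	/* goes straight to pool->worklist */
	else
		pwq->nr_delayed++;	/* parked with WORK_STRUCT_DELAYED */
}

/* mirrors pwq_dec_nr_in_flight(): one down, maybe promote a delayed one */
static void complete_item(struct pwq_model *pwq)
{
	pwq->nr_active--;
	if (pwq->nr_delayed && pwq->nr_active < pwq->max_active) {
		pwq->nr_delayed--;
		pwq->nr_active++;
	}
}

int main(void)
{
	struct pwq_model pwq = { .nr_active = 0, .max_active = 2, .nr_delayed = 0 };
	int i;

	for (i = 0; i < 4; i++)
		queue_item(&pwq);	/* 2 active, 2 delayed */
	printf("after queueing 4:    active=%d delayed=%d\n",
	       pwq.nr_active, pwq.nr_delayed);

	complete_item(&pwq);		/* one retires, one delayed item activates */
	printf("after 1 completion:  active=%d delayed=%d\n",
	       pwq.nr_active, pwq.nr_delayed);
	return 0;
}
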
@@ -1070,7 +1052,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
 static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
                               unsigned long *flags)
 {
-       struct global_cwq *gcwq;
+       struct worker_pool *pool;
+       struct pool_workqueue *pwq;
 
        local_irq_save(*flags);
 
@@ -1095,41 +1078,43 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
         * The queueing is in progress, or it is already queued. Try to
         * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
         */
-       gcwq = get_work_gcwq(work);
-       if (!gcwq)
+       pool = get_work_pool(work);
+       if (!pool)
                goto fail;
 
-       spin_lock(&gcwq->lock);
-       if (!list_empty(&work->entry)) {
+       spin_lock(&pool->lock);
+       /*
+        * work->data is guaranteed to point to pwq only while the work
+        * item is queued on pwq->wq, and both updating work->data to point
+        * to pwq on queueing and to pool on dequeueing are done under
+        * pwq->pool->lock.  This in turn guarantees that, if work->data
+        * points to pwq which is associated with a locked pool, the work
+        * item is currently queued on that pool.
+        */
+       pwq = get_work_pwq(work);
+       if (pwq && pwq->pool == pool) {
+               debug_work_deactivate(work);
+
                /*
-                * This work is queued, but perhaps we locked the wrong gcwq.
-                * In that case we must see the new value after rmb(), see
-                * insert_work()->wmb().
+                * A delayed work item cannot be grabbed directly because
+                * it might have linked NO_COLOR work items which, if left
+                * on the delayed_list, will confuse pwq->nr_active
+                * management later on and cause stall.  Make sure the work
+                * item is activated before grabbing.
                 */
-               smp_rmb();
-               if (gcwq == get_work_gcwq(work)) {
-                       debug_work_deactivate(work);
+               if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
+                       pwq_activate_delayed_work(work);
 
-                       /*
-                        * A delayed work item cannot be grabbed directly
-                        * because it might have linked NO_COLOR work items
-                        * which, if left on the delayed_list, will confuse
-                        * cwq->nr_active management later on and cause
-                        * stall.  Make sure the work item is activated
-                        * before grabbing.
-                        */
-                       if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
-                               cwq_activate_delayed_work(work);
+               list_del_init(&work->entry);
+               pwq_dec_nr_in_flight(get_work_pwq(work), get_work_color(work));
 
-                       list_del_init(&work->entry);
-                       cwq_dec_nr_in_flight(get_work_cwq(work),
-                               get_work_color(work));
+               /* work->data points to pwq iff queued, point to pool */
+               set_work_pool_and_keep_pending(work, pool->id);
 
-                       spin_unlock(&gcwq->lock);
-                       return 1;
-               }
+               spin_unlock(&pool->lock);
+               return 1;
        }
-       spin_unlock(&gcwq->lock);
+       spin_unlock(&pool->lock);
 fail:
        local_irq_restore(*flags);
        if (work_is_canceling(work))
@@ -1139,33 +1124,25 @@ fail:
 }
 
 /**
- * insert_work - insert a work into gcwq
- * @cwq: cwq @work belongs to
+ * insert_work - insert a work into a pool
+ * @pwq: pwq @work belongs to
  * @work: work to insert
  * @head: insertion point
  * @extra_flags: extra WORK_STRUCT_* flags to set
  *
- * Insert @work which belongs to @cwq into @gcwq after @head.
- * @extra_flags is or'd to work_struct flags.
+ * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
+ * work_struct flags.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
-static void insert_work(struct cpu_workqueue_struct *cwq,
-                       struct work_struct *work, struct list_head *head,
-                       unsigned int extra_flags)
+static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
+                       struct list_head *head, unsigned int extra_flags)
 {
-       struct worker_pool *pool = cwq->pool;
+       struct worker_pool *pool = pwq->pool;
 
        /* we own @work, set data and link */
-       set_work_cwq(work, cwq, extra_flags);
-
-       /*
-        * Ensure that we get the right work->data if we see the
-        * result of list_add() below, see try_to_grab_pending().
-        */
-       smp_wmb();
-
+       set_work_pwq(work, pwq, extra_flags);
        list_add_tail(&work->entry, head);
 
        /*
@@ -1181,41 +1158,24 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
 
 /*
  * Test whether @work is being queued from another work executing on the
- * same workqueue.  This is rather expensive and should only be used from
- * cold paths.
+ * same workqueue.
  */
 static bool is_chained_work(struct workqueue_struct *wq)
 {
-       unsigned long flags;
-       unsigned int cpu;
-
-       for_each_gcwq_cpu(cpu) {
-               struct global_cwq *gcwq = get_gcwq(cpu);
-               struct worker *worker;
-               struct hlist_node *pos;
-               int i;
+       struct worker *worker;
 
-               spin_lock_irqsave(&gcwq->lock, flags);
-               for_each_busy_worker(worker, i, pos, gcwq) {
-                       if (worker->task != current)
-                               continue;
-                       spin_unlock_irqrestore(&gcwq->lock, flags);
-                       /*
-                        * I'm @worker, no locking necessary.  See if @work
-                        * is headed to the same workqueue.
-                        */
-                       return worker->current_cwq->wq == wq;
-               }
-               spin_unlock_irqrestore(&gcwq->lock, flags);
-       }
-       return false;
+       worker = current_wq_worker();
+       /*
+        * Return %true iff I'm a worker executing a work item on @wq.  If
+        * I'm @worker, it's safe to dereference it without locking.
+        */
+       return worker && worker->current_pwq->wq == wq;
 }
 
 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
                         struct work_struct *work)
 {
-       struct global_cwq *gcwq;
-       struct cpu_workqueue_struct *cwq;
+       struct pool_workqueue *pwq;
        struct list_head *worklist;
        unsigned int work_flags;
        unsigned int req_cpu = cpu;
@@ -1235,9 +1195,9 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
            WARN_ON_ONCE(!is_chained_work(wq)))
                return;
 
-       /* determine gcwq to use */
+       /* determine the pwq to use */
        if (!(wq->flags & WQ_UNBOUND)) {
-               struct global_cwq *last_gcwq;
+               struct worker_pool *last_pool;
 
                if (cpu == WORK_CPU_UNBOUND)
                        cpu = raw_smp_processor_id();
@@ -1248,55 +1208,54 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
                 * work needs to be queued on that cpu to guarantee
                 * non-reentrancy.
                 */
-               gcwq = get_gcwq(cpu);
-               last_gcwq = get_work_gcwq(work);
+               pwq = get_pwq(cpu, wq);
+               last_pool = get_work_pool(work);
 
-               if (last_gcwq && last_gcwq != gcwq) {
+               if (last_pool && last_pool != pwq->pool) {
                        struct worker *worker;
 
-                       spin_lock(&last_gcwq->lock);
+                       spin_lock(&last_pool->lock);
 
-                       worker = find_worker_executing_work(last_gcwq, work);
+                       worker = find_worker_executing_work(last_pool, work);
 
-                       if (worker && worker->current_cwq->wq == wq)
-                               gcwq = last_gcwq;
-                       else {
+                       if (worker && worker->current_pwq->wq == wq) {
+                               pwq = get_pwq(last_pool->cpu, wq);
+                       } else {
                                /* meh... not running there, queue here */
-                               spin_unlock(&last_gcwq->lock);
-                               spin_lock(&gcwq->lock);
+                               spin_unlock(&last_pool->lock);
+                               spin_lock(&pwq->pool->lock);
                        }
                } else {
-                       spin_lock(&gcwq->lock);
+                       spin_lock(&pwq->pool->lock);
                }
        } else {
-               gcwq = get_gcwq(WORK_CPU_UNBOUND);
-               spin_lock(&gcwq->lock);
+               pwq = get_pwq(WORK_CPU_UNBOUND, wq);
+               spin_lock(&pwq->pool->lock);
        }
 
-       /* gcwq determined, get cwq and queue */
-       cwq = get_cwq(gcwq->cpu, wq);
-       trace_workqueue_queue_work(req_cpu, cwq, work);
+       /* pwq determined, queue */
+       trace_workqueue_queue_work(req_cpu, pwq, work);
 
        if (WARN_ON(!list_empty(&work->entry))) {
-               spin_unlock(&gcwq->lock);
+               spin_unlock(&pwq->pool->lock);
                return;
        }
 
-       cwq->nr_in_flight[cwq->work_color]++;
-       work_flags = work_color_to_flags(cwq->work_color);
+       pwq->nr_in_flight[pwq->work_color]++;
+       work_flags = work_color_to_flags(pwq->work_color);
 
-       if (likely(cwq->nr_active < cwq->max_active)) {
+       if (likely(pwq->nr_active < pwq->max_active)) {
                trace_workqueue_activate_work(work);
-               cwq->nr_active++;
-               worklist = &cwq->pool->worklist;
+               pwq->nr_active++;
+               worklist = &pwq->pool->worklist;
        } else {
                work_flags |= WORK_STRUCT_DELAYED;
-               worklist = &cwq->delayed_works;
+               worklist = &pwq->delayed_works;
        }
 
-       insert_work(cwq, work, worklist, work_flags);
+       insert_work(pwq, work, worklist, work_flags);
 
-       spin_unlock(&gcwq->lock);
+       spin_unlock(&pwq->pool->lock);
 }
 
 /**
@@ -1347,19 +1306,17 @@ EXPORT_SYMBOL_GPL(queue_work);
 void delayed_work_timer_fn(unsigned long __data)
 {
        struct delayed_work *dwork = (struct delayed_work *)__data;
-       struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
 
        /* should have been called from irqsafe timer with irq already off */
-       __queue_work(dwork->cpu, cwq->wq, &dwork->work);
+       __queue_work(dwork->cpu, dwork->wq, &dwork->work);
 }
-EXPORT_SYMBOL_GPL(delayed_work_timer_fn);
+EXPORT_SYMBOL(delayed_work_timer_fn);
 
 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
                                struct delayed_work *dwork, unsigned long delay)
 {
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;
-       unsigned int lcpu;
 
        WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
                     timer->data != (unsigned long)dwork);
@@ -1379,30 +1336,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
 
        timer_stats_timer_set_start_info(&dwork->timer);
 
-       /*
-        * This stores cwq for the moment, for the timer_fn.  Note that the
-        * work's gcwq is preserved to allow reentrance detection for
-        * delayed works.
-        */
-       if (!(wq->flags & WQ_UNBOUND)) {
-               struct global_cwq *gcwq = get_work_gcwq(work);
-
-               /*
-                * If we cannot get the last gcwq from @work directly,
-                * select the last CPU such that it avoids unnecessarily
-                * triggering non-reentrancy check in __queue_work().
-                */
-               lcpu = cpu;
-               if (gcwq)
-                       lcpu = gcwq->cpu;
-               if (lcpu == WORK_CPU_UNBOUND)
-                       lcpu = raw_smp_processor_id();
-       } else {
-               lcpu = WORK_CPU_UNBOUND;
-       }
-
-       set_work_cwq(work, get_cwq(lcpu, wq), 0);
-
+       dwork->wq = wq;
        dwork->cpu = cpu;
        timer->expires = jiffies + delay;
 
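
For callers, the visible effect of recording the target workqueue in the delayed_work itself is that a delayed item no longer needs a per-CPU cwq picked up front. A minimal, hedged module-style sketch of the path exercised above (demo_wq, demo_dwork and demo_fn are invented names; error handling trimmed):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct delayed_work demo_dwork;

static void demo_fn(struct work_struct *work)
{
	pr_info("demo: delayed work ran\n");
}

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("demo", 0, 0);
	if (!demo_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&demo_dwork, demo_fn);
	/*
	 * __queue_delayed_work() now stores demo_wq in demo_dwork.wq, so
	 * delayed_work_timer_fn() can requeue without consulting a cwq.
	 */
	queue_delayed_work(demo_wq, &demo_dwork, HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_dwork);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
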
@@ -1519,12 +1453,11 @@ EXPORT_SYMBOL_GPL(mod_delayed_work);
  * necessary.
  *
  * LOCKING:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
 static void worker_enter_idle(struct worker *worker)
 {
        struct worker_pool *pool = worker->pool;
-       struct global_cwq *gcwq = pool->gcwq;
 
        BUG_ON(worker->flags & WORKER_IDLE);
        BUG_ON(!list_empty(&worker->entry) &&
@@ -1542,14 +1475,14 @@ static void worker_enter_idle(struct worker *worker)
                mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
 
        /*
-        * Sanity check nr_running.  Because gcwq_unbind_fn() releases
-        * gcwq->lock between setting %WORKER_UNBOUND and zapping
+        * Sanity check nr_running.  Because wq_unbind_fn() releases
+        * pool->lock between setting %WORKER_UNBOUND and zapping
         * nr_running, the warning may trigger spuriously.  Check iff
         * unbind is not in progress.
         */
-       WARN_ON_ONCE(!(gcwq->flags & GCWQ_DISASSOCIATED) &&
+       WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
                     pool->nr_workers == pool->nr_idle &&
-                    atomic_read(get_pool_nr_running(pool)));
+                    atomic_read(&pool->nr_running));
 }
 
 /**
@@ -1559,7 +1492,7 @@ static void worker_enter_idle(struct worker *worker)
  * @worker is leaving idle state.  Update stats.
  *
  * LOCKING:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
 static void worker_leave_idle(struct worker *worker)
 {
@@ -1572,7 +1505,7 @@ static void worker_leave_idle(struct worker *worker)
 }
 
 /**
- * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
+ * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock pool
  * @worker: self
  *
  * Works which are scheduled while the cpu is online must at least be
@@ -1584,27 +1517,27 @@ static void worker_leave_idle(struct worker *worker)
  * themselves to the target cpu and may race with cpu going down or
  * coming online.  kthread_bind() can't be used because it may put the
  * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
- * verbatim as it's best effort and blocking and gcwq may be
+ * verbatim as it's best effort and blocking and pool may be
  * [dis]associated in the meantime.
  *
- * This function tries set_cpus_allowed() and locks gcwq and verifies the
- * binding against %GCWQ_DISASSOCIATED which is set during
+ * This function tries set_cpus_allowed() and locks pool and verifies the
+ * binding against %POOL_DISASSOCIATED which is set during
  * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker
  * enters idle state or fetches works without dropping lock, it can
  * guarantee the scheduling requirement described in the first paragraph.
  *
  * CONTEXT:
- * Might sleep.  Called without any lock but returns with gcwq->lock
+ * Might sleep.  Called without any lock but returns with pool->lock
  * held.
  *
  * RETURNS:
- * %true if the associated gcwq is online (@worker is successfully
+ * %true if the associated pool is online (@worker is successfully
  * bound), %false if offline.
  */
 static bool worker_maybe_bind_and_lock(struct worker *worker)
-__acquires(&gcwq->lock)
+__acquires(&pool->lock)
 {
-       struct global_cwq *gcwq = worker->pool->gcwq;
+       struct worker_pool *pool = worker->pool;
        struct task_struct *task = worker->task;
 
        while (true) {
@@ -1612,19 +1545,19 @@ __acquires(&gcwq->lock)
                 * The following call may fail, succeed or succeed
                 * without actually migrating the task to the cpu if
                 * it races with cpu hotunplug operation.  Verify
-                * against GCWQ_DISASSOCIATED.
+                * against POOL_DISASSOCIATED.
                 */
-               if (!(gcwq->flags & GCWQ_DISASSOCIATED))
-                       set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
+               if (!(pool->flags & POOL_DISASSOCIATED))
+                       set_cpus_allowed_ptr(task, get_cpu_mask(pool->cpu));
 
-               spin_lock_irq(&gcwq->lock);
-               if (gcwq->flags & GCWQ_DISASSOCIATED)
+               spin_lock_irq(&pool->lock);
+               if (pool->flags & POOL_DISASSOCIATED)
                        return false;
-               if (task_cpu(task) == gcwq->cpu &&
+               if (task_cpu(task) == pool->cpu &&
                    cpumask_equal(&current->cpus_allowed,
-                                 get_cpu_mask(gcwq->cpu)))
+                                 get_cpu_mask(pool->cpu)))
                        return true;
-               spin_unlock_irq(&gcwq->lock);
+               spin_unlock_irq(&pool->lock);
 
                /*
                 * We've raced with CPU hot[un]plug.  Give it a breather
@@ -1643,15 +1576,13 @@ __acquires(&gcwq->lock)
  */
 static void idle_worker_rebind(struct worker *worker)
 {
-       struct global_cwq *gcwq = worker->pool->gcwq;
-
        /* CPU may go down again inbetween, clear UNBOUND only on success */
        if (worker_maybe_bind_and_lock(worker))
                worker_clr_flags(worker, WORKER_UNBOUND);
 
        /* rebind complete, become available again */
        list_add(&worker->entry, &worker->pool->idle_list);
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&worker->pool->lock);
 }
 
 /*
@@ -1663,19 +1594,18 @@ static void idle_worker_rebind(struct worker *worker)
 static void busy_worker_rebind_fn(struct work_struct *work)
 {
        struct worker *worker = container_of(work, struct worker, rebind_work);
-       struct global_cwq *gcwq = worker->pool->gcwq;
 
        if (worker_maybe_bind_and_lock(worker))
                worker_clr_flags(worker, WORKER_UNBOUND);
 
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&worker->pool->lock);
 }
 
 /**
- * rebind_workers - rebind all workers of a gcwq to the associated CPU
- * @gcwq: gcwq of interest
+ * rebind_workers - rebind all workers of a pool to the associated CPU
+ * @pool: pool of interest
  *
- * @gcwq->cpu is coming online.  Rebind all workers to the CPU.  Rebinding
+ * @pool->cpu is coming online.  Rebind all workers to the CPU.  Rebinding
  * is different for idle and busy ones.
  *
  * Idle ones will be removed from the idle_list and woken up.  They will
@@ -1693,38 +1623,32 @@ static void busy_worker_rebind_fn(struct work_struct *work)
  * including the manager will not appear on @idle_list until rebind is
  * complete, making local wake-ups safe.
  */
-static void rebind_workers(struct global_cwq *gcwq)
+static void rebind_workers(struct worker_pool *pool)
 {
-       struct worker_pool *pool;
        struct worker *worker, *n;
        struct hlist_node *pos;
        int i;
 
-       lockdep_assert_held(&gcwq->lock);
-
-       for_each_worker_pool(pool, gcwq)
-               lockdep_assert_held(&pool->assoc_mutex);
+       lockdep_assert_held(&pool->assoc_mutex);
+       lockdep_assert_held(&pool->lock);
 
        /* dequeue and kick idle ones */
-       for_each_worker_pool(pool, gcwq) {
-               list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
-                       /*
-                        * idle workers should be off @pool->idle_list
-                        * until rebind is complete to avoid receiving
-                        * premature local wake-ups.
-                        */
-                       list_del_init(&worker->entry);
+       list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
+               /*
+                * idle workers should be off @pool->idle_list until rebind
+                * is complete to avoid receiving premature local wake-ups.
+                */
+               list_del_init(&worker->entry);
 
-                       /*
-                        * worker_thread() will see the above dequeuing
-                        * and call idle_worker_rebind().
-                        */
-                       wake_up_process(worker->task);
-               }
+               /*
+                * worker_thread() will see the above dequeuing and call
+                * idle_worker_rebind().
+                */
+               wake_up_process(worker->task);
        }
 
        /* rebind busy workers */
-       for_each_busy_worker(worker, i, pos, gcwq) {
+       for_each_busy_worker(worker, i, pos, pool) {
                struct work_struct *rebind_work = &worker->rebind_work;
                struct workqueue_struct *wq;
 
@@ -1736,16 +1660,16 @@ static void rebind_workers(struct global_cwq *gcwq)
 
                /*
                 * wq doesn't really matter but let's keep @worker->pool
-                * and @cwq->pool consistent for sanity.
+                * and @pwq->pool consistent for sanity.
                 */
-               if (worker_pool_pri(worker->pool))
+               if (std_worker_pool_pri(worker->pool))
                        wq = system_highpri_wq;
                else
                        wq = system_wq;
 
-               insert_work(get_cwq(gcwq->cpu, wq), rebind_work,
-                       worker->scheduled.next,
-                       work_color_to_flags(WORK_NO_COLOR));
+               insert_work(get_pwq(pool->cpu, wq), rebind_work,
+                           worker->scheduled.next,
+                           work_color_to_flags(WORK_NO_COLOR));
        }
 }
 
@@ -1780,19 +1704,18 @@ static struct worker *alloc_worker(void)
  */
 static struct worker *create_worker(struct worker_pool *pool)
 {
-       struct global_cwq *gcwq = pool->gcwq;
-       const char *pri = worker_pool_pri(pool) ? "H" : "";
+       const char *pri = std_worker_pool_pri(pool) ? "H" : "";
        struct worker *worker = NULL;
        int id = -1;
 
-       spin_lock_irq(&gcwq->lock);
+       spin_lock_irq(&pool->lock);
        while (ida_get_new(&pool->worker_ida, &id)) {
-               spin_unlock_irq(&gcwq->lock);
+               spin_unlock_irq(&pool->lock);
                if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL))
                        goto fail;
-               spin_lock_irq(&gcwq->lock);
+               spin_lock_irq(&pool->lock);
        }
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&pool->lock);
 
        worker = alloc_worker();
        if (!worker)
@@ -1801,30 +1724,30 @@ static struct worker *create_worker(struct worker_pool *pool)
        worker->pool = pool;
        worker->id = id;
 
-       if (gcwq->cpu != WORK_CPU_UNBOUND)
+       if (pool->cpu != WORK_CPU_UNBOUND)
                worker->task = kthread_create_on_node(worker_thread,
-                                       worker, cpu_to_node(gcwq->cpu),
-                                       "kworker/%u:%d%s", gcwq->cpu, id, pri);
+                                       worker, cpu_to_node(pool->cpu),
+                                       "kworker/%u:%d%s", pool->cpu, id, pri);
        else
                worker->task = kthread_create(worker_thread, worker,
                                              "kworker/u:%d%s", id, pri);
        if (IS_ERR(worker->task))
                goto fail;
 
-       if (worker_pool_pri(pool))
+       if (std_worker_pool_pri(pool))
                set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);
 
        /*
         * Determine CPU binding of the new worker depending on
-        * %GCWQ_DISASSOCIATED.  The caller is responsible for ensuring the
+        * %POOL_DISASSOCIATED.  The caller is responsible for ensuring the
         * flag remains stable across this function.  See the comments
         * above the flag definition for details.
         *
         * As an unbound worker may later become a regular one if CPU comes
         * online, make sure every worker has %PF_THREAD_BOUND set.
         */
-       if (!(gcwq->flags & GCWQ_DISASSOCIATED)) {
-               kthread_bind(worker->task, gcwq->cpu);
+       if (!(pool->flags & POOL_DISASSOCIATED)) {
+               kthread_bind(worker->task, pool->cpu);
        } else {
                worker->task->flags |= PF_THREAD_BOUND;
                worker->flags |= WORKER_UNBOUND;
@@ -1833,9 +1756,9 @@ static struct worker *create_worker(struct worker_pool *pool)
        return worker;
 fail:
        if (id >= 0) {
-               spin_lock_irq(&gcwq->lock);
+               spin_lock_irq(&pool->lock);
                ida_remove(&pool->worker_ida, id);
-               spin_unlock_irq(&gcwq->lock);
+               spin_unlock_irq(&pool->lock);
        }
        kfree(worker);
        return NULL;
@@ -1845,10 +1768,10 @@ fail:
  * start_worker - start a newly created worker
  * @worker: worker to start
  *
- * Make the gcwq aware of @worker and start it.
+ * Make the pool aware of @worker and start it.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
 static void start_worker(struct worker *worker)
 {
@@ -1862,15 +1785,14 @@ static void start_worker(struct worker *worker)
  * destroy_worker - destroy a workqueue worker
  * @worker: worker to be destroyed
  *
- * Destroy @worker and adjust @gcwq stats accordingly.
+ * Destroy @worker and adjust @pool stats accordingly.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock) which is released and regrabbed.
+ * spin_lock_irq(pool->lock) which is released and regrabbed.
  */
 static void destroy_worker(struct worker *worker)
 {
        struct worker_pool *pool = worker->pool;
-       struct global_cwq *gcwq = pool->gcwq;
        int id = worker->id;
 
        /* sanity check frenzy */
@@ -1885,21 +1807,20 @@ static void destroy_worker(struct worker *worker)
        list_del_init(&worker->entry);
        worker->flags |= WORKER_DIE;
 
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&pool->lock);
 
        kthread_stop(worker->task);
        kfree(worker);
 
-       spin_lock_irq(&gcwq->lock);
+       spin_lock_irq(&pool->lock);
        ida_remove(&pool->worker_ida, id);
 }
 
 static void idle_worker_timeout(unsigned long __pool)
 {
        struct worker_pool *pool = (void *)__pool;
-       struct global_cwq *gcwq = pool->gcwq;
 
-       spin_lock_irq(&gcwq->lock);
+       spin_lock_irq(&pool->lock);
 
        if (too_many_workers(pool)) {
                struct worker *worker;
@@ -1918,20 +1839,20 @@ static void idle_worker_timeout(unsigned long __pool)
                }
        }
 
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&pool->lock);
 }
 
 static bool send_mayday(struct work_struct *work)
 {
-       struct cpu_workqueue_struct *cwq = get_work_cwq(work);
-       struct workqueue_struct *wq = cwq->wq;
+       struct pool_workqueue *pwq = get_work_pwq(work);
+       struct workqueue_struct *wq = pwq->wq;
        unsigned int cpu;
 
        if (!(wq->flags & WQ_RESCUER))
                return false;
 
        /* mayday mayday mayday */
-       cpu = cwq->pool->gcwq->cpu;
+       cpu = pwq->pool->cpu;
        /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
        if (cpu == WORK_CPU_UNBOUND)
                cpu = 0;
@@ -1940,13 +1861,12 @@ static bool send_mayday(struct work_struct *work)
        return true;
 }
 
-static void gcwq_mayday_timeout(unsigned long __pool)
+static void pool_mayday_timeout(unsigned long __pool)
 {
        struct worker_pool *pool = (void *)__pool;
-       struct global_cwq *gcwq = pool->gcwq;
        struct work_struct *work;
 
-       spin_lock_irq(&gcwq->lock);
+       spin_lock_irq(&pool->lock);
 
        if (need_to_create_worker(pool)) {
                /*
@@ -1959,7 +1879,7 @@ static void gcwq_mayday_timeout(unsigned long __pool)
                        send_mayday(work);
        }
 
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&pool->lock);
 
        mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
 }
@@ -1978,24 +1898,22 @@ static void gcwq_mayday_timeout(unsigned long __pool)
  * may_start_working() true.
  *
  * LOCKING:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
+ * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.  Does GFP_KERNEL allocations.  Called only from
  * manager.
  *
  * RETURNS:
- * false if no action was taken and gcwq->lock stayed locked, true
+ * false if no action was taken and pool->lock stayed locked, true
  * otherwise.
  */
 static bool maybe_create_worker(struct worker_pool *pool)
-__releases(&gcwq->lock)
-__acquires(&gcwq->lock)
+__releases(&pool->lock)
+__acquires(&pool->lock)
 {
-       struct global_cwq *gcwq = pool->gcwq;
-
        if (!need_to_create_worker(pool))
                return false;
 restart:
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&pool->lock);
 
        /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
        mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
@@ -2006,7 +1924,7 @@ restart:
                worker = create_worker(pool);
                if (worker) {
                        del_timer_sync(&pool->mayday_timer);
-                       spin_lock_irq(&gcwq->lock);
+                       spin_lock_irq(&pool->lock);
                        start_worker(worker);
                        BUG_ON(need_to_create_worker(pool));
                        return true;
@@ -2023,7 +1941,7 @@ restart:
        }
 
        del_timer_sync(&pool->mayday_timer);
-       spin_lock_irq(&gcwq->lock);
+       spin_lock_irq(&pool->lock);
        if (need_to_create_worker(pool))
                goto restart;
        return true;
@@ -2037,11 +1955,11 @@ restart:
  * IDLE_WORKER_TIMEOUT.
  *
  * LOCKING:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
+ * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.  Called only from manager.
  *
  * RETURNS:
- * false if no action was taken and gcwq->lock stayed locked, true
+ * false if no action was taken and pool->lock stayed locked, true
  * otherwise.
  */
 static bool maybe_destroy_workers(struct worker_pool *pool)
@@ -2071,21 +1989,21 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
  * manage_workers - manage worker pool
  * @worker: self
  *
- * Assume the manager role and manage gcwq worker pool @worker belongs
+ * Assume the manager role and manage the worker pool @worker belongs
  * to.  At any given time, there can be only zero or one manager per
- * gcwq.  The exclusion is handled automatically by this function.
+ * pool.  The exclusion is handled automatically by this function.
  *
  * The caller can safely start processing works on false return.  On
  * true return, it's guaranteed that need_to_create_worker() is false
  * and may_start_working() is true.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
+ * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.  Does GFP_KERNEL allocations.
  *
  * RETURNS:
- * false if no action was taken and gcwq->lock stayed locked, true if
- * some action was taken.
+ * false if no action was taken and pool->lock stayed locked, true if
+ * some action was taken.
  */
 static bool manage_workers(struct worker *worker)
 {
@@ -2107,20 +2025,20 @@ static bool manage_workers(struct worker *worker)
         * manager against CPU hotplug.
         *
         * assoc_mutex would always be free unless CPU hotplug is in
-        * progress.  trylock first without dropping @gcwq->lock.
+        * progress.  trylock first without dropping @pool->lock.
         */
        if (unlikely(!mutex_trylock(&pool->assoc_mutex))) {
-               spin_unlock_irq(&pool->gcwq->lock);
+               spin_unlock_irq(&pool->lock);
                mutex_lock(&pool->assoc_mutex);
                /*
                 * CPU hotplug could have happened while we were waiting
                 * for assoc_mutex.  Hotplug itself can't handle us
                 * because manager isn't either on idle or busy list, and
-                * @gcwq's state and ours could have deviated.
+                * @pool's state and ours could have deviated.
                 *
                 * As hotplug is now excluded via assoc_mutex, we can
                 * simply try to bind.  It will succeed or fail depending
-                * on @gcwq's current state.  Try it and adjust
+                * on @pool's current state.  Try it and adjust
                 * %WORKER_UNBOUND accordingly.
                 */
                if (worker_maybe_bind_and_lock(worker))
@@ -2157,18 +2075,15 @@ static bool manage_workers(struct worker *worker)
  * call this function to process a work.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock) which is released and regrabbed.
+ * spin_lock_irq(pool->lock) which is released and regrabbed.
  */
 static void process_one_work(struct worker *worker, struct work_struct *work)
-__releases(&gcwq->lock)
-__acquires(&gcwq->lock)
+__releases(&pool->lock)
+__acquires(&pool->lock)
 {
-       struct cpu_workqueue_struct *cwq = get_work_cwq(work);
+       struct pool_workqueue *pwq = get_work_pwq(work);
        struct worker_pool *pool = worker->pool;
-       struct global_cwq *gcwq = pool->gcwq;
-       struct hlist_head *bwh = busy_worker_head(gcwq, work);
-       bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
-       work_func_t f = work->func;
+       bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
        int work_color;
        struct worker *collision;
 #ifdef CONFIG_LOCKDEP
@@ -2186,11 +2101,11 @@ __acquires(&gcwq->lock)
        /*
         * Ensure we're on the correct CPU.  DISASSOCIATED test is
         * necessary to avoid spurious warnings from rescuers servicing the
-        * unbound or a disassociated gcwq.
+        * unbound or a disassociated pool.
         */
        WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
-                    !(gcwq->flags & GCWQ_DISASSOCIATED) &&
-                    raw_smp_processor_id() != gcwq->cpu);
+                    !(pool->flags & POOL_DISASSOCIATED) &&
+                    raw_smp_processor_id() != pool->cpu);
 
        /*
         * A single work shouldn't be executed concurrently by
@@ -2198,7 +2113,7 @@ __acquires(&gcwq->lock)
         * already processing the work.  If so, defer the work to the
         * currently executing one.
         */
-       collision = __find_worker_executing_work(gcwq, bwh, work);
+       collision = find_worker_executing_work(pool, work);
        if (unlikely(collision)) {
                move_linked_works(work, &collision->scheduled, NULL);
                return;
@@ -2206,9 +2121,10 @@ __acquires(&gcwq->lock)
 
        /* claim and dequeue */
        debug_work_deactivate(work);
-       hlist_add_head(&worker->hentry, bwh);
+       hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
        worker->current_work = work;
-       worker->current_cwq = cwq;
+       worker->current_func = work->func;
+       worker->current_pwq = pwq;
        work_color = get_work_color(work);
 
        list_del_init(&work->entry);
@@ -2221,53 +2137,55 @@ __acquires(&gcwq->lock)
                worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
 
        /*
-        * Unbound gcwq isn't concurrency managed and work items should be
+        * Unbound pool isn't concurrency managed and work items should be
         * executed ASAP.  Wake up another worker if necessary.
         */
        if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
                wake_up_worker(pool);
 
        /*
-        * Record the last CPU and clear PENDING which should be the last
-        * update to @work.  Also, do this inside @gcwq->lock so that
+        * Record the last pool and clear PENDING which should be the last
+        * update to @work.  Also, do this inside @pool->lock so that
         * PENDING and queued state changes happen together while IRQ is
         * disabled.
         */
-       set_work_cpu_and_clear_pending(work, gcwq->cpu);
+       set_work_pool_and_clear_pending(work, pool->id);
 
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&pool->lock);
 
-       lock_map_acquire_read(&cwq->wq->lockdep_map);
+       lock_map_acquire_read(&pwq->wq->lockdep_map);
        lock_map_acquire(&lockdep_map);
        trace_workqueue_execute_start(work);
-       f(work);
+       worker->current_func(work);
        /*
         * While we must be careful to not use "work" after this, the trace
         * point will only record its address.
         */
        trace_workqueue_execute_end(work);
        lock_map_release(&lockdep_map);
-       lock_map_release(&cwq->wq->lockdep_map);
+       lock_map_release(&pwq->wq->lockdep_map);
 
        if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
                       "     last function: %pf\n",
-                      current->comm, preempt_count(), task_pid_nr(current), f);
+                      current->comm, preempt_count(), task_pid_nr(current),
+                      worker->current_func);
                debug_show_held_locks(current);
                dump_stack();
        }
 
-       spin_lock_irq(&gcwq->lock);
+       spin_lock_irq(&pool->lock);
 
        /* clear cpu intensive status */
        if (unlikely(cpu_intensive))
                worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
 
        /* we're done with it, release */
-       hlist_del_init(&worker->hentry);
+       hash_del(&worker->hentry);
        worker->current_work = NULL;
-       worker->current_cwq = NULL;
-       cwq_dec_nr_in_flight(cwq, work_color);
+       worker->current_func = NULL;
+       worker->current_pwq = NULL;
+       pwq_dec_nr_in_flight(pwq, work_color);
 }
 
 /**
@@ -2279,7 +2197,7 @@ __acquires(&gcwq->lock)
  * fetches a work from the top and executes it.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
+ * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.
  */
 static void process_scheduled_works(struct worker *worker)
@@ -2295,8 +2213,8 @@ static void process_scheduled_works(struct worker *worker)
  * worker_thread - the worker thread function
  * @__worker: self
  *
- * The gcwq worker thread function.  There's a single dynamic pool of
- * these per each cpu.  These workers process all works regardless of
+ * The worker thread function.  There are NR_CPU_WORKER_POOLS dynamic pools
+ * of these per cpu.  These workers process all works regardless of
  * their specific target workqueue.  The only exception is works which
  * belong to workqueues with a rescuer which will be explained in
  * rescuer_thread().
@@ -2305,16 +2223,15 @@ static int worker_thread(void *__worker)
 {
        struct worker *worker = __worker;
        struct worker_pool *pool = worker->pool;
-       struct global_cwq *gcwq = pool->gcwq;
 
        /* tell the scheduler that this is a workqueue worker */
        worker->task->flags |= PF_WQ_WORKER;
 woke_up:
-       spin_lock_irq(&gcwq->lock);
+       spin_lock_irq(&pool->lock);
 
        /* we are off idle list if destruction or rebind is requested */
        if (unlikely(list_empty(&worker->entry))) {
-               spin_unlock_irq(&gcwq->lock);
+               spin_unlock_irq(&pool->lock);
 
                /* if DIE is set, destruction is requested */
                if (worker->flags & WORKER_DIE) {
@@ -2373,52 +2290,59 @@ sleep:
                goto recheck;
 
        /*
-        * gcwq->lock is held and there's no work to process and no
-        * need to manage, sleep.  Workers are woken up only while
-        * holding gcwq->lock or from local cpu, so setting the
-        * current state before releasing gcwq->lock is enough to
-        * prevent losing any event.
+        * pool->lock is held and there's no work to process and no need to
+        * manage, sleep.  Workers are woken up only while holding
+        * pool->lock or from local cpu, so setting the current state
+        * before releasing pool->lock is enough to prevent losing any
+        * event.
         */
        worker_enter_idle(worker);
        __set_current_state(TASK_INTERRUPTIBLE);
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&pool->lock);
        schedule();
        goto woke_up;
 }
 
 /**
  * rescuer_thread - the rescuer thread function
- * @__wq: the associated workqueue
+ * @__rescuer: self
  *
  * Workqueue rescuer thread function.  There's one rescuer for each
  * workqueue which has WQ_RESCUER set.
  *
- * Regular work processing on a gcwq may block trying to create a new
+ * Regular work processing on a pool may block trying to create a new
  * worker which uses GFP_KERNEL allocation which has slight chance of
  * developing into deadlock if some works currently on the same queue
  * need to be processed to satisfy the GFP_KERNEL allocation.  This is
  * the problem rescuer solves.
  *
- * When such condition is possible, the gcwq summons rescuers of all
- * workqueues which have works queued on the gcwq and let them process
+ * When such condition is possible, the pool summons rescuers of all
+ * workqueues which have works queued on the pool and let them process
  * those works so that forward progress can be guaranteed.
  *
  * This should happen rarely.
  */
-static int rescuer_thread(void *__wq)
+static int rescuer_thread(void *__rescuer)
 {
-       struct workqueue_struct *wq = __wq;
-       struct worker *rescuer = wq->rescuer;
+       struct worker *rescuer = __rescuer;
+       struct workqueue_struct *wq = rescuer->rescue_wq;
        struct list_head *scheduled = &rescuer->scheduled;
        bool is_unbound = wq->flags & WQ_UNBOUND;
        unsigned int cpu;
 
        set_user_nice(current, RESCUER_NICE_LEVEL);
+
+       /*
+        * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
+        * doesn't participate in concurrency management.
+        */
+       rescuer->task->flags |= PF_WQ_WORKER;
 repeat:
        set_current_state(TASK_INTERRUPTIBLE);
 
        if (kthread_should_stop()) {
                __set_current_state(TASK_RUNNING);
+               rescuer->task->flags &= ~PF_WQ_WORKER;
                return 0;
        }
 
@@ -2428,9 +2352,8 @@ repeat:
         */
        for_each_mayday_cpu(cpu, wq->mayday_mask) {
                unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
-               struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
-               struct worker_pool *pool = cwq->pool;
-               struct global_cwq *gcwq = pool->gcwq;
+               struct pool_workqueue *pwq = get_pwq(tcpu, wq);
+               struct worker_pool *pool = pwq->pool;
                struct work_struct *work, *n;
 
                __set_current_state(TASK_RUNNING);
@@ -2446,22 +2369,24 @@ repeat:
                 */
                BUG_ON(!list_empty(&rescuer->scheduled));
                list_for_each_entry_safe(work, n, &pool->worklist, entry)
-                       if (get_work_cwq(work) == cwq)
+                       if (get_work_pwq(work) == pwq)
                                move_linked_works(work, scheduled, &n);
 
                process_scheduled_works(rescuer);
 
                /*
-                * Leave this gcwq.  If keep_working() is %true, notify a
+                * Leave this pool.  If keep_working() is %true, notify a
                 * regular worker; otherwise, we end up with 0 concurrency
                 * and stalling the execution.
                 */
                if (keep_working(pool))
                        wake_up_worker(pool);
 
-               spin_unlock_irq(&gcwq->lock);
+               spin_unlock_irq(&pool->lock);
        }
 
+       /* rescuers should never participate in concurrency management */
+       WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
        schedule();
        goto repeat;
 }
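
The practical consequence for workqueue users is that any queue that must make progress during memory reclaim should be created with WQ_MEM_RECLAIM so a rescuer exists. A hedged fragment (demo_reclaim_wq and demo_reclaim_setup are invented names) might look like:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_reclaim_wq;

static int demo_reclaim_setup(void)
{
	/*
	 * WQ_MEM_RECLAIM gives this queue a dedicated rescuer thread, so
	 * its work items can still run when creating a new worker would
	 * itself need memory under reclaim.
	 */
	demo_reclaim_wq = alloc_workqueue("demo_reclaim", WQ_MEM_RECLAIM, 1);
	return demo_reclaim_wq ? 0 : -ENOMEM;
}
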
@@ -2479,7 +2404,7 @@ static void wq_barrier_func(struct work_struct *work)
 
 /**
  * insert_wq_barrier - insert a barrier work
- * @cwq: cwq to insert barrier into
+ * @pwq: pwq to insert barrier into
  * @barr: wq_barrier to insert
  * @target: target work to attach @barr to
  * @worker: worker currently executing @target, NULL if @target is not executing
@@ -2496,12 +2421,12 @@ static void wq_barrier_func(struct work_struct *work)
  * after a work with LINKED flag set.
  *
  * Note that when @worker is non-NULL, @target may be modified
- * underneath us, so we can't reliably determine cwq from @target.
+ * underneath us, so we can't reliably determine pwq from @target.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
-static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
+static void insert_wq_barrier(struct pool_workqueue *pwq,
                              struct wq_barrier *barr,
                              struct work_struct *target, struct worker *worker)
 {
@@ -2509,7 +2434,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
        unsigned int linked = 0;
 
        /*
-        * debugobject calls are safe here even with gcwq->lock locked
+        * debugobject calls are safe here even with pool->lock locked
         * as we know for sure that this will not trigger any of the
         * checks and call back into the fixup functions where we
         * might deadlock.
@@ -2534,23 +2459,23 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
        }
 
        debug_work_activate(&barr->work);
-       insert_work(cwq, &barr->work, head,
+       insert_work(pwq, &barr->work, head,
                    work_color_to_flags(WORK_NO_COLOR) | linked);
 }
 
 /**
- * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
+ * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
  * @wq: workqueue being flushed
  * @flush_color: new flush color, < 0 for no-op
  * @work_color: new work color, < 0 for no-op
  *
- * Prepare cwqs for workqueue flushing.
+ * Prepare pwqs for workqueue flushing.
  *
- * If @flush_color is non-negative, flush_color on all cwqs should be
- * -1.  If no cwq has in-flight commands at the specified color, all
- * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
- * has in flight commands, its cwq->flush_color is set to
- * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
+ * If @flush_color is non-negative, flush_color on all pwqs should be
+ * -1.  If no pwq has in-flight commands at the specified color, all
+ * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
+ * has in flight commands, its pwq->flush_color is set to
+ * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
  * wakeup logic is armed and %true is returned.
  *
  * The caller should have initialized @wq->first_flusher prior to
@@ -2558,7 +2483,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
  * @flush_color is negative, no flush color update is done and %false
  * is returned.
  *
- * If @work_color is non-negative, all cwqs should have the same
+ * If @work_color is non-negative, all pwqs should have the same
  * work_color which is previous to @work_color and all will be
  * advanced to @work_color.
  *
@@ -2569,42 +2494,42 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
  * %true if @flush_color >= 0 and there's something to flush.  %false
  * otherwise.
  */
-static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
+static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
                                      int flush_color, int work_color)
 {
        bool wait = false;
        unsigned int cpu;
 
        if (flush_color >= 0) {
-               BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
-               atomic_set(&wq->nr_cwqs_to_flush, 1);
+               BUG_ON(atomic_read(&wq->nr_pwqs_to_flush));
+               atomic_set(&wq->nr_pwqs_to_flush, 1);
        }
 
-       for_each_cwq_cpu(cpu, wq) {
-               struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
-               struct global_cwq *gcwq = cwq->pool->gcwq;
+       for_each_pwq_cpu(cpu, wq) {
+               struct pool_workqueue *pwq = get_pwq(cpu, wq);
+               struct worker_pool *pool = pwq->pool;
 
-               spin_lock_irq(&gcwq->lock);
+               spin_lock_irq(&pool->lock);
 
                if (flush_color >= 0) {
-                       BUG_ON(cwq->flush_color != -1);
+                       BUG_ON(pwq->flush_color != -1);
 
-                       if (cwq->nr_in_flight[flush_color]) {
-                               cwq->flush_color = flush_color;
-                               atomic_inc(&wq->nr_cwqs_to_flush);
+                       if (pwq->nr_in_flight[flush_color]) {
+                               pwq->flush_color = flush_color;
+                               atomic_inc(&wq->nr_pwqs_to_flush);
                                wait = true;
                        }
                }
 
                if (work_color >= 0) {
-                       BUG_ON(work_color != work_next_color(cwq->work_color));
-                       cwq->work_color = work_color;
+                       BUG_ON(work_color != work_next_color(pwq->work_color));
+                       pwq->work_color = work_color;
                }
 
-               spin_unlock_irq(&gcwq->lock);
+               spin_unlock_irq(&pool->lock);
        }
 
-       if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
+       if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
                complete(&wq->first_flusher->done);
 
        return wait;
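
The nr_pwqs_to_flush handling in this helper uses a "bias by one" pattern: the counter starts at 1 so the completion cannot fire while pwqs are still being inspected, and the final decrement drops the bias. A minimal userspace sketch of the two outcomes (busy[], prep_flush and flush_done are invented; the kernel uses atomics and completes from pwq_dec_nr_in_flight() when a busy pwq drains):

#include <stdio.h>

static int nr_to_flush;
static int flush_done;

static void complete_flush(void)
{
	flush_done = 1;
}

/* mirrors flush_workqueue_prep_pwqs(): bias the counter, count busy pwqs */
static void prep_flush(const int *busy, int n)
{
	int i;

	nr_to_flush = 1;		/* bias: hold the completion open */
	for (i = 0; i < n; i++)
		if (busy[i])
			nr_to_flush++;	/* each busy pwq keeps it open */

	if (--nr_to_flush == 0)		/* drop the bias */
		complete_flush();	/* fires only if nothing was busy */
}

int main(void)
{
	const int idle[3] = { 0, 0, 0 };
	const int busy[3] = { 0, 1, 0 };

	prep_flush(idle, 3);
	printf("all pwqs idle: completed immediately = %d\n", flush_done);

	flush_done = 0;
	prep_flush(busy, 3);
	printf("one pwq busy:  completed immediately = %d\n", flush_done);
	return 0;
}
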
@@ -2655,7 +2580,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 
                        wq->first_flusher = &this_flusher;
 
-                       if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
+                       if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
                                                       wq->work_color)) {
                                /* nothing to flush, done */
                                wq->flush_color = next_color;
@@ -2666,7 +2591,7 @@ void flush_workqueue(struct workqueue_struct *wq)
                        /* wait in queue */
                        BUG_ON(wq->flush_color == this_flusher.flush_color);
                        list_add_tail(&this_flusher.list, &wq->flusher_queue);
-                       flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
+                       flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
                }
        } else {
                /*
@@ -2733,7 +2658,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 
                        list_splice_tail_init(&wq->flusher_overflow,
                                              &wq->flusher_queue);
-                       flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
+                       flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
                }
 
                if (list_empty(&wq->flusher_queue)) {
@@ -2743,7 +2668,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 
                /*
                 * Need to flush more colors.  Make the next flusher
-                * the new first flusher and arm cwqs.
+                * the new first flusher and arm pwqs.
                 */
                BUG_ON(wq->flush_color == wq->work_color);
                BUG_ON(wq->flush_color != next->flush_color);
@@ -2751,7 +2676,7 @@ void flush_workqueue(struct workqueue_struct *wq)
                list_del_init(&next->list);
                wq->first_flusher = next;
 
-               if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
+               if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
                        break;
 
                /*
@@ -2794,13 +2719,13 @@ void drain_workqueue(struct workqueue_struct *wq)
 reflush:
        flush_workqueue(wq);
 
-       for_each_cwq_cpu(cpu, wq) {
-               struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+       for_each_pwq_cpu(cpu, wq) {
+               struct pool_workqueue *pwq = get_pwq(cpu, wq);
                bool drained;
 
-               spin_lock_irq(&cwq->pool->gcwq->lock);
-               drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
-               spin_unlock_irq(&cwq->pool->gcwq->lock);
+               spin_lock_irq(&pwq->pool->lock);
+               drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
+               spin_unlock_irq(&pwq->pool->lock);
 
                if (drained)
                        continue;
@@ -2822,34 +2747,29 @@ EXPORT_SYMBOL_GPL(drain_workqueue);
 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 {
        struct worker *worker = NULL;
-       struct global_cwq *gcwq;
-       struct cpu_workqueue_struct *cwq;
+       struct worker_pool *pool;
+       struct pool_workqueue *pwq;
 
        might_sleep();
-       gcwq = get_work_gcwq(work);
-       if (!gcwq)
+       pool = get_work_pool(work);
+       if (!pool)
                return false;
 
-       spin_lock_irq(&gcwq->lock);
-       if (!list_empty(&work->entry)) {
-               /*
-                * See the comment near try_to_grab_pending()->smp_rmb().
-                * If it was re-queued to a different gcwq under us, we
-                * are not going to wait.
-                */
-               smp_rmb();
-               cwq = get_work_cwq(work);
-               if (unlikely(!cwq || gcwq != cwq->pool->gcwq))
+       spin_lock_irq(&pool->lock);
+       /* see the comment in try_to_grab_pending() with the same code */
+       pwq = get_work_pwq(work);
+       if (pwq) {
+               if (unlikely(pwq->pool != pool))
                        goto already_gone;
        } else {
-               worker = find_worker_executing_work(gcwq, work);
+               worker = find_worker_executing_work(pool, work);
                if (!worker)
                        goto already_gone;
-               cwq = worker->current_cwq;
+               pwq = worker->current_pwq;
        }
 
-       insert_wq_barrier(cwq, barr, work, worker);
-       spin_unlock_irq(&gcwq->lock);
+       insert_wq_barrier(pwq, barr, work, worker);
+       spin_unlock_irq(&pool->lock);
 
        /*
         * If @max_active is 1 or rescuer is in use, flushing another work
@@ -2857,15 +2777,15 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
         * flusher is not running on the same workqueue by verifying write
         * access.
         */
-       if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
-               lock_map_acquire(&cwq->wq->lockdep_map);
+       if (pwq->wq->saved_max_active == 1 || pwq->wq->flags & WQ_RESCUER)
+               lock_map_acquire(&pwq->wq->lockdep_map);
        else
-               lock_map_acquire_read(&cwq->wq->lockdep_map);
-       lock_map_release(&cwq->wq->lockdep_map);
+               lock_map_acquire_read(&pwq->wq->lockdep_map);
+       lock_map_release(&pwq->wq->lockdep_map);
 
        return true;
 already_gone:
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&pool->lock);
        return false;
 }
 
@@ -2961,8 +2881,7 @@ bool flush_delayed_work(struct delayed_work *dwork)
 {
        local_irq_disable();
        if (del_timer_sync(&dwork->timer))
-               __queue_work(dwork->cpu,
-                            get_work_cwq(&dwork->work)->wq, &dwork->work);
+               __queue_work(dwork->cpu, dwork->wq, &dwork->work);
        local_irq_enable();
        return flush_work(&dwork->work);
 }
@@ -2992,7 +2911,8 @@ bool cancel_delayed_work(struct delayed_work *dwork)
        if (unlikely(ret < 0))
                return false;
 
-       set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
+       set_work_pool_and_clear_pending(&dwork->work,
+                                       get_work_pool_id(&dwork->work));
        local_irq_restore(flags);
        return ret;
 }
@@ -3171,46 +3091,46 @@ int keventd_up(void)
        return system_wq != NULL;
 }
 
-static int alloc_cwqs(struct workqueue_struct *wq)
+static int alloc_pwqs(struct workqueue_struct *wq)
 {
        /*
-        * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
+        * pwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
         * Make sure that the alignment isn't lower than that of
         * unsigned long long.
         */
-       const size_t size = sizeof(struct cpu_workqueue_struct);
+       const size_t size = sizeof(struct pool_workqueue);
        const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
                                   __alignof__(unsigned long long));
 
        if (!(wq->flags & WQ_UNBOUND))
-               wq->cpu_wq.pcpu = __alloc_percpu(size, align);
+               wq->pool_wq.pcpu = __alloc_percpu(size, align);
        else {
                void *ptr;
 
                /*
-                * Allocate enough room to align cwq and put an extra
+                * Allocate enough room to align pwq and put an extra
                 * pointer at the end pointing back to the originally
                 * allocated pointer which will be used for free.
                 */
                ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
                if (ptr) {
-                       wq->cpu_wq.single = PTR_ALIGN(ptr, align);
-                       *(void **)(wq->cpu_wq.single + 1) = ptr;
+                       wq->pool_wq.single = PTR_ALIGN(ptr, align);
+                       *(void **)(wq->pool_wq.single + 1) = ptr;
                }
        }
 
        /* just in case, make sure it's actually aligned */
-       BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
-       return wq->cpu_wq.v ? 0 : -ENOMEM;
+       BUG_ON(!IS_ALIGNED(wq->pool_wq.v, align));
+       return wq->pool_wq.v ? 0 : -ENOMEM;
 }
 
-static void free_cwqs(struct workqueue_struct *wq)
+static void free_pwqs(struct workqueue_struct *wq)
 {
        if (!(wq->flags & WQ_UNBOUND))
-               free_percpu(wq->cpu_wq.pcpu);
-       else if (wq->cpu_wq.single) {
-               /* the pointer to free is stored right after the cwq */
-               kfree(*(void **)(wq->cpu_wq.single + 1));
+               free_percpu(wq->pool_wq.pcpu);
+       else if (wq->pool_wq.single) {
+               /* the pointer to free is stored right after the pwq */
+               kfree(*(void **)(wq->pool_wq.single + 1));
        }
 }
 
@@ -3264,27 +3184,25 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
        wq->flags = flags;
        wq->saved_max_active = max_active;
        mutex_init(&wq->flush_mutex);
-       atomic_set(&wq->nr_cwqs_to_flush, 0);
+       atomic_set(&wq->nr_pwqs_to_flush, 0);
        INIT_LIST_HEAD(&wq->flusher_queue);
        INIT_LIST_HEAD(&wq->flusher_overflow);
 
        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
        INIT_LIST_HEAD(&wq->list);
 
-       if (alloc_cwqs(wq) < 0)
+       if (alloc_pwqs(wq) < 0)
                goto err;
 
-       for_each_cwq_cpu(cpu, wq) {
-               struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
-               struct global_cwq *gcwq = get_gcwq(cpu);
-               int pool_idx = (bool)(flags & WQ_HIGHPRI);
-
-               BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
-               cwq->pool = &gcwq->pools[pool_idx];
-               cwq->wq = wq;
-               cwq->flush_color = -1;
-               cwq->max_active = max_active;
-               INIT_LIST_HEAD(&cwq->delayed_works);
+       for_each_pwq_cpu(cpu, wq) {
+               struct pool_workqueue *pwq = get_pwq(cpu, wq);
+
+               BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
+               pwq->pool = get_std_worker_pool(cpu, flags & WQ_HIGHPRI);
+               pwq->wq = wq;
+               pwq->flush_color = -1;
+               pwq->max_active = max_active;
+               INIT_LIST_HEAD(&pwq->delayed_works);
        }
 
        if (flags & WQ_RESCUER) {
@@ -3297,7 +3215,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
                if (!rescuer)
                        goto err;
 
-               rescuer->task = kthread_create(rescuer_thread, wq, "%s",
+               rescuer->rescue_wq = wq;
+               rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
                                               wq->name);
                if (IS_ERR(rescuer->task))
                        goto err;
@@ -3314,8 +3233,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
        spin_lock(&workqueue_lock);
 
        if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
-               for_each_cwq_cpu(cpu, wq)
-                       get_cwq(cpu, wq)->max_active = 0;
+               for_each_pwq_cpu(cpu, wq)
+                       get_pwq(cpu, wq)->max_active = 0;
 
        list_add(&wq->list, &workqueues);
 
@@ -3324,7 +3243,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
        return wq;
 err:
        if (wq) {
-               free_cwqs(wq);
+               free_pwqs(wq);
                free_mayday_mask(wq->mayday_mask);
                kfree(wq->rescuer);
                kfree(wq);
@@ -3355,14 +3274,14 @@ void destroy_workqueue(struct workqueue_struct *wq)
        spin_unlock(&workqueue_lock);
 
        /* sanity check */
-       for_each_cwq_cpu(cpu, wq) {
-               struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+       for_each_pwq_cpu(cpu, wq) {
+               struct pool_workqueue *pwq = get_pwq(cpu, wq);
                int i;
 
                for (i = 0; i < WORK_NR_COLORS; i++)
-                       BUG_ON(cwq->nr_in_flight[i]);
-               BUG_ON(cwq->nr_active);
-               BUG_ON(!list_empty(&cwq->delayed_works));
+                       BUG_ON(pwq->nr_in_flight[i]);
+               BUG_ON(pwq->nr_active);
+               BUG_ON(!list_empty(&pwq->delayed_works));
        }
 
        if (wq->flags & WQ_RESCUER) {
@@ -3371,29 +3290,29 @@ void destroy_workqueue(struct workqueue_struct *wq)
                kfree(wq->rescuer);
        }
 
-       free_cwqs(wq);
+       free_pwqs(wq);
        kfree(wq);
 }
 EXPORT_SYMBOL_GPL(destroy_workqueue);
 
 /**
- * cwq_set_max_active - adjust max_active of a cwq
- * @cwq: target cpu_workqueue_struct
+ * pwq_set_max_active - adjust max_active of a pwq
+ * @pwq: target pool_workqueue
  * @max_active: new max_active value.
  *
- * Set @cwq->max_active to @max_active and activate delayed works if
+ * Set @pwq->max_active to @max_active and activate delayed works if
  * increased.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
-static void cwq_set_max_active(struct cpu_workqueue_struct *cwq, int max_active)
+static void pwq_set_max_active(struct pool_workqueue *pwq, int max_active)
 {
-       cwq->max_active = max_active;
+       pwq->max_active = max_active;
 
-       while (!list_empty(&cwq->delayed_works) &&
-              cwq->nr_active < cwq->max_active)
-               cwq_activate_first_delayed(cwq);
+       while (!list_empty(&pwq->delayed_works) &&
+              pwq->nr_active < pwq->max_active)
+               pwq_activate_first_delayed(pwq);
 }
 
 /**
@@ -3416,16 +3335,17 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 
        wq->saved_max_active = max_active;
 
-       for_each_cwq_cpu(cpu, wq) {
-               struct global_cwq *gcwq = get_gcwq(cpu);
+       for_each_pwq_cpu(cpu, wq) {
+               struct pool_workqueue *pwq = get_pwq(cpu, wq);
+               struct worker_pool *pool = pwq->pool;
 
-               spin_lock_irq(&gcwq->lock);
+               spin_lock_irq(&pool->lock);
 
                if (!(wq->flags & WQ_FREEZABLE) ||
-                   !(gcwq->flags & GCWQ_FREEZING))
-                       cwq_set_max_active(get_cwq(gcwq->cpu, wq), max_active);
+                   !(pool->flags & POOL_FREEZING))
+                       pwq_set_max_active(pwq, max_active);
 
-               spin_unlock_irq(&gcwq->lock);
+               spin_unlock_irq(&pool->lock);
        }
 
        spin_unlock(&workqueue_lock);
@@ -3446,27 +3366,12 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active);
  */
 bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
 {
-       struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+       struct pool_workqueue *pwq = get_pwq(cpu, wq);
 
-       return !list_empty(&cwq->delayed_works);
+       return !list_empty(&pwq->delayed_works);
 }
 EXPORT_SYMBOL_GPL(workqueue_congested);
 
-/**
- * work_cpu - return the last known associated cpu for @work
- * @work: the work of interest
- *
- * RETURNS:
- * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise.
- */
-unsigned int work_cpu(struct work_struct *work)
-{
-       struct global_cwq *gcwq = get_work_gcwq(work);
-
-       return gcwq ? gcwq->cpu : WORK_CPU_NONE;
-}
-EXPORT_SYMBOL_GPL(work_cpu);
-
 /**
  * work_busy - test whether a work is currently pending or running
  * @work: the work to be tested
@@ -3474,29 +3379,25 @@ EXPORT_SYMBOL_GPL(work_cpu);
  * Test whether @work is currently pending or running.  There is no
  * synchronization around this function and the test result is
  * unreliable and only useful as advisory hints or for debugging.
- * Especially for reentrant wqs, the pending state might hide the
- * running state.
  *
  * RETURNS:
  * OR'd bitmask of WORK_BUSY_* bits.
  */
 unsigned int work_busy(struct work_struct *work)
 {
-       struct global_cwq *gcwq = get_work_gcwq(work);
+       struct worker_pool *pool = get_work_pool(work);
        unsigned long flags;
        unsigned int ret = 0;
 
-       if (!gcwq)
-               return 0;
-
-       spin_lock_irqsave(&gcwq->lock, flags);
-
        if (work_pending(work))
                ret |= WORK_BUSY_PENDING;
-       if (find_worker_executing_work(gcwq, work))
-               ret |= WORK_BUSY_RUNNING;
 
-       spin_unlock_irqrestore(&gcwq->lock, flags);
+       if (pool) {
+               spin_lock_irqsave(&pool->lock, flags);
+               if (find_worker_executing_work(pool, work))
+                       ret |= WORK_BUSY_RUNNING;
+               spin_unlock_irqrestore(&pool->lock, flags);
+       }
 
        return ret;
 }
@@ -3506,65 +3407,49 @@ EXPORT_SYMBOL_GPL(work_busy);
  * CPU hotplug.
  *
  * There are two challenges in supporting CPU hotplug.  Firstly, there
- * are a lot of assumptions on strong associations among work, cwq and
- * gcwq which make migrating pending and scheduled works very
+ * are a lot of assumptions on strong associations among work, pwq and
+ * pool which make migrating pending and scheduled works very
  * difficult to implement without impacting hot paths.  Secondly,
- * gcwqs serve mix of short, long and very long running works making
+ * worker pools serve a mix of short, long and very long running works making
  * blocked draining impractical.
  *
- * This is solved by allowing a gcwq to be disassociated from the CPU
+ * This is solved by allowing the pools to be disassociated from the CPU
+ * running as unbound ones and allowing them to be reattached later if the
  * cpu comes back online.
  */
 
-/* claim manager positions of all pools */
-static void gcwq_claim_assoc_and_lock(struct global_cwq *gcwq)
+static void wq_unbind_fn(struct work_struct *work)
 {
-       struct worker_pool *pool;
-
-       for_each_worker_pool(pool, gcwq)
-               mutex_lock_nested(&pool->assoc_mutex, pool - gcwq->pools);
-       spin_lock_irq(&gcwq->lock);
-}
-
-/* release manager positions */
-static void gcwq_release_assoc_and_unlock(struct global_cwq *gcwq)
-{
-       struct worker_pool *pool;
-
-       spin_unlock_irq(&gcwq->lock);
-       for_each_worker_pool(pool, gcwq)
-               mutex_unlock(&pool->assoc_mutex);
-}
-
-static void gcwq_unbind_fn(struct work_struct *work)
-{
-       struct global_cwq *gcwq = get_gcwq(smp_processor_id());
+       int cpu = smp_processor_id();
        struct worker_pool *pool;
        struct worker *worker;
        struct hlist_node *pos;
        int i;
 
-       BUG_ON(gcwq->cpu != smp_processor_id());
+       for_each_std_worker_pool(pool, cpu) {
+               BUG_ON(cpu != smp_processor_id());
 
-       gcwq_claim_assoc_and_lock(gcwq);
+               mutex_lock(&pool->assoc_mutex);
+               spin_lock_irq(&pool->lock);
 
-       /*
-        * We've claimed all manager positions.  Make all workers unbound
-        * and set DISASSOCIATED.  Before this, all workers except for the
-        * ones which are still executing works from before the last CPU
-        * down must be on the cpu.  After this, they may become diasporas.
-        */
-       for_each_worker_pool(pool, gcwq)
+               /*
+                * We've claimed all manager positions.  Make all workers
+                * unbound and set DISASSOCIATED.  Before this, all workers
+                * except for the ones which are still executing works from
+                * before the last CPU down must be on the cpu.  After
+                * this, they may become diasporas.
+                */
                list_for_each_entry(worker, &pool->idle_list, entry)
                        worker->flags |= WORKER_UNBOUND;
 
-       for_each_busy_worker(worker, i, pos, gcwq)
-               worker->flags |= WORKER_UNBOUND;
+               for_each_busy_worker(worker, i, pos, pool)
+                       worker->flags |= WORKER_UNBOUND;
 
-       gcwq->flags |= GCWQ_DISASSOCIATED;
+               pool->flags |= POOL_DISASSOCIATED;
 
-       gcwq_release_assoc_and_unlock(gcwq);
+               spin_unlock_irq(&pool->lock);
+               mutex_unlock(&pool->assoc_mutex);
+       }
 
        /*
         * Call schedule() so that we cross rq->lock and thus can guarantee
@@ -3576,16 +3461,16 @@ static void gcwq_unbind_fn(struct work_struct *work)
        /*
         * Sched callbacks are disabled now.  Zap nr_running.  After this,
         * nr_running stays zero and need_more_worker() and keep_working()
-        * are always true as long as the worklist is not empty.  @gcwq now
-        * behaves as unbound (in terms of concurrency management) gcwq
-        * which is served by workers tied to the CPU.
+        * are always true as long as the worklist is not empty.  Pools on
+        * @cpu now behave as unbound (in terms of concurrency management)
+        * pools which are served by workers tied to the CPU.
         *
         * On return from this function, the current worker would trigger
         * unbound chain execution of pending work items if other workers
         * didn't already.
         */
-       for_each_worker_pool(pool, gcwq)
-               atomic_set(get_pool_nr_running(pool), 0);
+       for_each_std_worker_pool(pool, cpu)
+               atomic_set(&pool->nr_running, 0);
 }
 
 /*
@@ -3597,12 +3482,11 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
                                               void *hcpu)
 {
        unsigned int cpu = (unsigned long)hcpu;
-       struct global_cwq *gcwq = get_gcwq(cpu);
        struct worker_pool *pool;
 
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
-               for_each_worker_pool(pool, gcwq) {
+               for_each_std_worker_pool(pool, cpu) {
                        struct worker *worker;
 
                        if (pool->nr_workers)
@@ -3612,18 +3496,24 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
                        if (!worker)
                                return NOTIFY_BAD;
 
-                       spin_lock_irq(&gcwq->lock);
+                       spin_lock_irq(&pool->lock);
                        start_worker(worker);
-                       spin_unlock_irq(&gcwq->lock);
+                       spin_unlock_irq(&pool->lock);
                }
                break;
 
        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
-               gcwq_claim_assoc_and_lock(gcwq);
-               gcwq->flags &= ~GCWQ_DISASSOCIATED;
-               rebind_workers(gcwq);
-               gcwq_release_assoc_and_unlock(gcwq);
+               for_each_std_worker_pool(pool, cpu) {
+                       mutex_lock(&pool->assoc_mutex);
+                       spin_lock_irq(&pool->lock);
+
+                       pool->flags &= ~POOL_DISASSOCIATED;
+                       rebind_workers(pool);
+
+                       spin_unlock_irq(&pool->lock);
+                       mutex_unlock(&pool->assoc_mutex);
+               }
                break;
        }
        return NOTIFY_OK;
@@ -3643,7 +3533,7 @@ static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DOWN_PREPARE:
                /* unbinding should happen on the local CPU */
-               INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
+               INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
                queue_work_on(cpu, system_highpri_wq, &unbind_work);
                flush_work(&unbind_work);
                break;
@@ -3696,10 +3586,10 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
  *
  * Start freezing workqueues.  After this function returns, all freezable
  * workqueues will queue new works to their frozen_works list instead of
- * gcwq->worklist.
+ * pool->worklist.
  *
  * CONTEXT:
- * Grabs and releases workqueue_lock and gcwq->lock's.
+ * Grabs and releases workqueue_lock and pool->lock's.
  */
 void freeze_workqueues_begin(void)
 {
@@ -3710,23 +3600,26 @@ void freeze_workqueues_begin(void)
        BUG_ON(workqueue_freezing);
        workqueue_freezing = true;
 
-       for_each_gcwq_cpu(cpu) {
-               struct global_cwq *gcwq = get_gcwq(cpu);
+       for_each_wq_cpu(cpu) {
+               struct worker_pool *pool;
                struct workqueue_struct *wq;
 
-               spin_lock_irq(&gcwq->lock);
+               for_each_std_worker_pool(pool, cpu) {
+                       spin_lock_irq(&pool->lock);
 
-               BUG_ON(gcwq->flags & GCWQ_FREEZING);
-               gcwq->flags |= GCWQ_FREEZING;
+                       WARN_ON_ONCE(pool->flags & POOL_FREEZING);
+                       pool->flags |= POOL_FREEZING;
 
-               list_for_each_entry(wq, &workqueues, list) {
-                       struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+                       list_for_each_entry(wq, &workqueues, list) {
+                               struct pool_workqueue *pwq = get_pwq(cpu, wq);
 
-                       if (cwq && wq->flags & WQ_FREEZABLE)
-                               cwq->max_active = 0;
-               }
+                               if (pwq && pwq->pool == pool &&
+                                   (wq->flags & WQ_FREEZABLE))
+                                       pwq->max_active = 0;
+                       }
 
-               spin_unlock_irq(&gcwq->lock);
+                       spin_unlock_irq(&pool->lock);
+               }
        }
 
        spin_unlock(&workqueue_lock);
@@ -3754,20 +3647,20 @@ bool freeze_workqueues_busy(void)
 
        BUG_ON(!workqueue_freezing);
 
-       for_each_gcwq_cpu(cpu) {
+       for_each_wq_cpu(cpu) {
                struct workqueue_struct *wq;
                /*
                 * nr_active is monotonically decreasing.  It's safe
                 * to peek without lock.
                 */
                list_for_each_entry(wq, &workqueues, list) {
-                       struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+                       struct pool_workqueue *pwq = get_pwq(cpu, wq);
 
-                       if (!cwq || !(wq->flags & WQ_FREEZABLE))
+                       if (!pwq || !(wq->flags & WQ_FREEZABLE))
                                continue;
 
-                       BUG_ON(cwq->nr_active < 0);
-                       if (cwq->nr_active) {
+                       BUG_ON(pwq->nr_active < 0);
+                       if (pwq->nr_active) {
                                busy = true;
                                goto out_unlock;
                        }
@@ -3782,10 +3675,10 @@ out_unlock:
  * thaw_workqueues - thaw workqueues
  *
  * Thaw workqueues.  Normal queueing is restored and all collected
- * frozen works are transferred to their respective gcwq worklists.
+ * frozen works are transferred to their respective pool worklists.
  *
  * CONTEXT:
- * Grabs and releases workqueue_lock and gcwq->lock's.
+ * Grabs and releases workqueue_lock and pool->lock's.
  */
 void thaw_workqueues(void)
 {
@@ -3796,30 +3689,31 @@ void thaw_workqueues(void)
        if (!workqueue_freezing)
                goto out_unlock;
 
-       for_each_gcwq_cpu(cpu) {
-               struct global_cwq *gcwq = get_gcwq(cpu);
+       for_each_wq_cpu(cpu) {
                struct worker_pool *pool;
                struct workqueue_struct *wq;
 
-               spin_lock_irq(&gcwq->lock);
+               for_each_std_worker_pool(pool, cpu) {
+                       spin_lock_irq(&pool->lock);
 
-               BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
-               gcwq->flags &= ~GCWQ_FREEZING;
+                       WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
+                       pool->flags &= ~POOL_FREEZING;
 
-               list_for_each_entry(wq, &workqueues, list) {
-                       struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+                       list_for_each_entry(wq, &workqueues, list) {
+                               struct pool_workqueue *pwq = get_pwq(cpu, wq);
 
-                       if (!cwq || !(wq->flags & WQ_FREEZABLE))
-                               continue;
+                               if (!pwq || pwq->pool != pool ||
+                                   !(wq->flags & WQ_FREEZABLE))
+                                       continue;
 
-                       /* restore max_active and repopulate worklist */
-                       cwq_set_max_active(cwq, wq->saved_max_active);
-               }
+                               /* restore max_active and repopulate worklist */
+                               pwq_set_max_active(pwq, wq->saved_max_active);
+                       }
 
-               for_each_worker_pool(pool, gcwq)
                        wake_up_worker(pool);
 
-               spin_unlock_irq(&gcwq->lock);
+                       spin_unlock_irq(&pool->lock);
+               }
        }
 
        workqueue_freezing = false;
@@ -3831,60 +3725,56 @@ out_unlock:
 static int __init init_workqueues(void)
 {
        unsigned int cpu;
-       int i;
 
-       /* make sure we have enough bits for OFFQ CPU number */
-       BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_CPU_SHIFT)) <
-                    WORK_CPU_LAST);
+       /* make sure we have enough bits for OFFQ pool ID */
+       BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
+                    WORK_CPU_END * NR_STD_WORKER_POOLS);
 
        cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
        hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
 
-       /* initialize gcwqs */
-       for_each_gcwq_cpu(cpu) {
-               struct global_cwq *gcwq = get_gcwq(cpu);
+       /* initialize CPU pools */
+       for_each_wq_cpu(cpu) {
                struct worker_pool *pool;
 
-               spin_lock_init(&gcwq->lock);
-               gcwq->cpu = cpu;
-               gcwq->flags |= GCWQ_DISASSOCIATED;
-
-               for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
-                       INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
-
-               for_each_worker_pool(pool, gcwq) {
-                       pool->gcwq = gcwq;
+               for_each_std_worker_pool(pool, cpu) {
+                       spin_lock_init(&pool->lock);
+                       pool->cpu = cpu;
+                       pool->flags |= POOL_DISASSOCIATED;
                        INIT_LIST_HEAD(&pool->worklist);
                        INIT_LIST_HEAD(&pool->idle_list);
+                       hash_init(pool->busy_hash);
 
                        init_timer_deferrable(&pool->idle_timer);
                        pool->idle_timer.function = idle_worker_timeout;
                        pool->idle_timer.data = (unsigned long)pool;
 
-                       setup_timer(&pool->mayday_timer, gcwq_mayday_timeout,
+                       setup_timer(&pool->mayday_timer, pool_mayday_timeout,
                                    (unsigned long)pool);
 
                        mutex_init(&pool->assoc_mutex);
                        ida_init(&pool->worker_ida);
+
+                       /* alloc pool ID */
+                       BUG_ON(worker_pool_assign_id(pool));
                }
        }
 
        /* create the initial worker */
-       for_each_online_gcwq_cpu(cpu) {
-               struct global_cwq *gcwq = get_gcwq(cpu);
+       for_each_online_wq_cpu(cpu) {
                struct worker_pool *pool;
 
-               if (cpu != WORK_CPU_UNBOUND)
-                       gcwq->flags &= ~GCWQ_DISASSOCIATED;
-
-               for_each_worker_pool(pool, gcwq) {
+               for_each_std_worker_pool(pool, cpu) {
                        struct worker *worker;
 
+                       if (cpu != WORK_CPU_UNBOUND)
+                               pool->flags &= ~POOL_DISASSOCIATED;
+
                        worker = create_worker(pool);
                        BUG_ON(!worker);
-                       spin_lock_irq(&gcwq->lock);
+                       spin_lock_irq(&pool->lock);
                        start_worker(worker);
-                       spin_unlock_irq(&gcwq->lock);
+                       spin_unlock_irq(&pool->lock);
                }
        }
 
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
new file mode 100644 (file)
index 0000000..0765026
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * kernel/workqueue_internal.h
+ *
+ * Workqueue internal header file.  Only to be included by workqueue and
+ * core kernel subsystems.
+ */
+#ifndef _KERNEL_WORKQUEUE_INTERNAL_H
+#define _KERNEL_WORKQUEUE_INTERNAL_H
+
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+
+struct worker_pool;
+
+/*
+ * The poor guys doing the actual heavy lifting.  All on-duty workers are
+ * either serving the manager role, on idle list or on busy hash.  For
+ * details on the locking annotation (L, I, X...), refer to workqueue.c.
+ *
+ * Only to be used in workqueue and async.
+ */
+struct worker {
+       /* on idle list while idle, on busy hash table while busy */
+       union {
+               struct list_head        entry;  /* L: while idle */
+               struct hlist_node       hentry; /* L: while busy */
+       };
+
+       struct work_struct      *current_work;  /* L: work being processed */
+       work_func_t             current_func;   /* L: current_work's fn */
+       struct pool_workqueue   *current_pwq; /* L: current_work's pwq */
+       struct list_head        scheduled;      /* L: scheduled works */
+       struct task_struct      *task;          /* I: worker task */
+       struct worker_pool      *pool;          /* I: the associated pool */
+       /* 64 bytes boundary on 64bit, 32 on 32bit */
+       unsigned long           last_active;    /* L: last active timestamp */
+       unsigned int            flags;          /* X: flags */
+       int                     id;             /* I: worker id */
+
+       /* for rebinding worker to CPU */
+       struct work_struct      rebind_work;    /* L: for busy worker */
+
+       /* used only by rescuers to point to the target workqueue */
+       struct workqueue_struct *rescue_wq;     /* I: the workqueue to rescue */
+};
+
+/**
+ * current_wq_worker - return struct worker if %current is a workqueue worker
+ */
+static inline struct worker *current_wq_worker(void)
+{
+       if (current->flags & PF_WQ_WORKER)
+               return kthread_data(current);
+       return NULL;
+}
+
+/*
+ * Scheduler hooks for concurrency managed workqueue.  Only to be used from
+ * sched.c and workqueue.c.
+ */
+void wq_worker_waking_up(struct task_struct *task, unsigned int cpu);
+struct task_struct *wq_worker_sleeping(struct task_struct *task,
+                                      unsigned int cpu);
+
+#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
diff --git a/kernel/workqueue_sched.h b/kernel/workqueue_sched.h
deleted file mode 100644 (file)
index 2d10fc9..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * kernel/workqueue_sched.h
- *
- * Scheduler hooks for concurrency managed workqueue.  Only to be
- * included from sched.c and workqueue.c.
- */
-void wq_worker_waking_up(struct task_struct *task, unsigned int cpu);
-struct task_struct *wq_worker_sleeping(struct task_struct *task,
-                                      unsigned int cpu);
index 67604e599384ebdbfada31a854e5ae59312e26e0..a1714c897e3f185778ece70c9797063968dd4d4f 100644 (file)
@@ -605,61 +605,6 @@ config PROVE_LOCKING
 
         For more details, see Documentation/lockdep-design.txt.
 
-config PROVE_RCU
-       bool "RCU debugging: prove RCU correctness"
-       depends on PROVE_LOCKING
-       default n
-       help
-        This feature enables lockdep extensions that check for correct
-        use of RCU APIs.  This is currently under development.  Say Y
-        if you want to debug RCU usage or help work on the PROVE_RCU
-        feature.
-
-        Say N if you are unsure.
-
-config PROVE_RCU_REPEATEDLY
-       bool "RCU debugging: don't disable PROVE_RCU on first splat"
-       depends on PROVE_RCU
-       default n
-       help
-        By itself, PROVE_RCU will disable checking upon issuing the
-        first warning (or "splat").  This feature prevents such
-        disabling, allowing multiple RCU-lockdep warnings to be printed
-        on a single reboot.
-
-        Say Y to allow multiple RCU-lockdep warnings per boot.
-
-        Say N if you are unsure.
-
-config PROVE_RCU_DELAY
-       bool "RCU debugging: preemptible RCU race provocation"
-       depends on DEBUG_KERNEL && PREEMPT_RCU
-       default n
-       help
-        There is a class of races that involve an unlikely preemption
-        of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
-        been set to INT_MIN.  This feature inserts a delay at that
-        point to increase the probability of these races.
-
-        Say Y to increase probability of preemption of __rcu_read_unlock().
-
-        Say N if you are unsure.
-
-config SPARSE_RCU_POINTER
-       bool "RCU debugging: sparse-based checks for pointer usage"
-       default n
-       help
-        This feature enables the __rcu sparse annotation for
-        RCU-protected pointers.  This annotation will cause sparse
-        to flag any non-RCU use of annotated pointers.  This can be
-        helpful when debugging RCU usage.  Please note that this feature
-        is not intended to enforce code cleanliness; it is instead merely
-        a debugging aid.
-
-        Say Y to make sparse flag questionable use of RCU-protected pointers
-
-        Say N if you are unsure.
-
 config LOCKDEP
        bool
        depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -937,6 +882,63 @@ config BOOT_PRINTK_DELAY
          BOOT_PRINTK_DELAY also may cause LOCKUP_DETECTOR to detect
          what it believes to be lockup conditions.
 
+menu "RCU Debugging"
+
+config PROVE_RCU
+       bool "RCU debugging: prove RCU correctness"
+       depends on PROVE_LOCKING
+       default n
+       help
+        This feature enables lockdep extensions that check for correct
+        use of RCU APIs.  This is currently under development.  Say Y
+        if you want to debug RCU usage or help work on the PROVE_RCU
+        feature.
+
+        Say N if you are unsure.
+
+config PROVE_RCU_REPEATEDLY
+       bool "RCU debugging: don't disable PROVE_RCU on first splat"
+       depends on PROVE_RCU
+       default n
+       help
+        By itself, PROVE_RCU will disable checking upon issuing the
+        first warning (or "splat").  This feature prevents such
+        disabling, allowing multiple RCU-lockdep warnings to be printed
+        on a single reboot.
+
+        Say Y to allow multiple RCU-lockdep warnings per boot.
+
+        Say N if you are unsure.
+
+config PROVE_RCU_DELAY
+       bool "RCU debugging: preemptible RCU race provocation"
+       depends on DEBUG_KERNEL && PREEMPT_RCU
+       default n
+       help
+        There is a class of races that involve an unlikely preemption
+        of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
+        been set to INT_MIN.  This feature inserts a delay at that
+        point to increase the probability of these races.
+
+        Say Y to increase probability of preemption of __rcu_read_unlock().
+
+        Say N if you are unsure.
+
+config SPARSE_RCU_POINTER
+       bool "RCU debugging: sparse-based checks for pointer usage"
+       default n
+       help
+        This feature enables the __rcu sparse annotation for
+        RCU-protected pointers.  This annotation will cause sparse
+        to flag any non-RCU use of annotated pointers.  This can be
+        helpful when debugging RCU usage.  Please note that this feature
+        is not intended to enforce code cleanliness; it is instead merely
+        a debugging aid.
+
+        Say Y to make sparse flag questionable use of RCU-protected pointers
+
+        Say N if you are unsure.
+
 config RCU_TORTURE_TEST
        tristate "torture tests for RCU"
        depends on DEBUG_KERNEL
@@ -970,7 +972,7 @@ config RCU_TORTURE_TEST_RUNNABLE
 
 config RCU_CPU_STALL_TIMEOUT
        int "RCU CPU stall timeout in seconds"
-       depends on TREE_RCU || TREE_PREEMPT_RCU
+       depends on RCU_STALL_COMMON
        range 3 300
        default 21
        help
@@ -1008,6 +1010,7 @@ config RCU_CPU_STALL_INFO
 config RCU_TRACE
        bool "Enable tracing for RCU"
        depends on DEBUG_KERNEL
+       select TRACE_CLOCK
        help
          This option provides tracing in RCU which presents stats
          in debugfs for debugging RCU implementation.
@@ -1015,6 +1018,8 @@ config RCU_TRACE
          Say Y here if you want to enable RCU tracing
          Say N if you are unsure.
 
+endmenu # "RCU Debugging"
+
 config KPROBES_SANITY_TEST
        bool "Kprobes sanity tests"
        depends on DEBUG_KERNEL
index a28c1415357cac9d30fc09195652fbfd099bc421..d0cdf14c651ae629cba6731debb3b6a5edee39bf 100644 (file)
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -55,6 +55,7 @@ static inline unsigned long bug_addr(const struct bug_entry *bug)
 }
 
 #ifdef CONFIG_MODULES
+/* Updates are protected by module mutex */
 static LIST_HEAD(module_bug_list);
 
 static const struct bug_entry *module_find_bug(unsigned long bugaddr)
index 145dec5267c91a6989402acc51ec50895e488988..5fbed5caba6e833222a62952cb4ba6e3430c053e 100644 (file)
@@ -45,6 +45,7 @@ struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
        if (!rmap)
                return NULL;
 
+       kref_init(&rmap->refcount);
        rmap->obj = (void **)((char *)rmap + obj_offset);
 
        /* Initially assign CPUs to objects on a rota, since we have
@@ -63,6 +64,35 @@ struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
 }
 EXPORT_SYMBOL(alloc_cpu_rmap);
 
+/**
+ * cpu_rmap_release - internal reclaiming helper called from kref_put
+ * @ref: kref to struct cpu_rmap
+ */
+static void cpu_rmap_release(struct kref *ref)
+{
+       struct cpu_rmap *rmap = container_of(ref, struct cpu_rmap, refcount);
+       kfree(rmap);
+}
+
+/**
+ * cpu_rmap_get - internal helper to get new ref on a cpu_rmap
+ * @rmap: reverse-map allocated with alloc_cpu_rmap()
+ */
+static inline void cpu_rmap_get(struct cpu_rmap *rmap)
+{
+       kref_get(&rmap->refcount);
+}
+
+/**
+ * cpu_rmap_put - release ref on a cpu_rmap
+ * @rmap: reverse-map allocated with alloc_cpu_rmap()
+ */
+int cpu_rmap_put(struct cpu_rmap *rmap)
+{
+       return kref_put(&rmap->refcount, cpu_rmap_release);
+}
+EXPORT_SYMBOL(cpu_rmap_put);
+
 /* Reevaluate nearest object for given CPU, comparing with the given
  * neighbours at the given distance.
  */
@@ -197,8 +227,7 @@ struct irq_glue {
  * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
  * @rmap: Reverse-map allocated with alloc_irq_cpu_rmap(), or %NULL
  *
- * Must be called in process context, before freeing the IRQs, and
- * without holding any locks required by global workqueue items.
+ * Must be called in process context, before freeing the IRQs.
  */
 void free_irq_cpu_rmap(struct cpu_rmap *rmap)
 {
@@ -212,12 +241,18 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap)
                glue = rmap->obj[index];
                irq_set_affinity_notifier(glue->notify.irq, NULL);
        }
-       irq_run_affinity_notifiers();
 
-       kfree(rmap);
+       cpu_rmap_put(rmap);
 }
 EXPORT_SYMBOL(free_irq_cpu_rmap);
 
+/**
+ * irq_cpu_rmap_notify - callback for IRQ subsystem when IRQ affinity updated
+ * @notify: struct irq_affinity_notify passed by irq/manage.c
+ * @mask: cpu mask for new SMP affinity
+ *
+ * This is executed in workqueue context.
+ */
 static void
 irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
 {
@@ -230,10 +265,16 @@ irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
                pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
 }
 
+/**
+ * irq_cpu_rmap_release - reclaiming callback for IRQ subsystem
+ * @ref: kref to struct irq_affinity_notify passed by irq/manage.c
+ */
 static void irq_cpu_rmap_release(struct kref *ref)
 {
        struct irq_glue *glue =
                container_of(ref, struct irq_glue, notify.kref);
+
+       cpu_rmap_put(glue->rmap);
        kfree(glue);
 }
 
@@ -258,10 +299,13 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
        glue->notify.notify = irq_cpu_rmap_notify;
        glue->notify.release = irq_cpu_rmap_release;
        glue->rmap = rmap;
+       cpu_rmap_get(rmap);
        glue->index = cpu_rmap_add(rmap, glue);
        rc = irq_set_affinity_notifier(irq, &glue->notify);
-       if (rc)
+       if (rc) {
+               cpu_rmap_put(glue->rmap);
                kfree(glue);
+       }
        return rc;
 }
 EXPORT_SYMBOL(irq_cpu_rmap_add);
index 8c0e62975c88d49a09c9c29ab9e7a2b1334a6587..dc2be7ed1765b0dc3675c97b2cf862dc969dcd90 100644 (file)
@@ -162,6 +162,8 @@ static int digsig_verify_rsa(struct key *key,
        memset(out1, 0, head);
        memcpy(out1 + head, p, l);
 
+       kfree(p);
+
        err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len);
        if (err)
                goto err;
index 4f56a11d67fa9da105b34337280277d6fe437b2e..c0e31fe2fabf5b99c160fb01daf618cb7c0488b3 100644 (file)
@@ -194,8 +194,12 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
        }
 }
 
-__always_inline void
-__rb_erase_color(struct rb_node *parent, struct rb_root *root,
+/*
+ * Inline version for rb_erase() use - we want to be able to inline
+ * and eliminate the dummy_rotate callback there
+ */
+static __always_inline void
+____rb_erase_color(struct rb_node *parent, struct rb_root *root,
        void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
 {
        struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
@@ -355,6 +359,13 @@ __rb_erase_color(struct rb_node *parent, struct rb_root *root,
                }
        }
 }
+
+/* Non-inline version for rb_erase_augmented() use */
+void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
+       void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+       ____rb_erase_color(parent, root, augment_rotate);
+}
 EXPORT_SYMBOL(__rb_erase_color);
 
 /*
@@ -380,7 +391,10 @@ EXPORT_SYMBOL(rb_insert_color);
 
 void rb_erase(struct rb_node *node, struct rb_root *root)
 {
-       rb_erase_augmented(node, root, &dummy_callbacks);
+       struct rb_node *rebalance;
+       rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
+       if (rebalance)
+               ____rb_erase_color(rebalance, root, dummy_rotate);
 }
 EXPORT_SYMBOL(rb_erase);
 
index 1324cd74faec45dc2b0ba4f0b3fb0659640e9f25..b93376c39b61308fe2ef7de2466e83306c865cf9 100644 (file)
@@ -185,10 +185,23 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 
        while (start < end) {
                unsigned long *map, idx, vec;
+               unsigned shift;
 
                map = bdata->node_bootmem_map;
                idx = start - bdata->node_min_pfn;
+               shift = idx & (BITS_PER_LONG - 1);
+               /*
+                * vec holds at most BITS_PER_LONG map bits,
+                * bit 0 corresponds to start.
+                */
                vec = ~map[idx / BITS_PER_LONG];
+
+               if (shift) {
+                       vec >>= shift;
+                       if (end - start >= BITS_PER_LONG)
+                               vec |= ~map[idx / BITS_PER_LONG + 1] <<
+                                       (BITS_PER_LONG - shift);
+               }
                /*
                 * If we have a properly aligned and fully unreserved
                 * BITS_PER_LONG block of pages in front of us, free
@@ -201,19 +214,18 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
                        count += BITS_PER_LONG;
                        start += BITS_PER_LONG;
                } else {
-                       unsigned long off = 0;
+                       unsigned long cur = start;
 
-                       vec >>= start & (BITS_PER_LONG - 1);
-                       while (vec) {
+                       start = ALIGN(start + 1, BITS_PER_LONG);
+                       while (vec && cur != start) {
                                if (vec & 1) {
-                                       page = pfn_to_page(start + off);
+                                       page = pfn_to_page(cur);
                                        __free_pages_bootmem(page, 0);
                                        count++;
                                }
                                vec >>= 1;
-                               off++;
+                               ++cur;
                        }
-                       start = ALIGN(start + 1, BITS_PER_LONG);
                }
        }
 
index 6b807e46649703909d0662a44f88d7c4b2057635..c62bd063d766c7333ca0370444636a52ddc70802 100644 (file)
@@ -816,6 +816,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 static int compact_finished(struct zone *zone,
                            struct compact_control *cc)
 {
+       unsigned int order;
        unsigned long watermark;
 
        if (fatal_signal_pending(current))
@@ -850,22 +851,16 @@ static int compact_finished(struct zone *zone,
                return COMPACT_CONTINUE;
 
        /* Direct compactor: Is a suitable page free? */
-       if (cc->page) {
-               /* Was a suitable page captured? */
-               if (*cc->page)
+       for (order = cc->order; order < MAX_ORDER; order++) {
+               struct free_area *area = &zone->free_area[order];
+
+               /* Job done if page is free of the right migratetype */
+               if (!list_empty(&area->free_list[cc->migratetype]))
+                       return COMPACT_PARTIAL;
+
+               /* Job done if allocation would set block type */
+               if (cc->order >= pageblock_order && area->nr_free)
                        return COMPACT_PARTIAL;
-       } else {
-               unsigned int order;
-               for (order = cc->order; order < MAX_ORDER; order++) {
-                       struct free_area *area = &zone->free_area[cc->order];
-                       /* Job done if page is free of the right migratetype */
-                       if (!list_empty(&area->free_list[cc->migratetype]))
-                               return COMPACT_PARTIAL;
-
-                       /* Job done if allocation would set block type */
-                       if (cc->order >= pageblock_order && area->nr_free)
-                               return COMPACT_PARTIAL;
-               }
        }
 
        return COMPACT_CONTINUE;
@@ -921,60 +916,6 @@ unsigned long compaction_suitable(struct zone *zone, int order)
        return COMPACT_CONTINUE;
 }
 
-static void compact_capture_page(struct compact_control *cc)
-{
-       unsigned long flags;
-       int mtype, mtype_low, mtype_high;
-
-       if (!cc->page || *cc->page)
-               return;
-
-       /*
-        * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
-        * regardless of the migratetype of the freelist is is captured from.
-        * This is fine because the order for a high-order MIGRATE_MOVABLE
-        * allocation is typically at least a pageblock size and overall
-        * fragmentation is not impaired. Other allocation types must
-        * capture pages from their own migratelist because otherwise they
-        * could pollute other pageblocks like MIGRATE_MOVABLE with
-        * difficult to move pages and making fragmentation worse overall.
-        */
-       if (cc->migratetype == MIGRATE_MOVABLE) {
-               mtype_low = 0;
-               mtype_high = MIGRATE_PCPTYPES;
-       } else {
-               mtype_low = cc->migratetype;
-               mtype_high = cc->migratetype + 1;
-       }
-
-       /* Speculatively examine the free lists without zone lock */
-       for (mtype = mtype_low; mtype < mtype_high; mtype++) {
-               int order;
-               for (order = cc->order; order < MAX_ORDER; order++) {
-                       struct page *page;
-                       struct free_area *area;
-                       area = &(cc->zone->free_area[order]);
-                       if (list_empty(&area->free_list[mtype]))
-                               continue;
-
-                       /* Take the lock and attempt capture of the page */
-                       if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
-                               return;
-                       if (!list_empty(&area->free_list[mtype])) {
-                               page = list_entry(area->free_list[mtype].next,
-                                                       struct page, lru);
-                               if (capture_free_page(page, cc->order, mtype)) {
-                                       spin_unlock_irqrestore(&cc->zone->lock,
-                                                                       flags);
-                                       *cc->page = page;
-                                       return;
-                               }
-                       }
-                       spin_unlock_irqrestore(&cc->zone->lock, flags);
-               }
-       }
-}
-
 static int compact_zone(struct zone *zone, struct compact_control *cc)
 {
        int ret;
@@ -1054,9 +995,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                                goto out;
                        }
                }
-
-               /* Capture a page now if it is a suitable size */
-               compact_capture_page(cc);
        }
 
 out:
@@ -1069,8 +1007,7 @@ out:
 
 static unsigned long compact_zone_order(struct zone *zone,
                                 int order, gfp_t gfp_mask,
-                                bool sync, bool *contended,
-                                struct page **page)
+                                bool sync, bool *contended)
 {
        unsigned long ret;
        struct compact_control cc = {
@@ -1080,7 +1017,6 @@ static unsigned long compact_zone_order(struct zone *zone,
                .migratetype = allocflags_to_migratetype(gfp_mask),
                .zone = zone,
                .sync = sync,
-               .page = page,
        };
        INIT_LIST_HEAD(&cc.freepages);
        INIT_LIST_HEAD(&cc.migratepages);
@@ -1110,7 +1046,7 @@ int sysctl_extfrag_threshold = 500;
  */
 unsigned long try_to_compact_pages(struct zonelist *zonelist,
                        int order, gfp_t gfp_mask, nodemask_t *nodemask,
-                       bool sync, bool *contended, struct page **page)
+                       bool sync, bool *contended)
 {
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        int may_enter_fs = gfp_mask & __GFP_FS;
@@ -1136,7 +1072,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
                int status;
 
                status = compact_zone_order(zone, order, gfp_mask, sync,
-                                               contended, page);
+                                               contended);
                rc = max(status, rc);
 
                /* If a normal allocation would succeed, stop compacting */
@@ -1192,7 +1128,6 @@ int compact_pgdat(pg_data_t *pgdat, int order)
        struct compact_control cc = {
                .order = order,
                .sync = false,
-               .page = NULL,
        };
 
        return __compact_pgdat(pgdat, &cc);
@@ -1203,14 +1138,13 @@ static int compact_node(int nid)
        struct compact_control cc = {
                .order = -1,
                .sync = true,
-               .page = NULL,
        };
 
        return __compact_pgdat(NODE_DATA(nid), &cc);
 }
 
 /* Compact all nodes in the system */
-static int compact_nodes(void)
+static void compact_nodes(void)
 {
        int nid;
 
@@ -1219,8 +1153,6 @@ static int compact_nodes(void)
 
        for_each_online_node(nid)
                compact_node(nid);
-
-       return COMPACT_COMPLETE;
 }
 
 /* The written value is actually unused, all memory is compacted */
@@ -1231,7 +1163,7 @@ int sysctl_compaction_handler(struct ctl_table *table, int write,
                        void __user *buffer, size_t *length, loff_t *ppos)
 {
        if (write)
-               return compact_nodes();
+               compact_nodes();
 
        return 0;
 }
index 9e894edc7811ca66bda078e9606011ca25035238..b5783d81eda90fc9a808478ac6dcea4f74b4424e 100644 (file)
@@ -1257,6 +1257,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
        if (flags & FOLL_WRITE && !pmd_write(*pmd))
                goto out;
 
+       /* Avoid dumping huge zero page */
+       if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
+               return ERR_PTR(-EFAULT);
+
        page = pmd_page(*pmd);
        VM_BUG_ON(!PageHead(page));
        if (flags & FOLL_TOUCH) {
@@ -1819,9 +1823,19 @@ int split_huge_page(struct page *page)
 
        BUG_ON(is_huge_zero_pfn(page_to_pfn(page)));
        BUG_ON(!PageAnon(page));
-       anon_vma = page_lock_anon_vma_read(page);
+
+       /*
+        * The caller does not necessarily hold an mmap_sem that would prevent
+        * the anon_vma disappearing so we first take a reference to it
+        * and then lock the anon_vma for write. This is similar to
+        * page_lock_anon_vma_read except the write lock is taken to serialise
+        * against parallel split or collapse operations.
+        */
+       anon_vma = page_get_anon_vma(page);
        if (!anon_vma)
                goto out;
+       anon_vma_lock_write(anon_vma);
+
        ret = 0;
        if (!PageCompound(page))
                goto out_unlock;
@@ -1832,7 +1846,8 @@ int split_huge_page(struct page *page)
 
        BUG_ON(PageCompound(page));
 out_unlock:
-       page_unlock_anon_vma_read(anon_vma);
+       anon_vma_unlock(anon_vma);
+       put_anon_vma(anon_vma);
 out:
        return ret;
 }
index 4f3ea0b1e57ce33bbf71bd1d3f41e04e11ae05dc..546db81820e45e8279f5a6425794da2980031dc8 100644 (file)
@@ -3033,6 +3033,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                if (!huge_pte_none(huge_ptep_get(ptep))) {
                        pte = huge_ptep_get_and_clear(mm, address, ptep);
                        pte = pte_mkhuge(pte_modify(pte, newprot));
+                       pte = arch_make_huge_pte(pte, vma, NULL, 0);
                        set_huge_pte_at(mm, address, ptep, pte);
                        pages++;
                }
index d597f94cc2059ade4d9fd808457d240538faac55..9ba21100ebf3b01bb0f2296baaf55ec41e303450 100644 (file)
@@ -135,7 +135,6 @@ struct compact_control {
        int migratetype;                /* MOVABLE, RECLAIMABLE etc */
        struct zone *zone;
        bool contended;                 /* True if a lock was contended */
-       struct page **page;             /* Page captured of requested size */
 };
 
 unsigned long
index 625905523c2a1592f539c46f6721723d62376648..88adc8afb6103d6c1ba2eb7e20c1ed226cf2b1d3 100644 (file)
@@ -314,7 +314,8 @@ static void __init_memblock memblock_merge_regions(struct memblock_type *type)
                }
 
                this->size += next->size;
-               memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
+               /* move forward from next + 1, index of which is i + 2 */
+               memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
                type->cnt--;
        }
 }
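
The memmove count in the hunk above now covers only the entries that really follow the merged pair: once region i absorbs region i+1, the survivors start at index i+2, so cnt - (i + 2) of them slide down one slot. A small standalone sketch of that merge loop over contiguous ranges follows; all types and names are invented for illustration.

/* Merge adjacent, contiguous [base, base+size) ranges in place. */
#include <stdio.h>
#include <string.h>

struct range { unsigned long base, size; };

static void merge_ranges(struct range *r, size_t *cnt)
{
        size_t i = 0;

        while (i + 1 < *cnt) {
                struct range *this = &r[i];
                struct range *next = &r[i + 1];

                if (this->base + this->size != next->base) {
                        i++;
                        continue;
                }
                this->size += next->size;
                /* Entries from index i+2 onward move down one slot; there are
                 * *cnt - (i + 2) of them, the count the fix above computes. */
                memmove(next, next + 1, (*cnt - (i + 2)) * sizeof(*next));
                (*cnt)--;
        }
}

int main(void)
{
        struct range r[] = { {0, 4}, {4, 4}, {16, 8}, {24, 8} };
        size_t cnt = 4;

        merge_ranges(r, &cnt);
        for (size_t i = 0; i < cnt; i++)
                printf("[%lu, %lu)\n", r[i].base, r[i].base + r[i].size);
        return 0;   /* prints [0, 8) and [16, 32) */
}
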
index 09255ec8159c459624e66ee5ac17ca092d2fcd6b..fbb60b103e64b11021475a749b86da6f7eb7137e 100644 (file)
@@ -3030,7 +3030,9 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
        if (memcg) {
                s->memcg_params->memcg = memcg;
                s->memcg_params->root_cache = root_cache;
-       }
+       } else
+               s->memcg_params->is_root_cache = true;
+
        return 0;
 }
 
index 3b676b0c5c3ecca91d5e91d8ef2adc2982353f57..2fd8b4af47440a39a31d48a1096e24b571028455 100644 (file)
@@ -160,8 +160,10 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
        if (is_write_migration_entry(entry))
                pte = pte_mkwrite(pte);
 #ifdef CONFIG_HUGETLB_PAGE
-       if (PageHuge(new))
+       if (PageHuge(new)) {
                pte = pte_mkhuge(pte);
+               pte = arch_make_huge_pte(pte, vma, new, 0);
+       }
 #endif
        flush_cache_page(vma, addr, pte_pfn(pte));
        set_pte_at(mm, addr, ptep, pte);
@@ -1679,9 +1681,21 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        page_xchg_last_nid(new_page, page_last_nid(page));
 
        isolated = numamigrate_isolate_page(pgdat, page);
-       if (!isolated) {
+
+       /*
+        * Failing to isolate or a GUP pin prevents migration. The expected
+        * page count is 2. 1 for anonymous pages without a mapping and 1
+        * for the callers pin. If the page was isolated, the page will
+        * need to be put back on the LRU.
+        */
+       if (!isolated || page_count(page) != 2) {
                count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
                put_page(new_page);
+               if (isolated) {
+                       putback_lru_page(page);
+                       isolated = 0;
+                       goto out;
+               }
                goto out_keep_locked;
        }
 
index f0b9ce572fc78ddbd0118db8aaa60d9b56086bf9..c9bd528b01d2361aa6e37f9791a831c3b2a443e7 100644 (file)
@@ -517,11 +517,11 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
 static int do_mlockall(int flags)
 {
        struct vm_area_struct * vma, * prev = NULL;
-       unsigned int def_flags = 0;
 
        if (flags & MCL_FUTURE)
-               def_flags = VM_LOCKED;
-       current->mm->def_flags = def_flags;
+               current->mm->def_flags |= VM_LOCKED;
+       else
+               current->mm->def_flags &= ~VM_LOCKED;
        if (flags == MCL_FUTURE)
                goto out;
 
index f54b235f29a98c3b3a91c2c1e6b4b50ef5e8f287..09da0b2649822beae4ee559c265d087e4b93db15 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -32,6 +32,7 @@
 #include <linux/khugepaged.h>
 #include <linux/uprobes.h>
 #include <linux/rbtree_augmented.h>
+#include <linux/sched/sysctl.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -2886,7 +2887,7 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
                 * The LSB of head.next can't change from under us
                 * because we hold the mm_all_locks_mutex.
                 */
-               down_write(&anon_vma->root->rwsem);
+               down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
                /*
                 * We can safely modify head.next after taking the
                 * anon_vma->root->rwsem. If some other vma in this mm shares
@@ -2943,7 +2944,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
  * vma in this mm is backed by the same anon_vma or address_space.
  *
  * We can take all the locks in random order because the VM code
- * taking i_mmap_mutex or anon_vma->mutex outside the mmap_sem never
+ * taking i_mmap_mutex or anon_vma->rwsem outside the mmap_sem never
  * takes more than one of them in a row. Secondly we're protected
  * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
  *
index e1031e1f6a61b68ba5055a162ea0d9161b510ba2..f9766f4602999f0fd6928a4c4b93f69ba0748b6a 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/mmu_notifier.h>
+#include <linux/sched/sysctl.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
index 79c3cac87afa1d1f96fc9397f50bb370980e0a84..b20db4e222630cebde1ca16879d817977e881618 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/audit.h>
+#include <linux/sched/sysctl.h>
 
 #include <asm/uaccess.h>
 #include <asm/tlb.h>
index 0713bfbf095410bbef16023ac9a97d36bfda9183..66a0024becd9f6c87f5e437c92ce6852657638a9 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/buffer_head.h> /* __set_page_dirty_buffers */
 #include <linux/pagevec.h>
 #include <linux/timer.h>
+#include <linux/sched/rt.h>
 #include <trace/events/writeback.h>
 
 /*
index bc6cc0e913bd7d18214218b5fe3d7226e42e24d6..d1107adf174a5390c597414ea335494e857f1af2 100644 (file)
@@ -58,6 +58,7 @@
 #include <linux/prefetch.h>
 #include <linux/migrate.h>
 #include <linux/page-debug-flags.h>
+#include <linux/sched/rt.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -773,6 +774,10 @@ void __init init_cma_reserved_pageblock(struct page *page)
        set_pageblock_migratetype(page, MIGRATE_CMA);
        __free_pages(page, pageblock_order);
        totalram_pages += pageblock_nr_pages;
+#ifdef CONFIG_HIGHMEM
+       if (PageHighMem(page))
+               totalhigh_pages += pageblock_nr_pages;
+#endif
 }
 #endif
 
@@ -1384,14 +1389,8 @@ void split_page(struct page *page, unsigned int order)
                set_page_refcounted(page + i);
 }
 
-/*
- * Similar to the split_page family of functions except that the page
- * required at the given order and being isolated now to prevent races
- * with parallel allocators
- */
-int capture_free_page(struct page *page, int alloc_order, int migratetype)
+static int __isolate_free_page(struct page *page, unsigned int order)
 {
-       unsigned int order;
        unsigned long watermark;
        struct zone *zone;
        int mt;
@@ -1399,7 +1398,6 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
        BUG_ON(!PageBuddy(page));
 
        zone = page_zone(page);
-       order = page_order(page);
        mt = get_pageblock_migratetype(page);
 
        if (mt != MIGRATE_ISOLATE) {
@@ -1408,7 +1406,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
                if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
                        return 0;
 
-               __mod_zone_freepage_state(zone, -(1UL << alloc_order), mt);
+               __mod_zone_freepage_state(zone, -(1UL << order), mt);
        }
 
        /* Remove page from free list */
@@ -1416,11 +1414,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
        zone->free_area[order].nr_free--;
        rmv_page_order(page);
 
-       if (alloc_order != order)
-               expand(zone, page, alloc_order, order,
-                       &zone->free_area[order], migratetype);
-
-       /* Set the pageblock if the captured page is at least a pageblock */
+       /* Set the pageblock if the isolated page is at least a pageblock */
        if (order >= pageblock_order - 1) {
                struct page *endpage = page + (1 << order) - 1;
                for (; page < endpage; page += pageblock_nr_pages) {
@@ -1431,7 +1425,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
                }
        }
 
-       return 1UL << alloc_order;
+       return 1UL << order;
 }
 
 /*
@@ -1449,10 +1443,9 @@ int split_free_page(struct page *page)
        unsigned int order;
        int nr_pages;
 
-       BUG_ON(!PageBuddy(page));
        order = page_order(page);
 
-       nr_pages = capture_free_page(page, order, 0);
+       nr_pages = __isolate_free_page(page, order);
        if (!nr_pages)
                return 0;
 
@@ -2136,8 +2129,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        bool *contended_compaction, bool *deferred_compaction,
        unsigned long *did_some_progress)
 {
-       struct page *page = NULL;
-
        if (!order)
                return NULL;
 
@@ -2149,16 +2140,12 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        current->flags |= PF_MEMALLOC;
        *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
                                                nodemask, sync_migration,
-                                               contended_compaction, &page);
+                                               contended_compaction);
        current->flags &= ~PF_MEMALLOC;
 
-       /* If compaction captured a page, prep and use it */
-       if (page) {
-               prep_new_page(page, order, gfp_mask);
-               goto got_page;
-       }
-
        if (*did_some_progress != COMPACT_SKIPPED) {
+               struct page *page;
+
                /* Page migration frees to the PCP lists but we want merging */
                drain_pages(get_cpu());
                put_cpu();
@@ -2168,7 +2155,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
                                alloc_flags & ~ALLOC_NO_WATERMARKS,
                                preferred_zone, migratetype);
                if (page) {
-got_page:
                        preferred_zone->compact_blockskip_flush = false;
                        preferred_zone->compact_considered = 0;
                        preferred_zone->compact_defer_shift = 0;
@@ -4435,10 +4421,11 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
  * round what is now in bits to nearest long in bits, then return it in
  * bytes.
  */
-static unsigned long __init usemap_size(unsigned long zonesize)
+static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
 {
        unsigned long usemapsize;
 
+       zonesize += zone_start_pfn & (pageblock_nr_pages-1);
        usemapsize = roundup(zonesize, pageblock_nr_pages);
        usemapsize = usemapsize >> pageblock_order;
        usemapsize *= NR_PAGEBLOCK_BITS;
@@ -4448,17 +4435,19 @@ static unsigned long __init usemap_size(unsigned long zonesize)
 }
 
 static void __init setup_usemap(struct pglist_data *pgdat,
-                               struct zone *zone, unsigned long zonesize)
+                               struct zone *zone,
+                               unsigned long zone_start_pfn,
+                               unsigned long zonesize)
 {
-       unsigned long usemapsize = usemap_size(zonesize);
+       unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
        zone->pageblock_flags = NULL;
        if (usemapsize)
                zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
                                                                   usemapsize);
 }
 #else
-static inline void setup_usemap(struct pglist_data *pgdat,
-                               struct zone *zone, unsigned long zonesize) {}
+static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
+                               unsigned long zone_start_pfn, unsigned long zonesize) {}
 #endif /* CONFIG_SPARSEMEM */
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
@@ -4609,7 +4598,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                        continue;
 
                set_pageblock_order();
-               setup_usemap(pgdat, zone, size);
+               setup_usemap(pgdat, zone, zone_start_pfn, size);
                ret = init_currently_empty_zone(zone, zone_start_pfn,
                                                size, MEMMAP_EARLY);
                BUG_ON(ret);
@@ -5604,7 +5593,7 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
        pfn &= (PAGES_PER_SECTION-1);
        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 #else
-       pfn = pfn - zone->zone_start_pfn;
+       pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 #endif /* CONFIG_SPARSEMEM */
 }
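
The usemap_size()/pfn_to_bitidx() changes above handle zones whose start pfn is not pageblock-aligned: the unaligned head is added to the size before rounding up, and lookups round the zone start down to a pageblock boundary. A hedged standalone sketch of the sizing arithmetic follows, with made-up constants standing in for pageblock_nr_pages and NR_PAGEBLOCK_BITS; it is not kernel code.

#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL        /* stand-in for pageblock_nr_pages */
#define NR_PAGEBLOCK_BITS  4UL          /* stand-in for NR_PAGEBLOCK_BITS  */

static unsigned long roundup_ul(unsigned long x, unsigned long to)
{
        return ((x + to - 1) / to) * to;
}

/* Bytes of flag bitmap needed for a zone of 'zonesize' pages starting at
 * 'zone_start_pfn'. Padding by the misalignment of the start pfn is what the
 * patch adds, so a partial leading pageblock gets its own bits too. */
static unsigned long usemap_size(unsigned long zone_start_pfn,
                                 unsigned long zonesize)
{
        unsigned long usemapsize;

        zonesize += zone_start_pfn & (PAGEBLOCK_NR_PAGES - 1);
        usemapsize = roundup_ul(zonesize, PAGEBLOCK_NR_PAGES);
        usemapsize /= PAGEBLOCK_NR_PAGES;       /* number of pageblocks */
        usemapsize *= NR_PAGEBLOCK_BITS;        /* bits of flags        */
        usemapsize = roundup_ul(usemapsize, 8 * sizeof(unsigned long));

        return usemapsize / 8;                  /* bits -> bytes */
}

int main(void)
{
        /* A zone starting 100 pages into a pageblock and spanning 1000 pages
         * touches three pageblocks, so bits are reserved for all three. */
        printf("%lu bytes\n", usemap_size(100, 1000));
        return 0;
}
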
index 8e1d89d2b1c1cada56f190bcca967525a2b7a2e7..553921511e4ee585cc9dd9e5bb52be59ef83298b 100644 (file)
@@ -440,7 +440,7 @@ static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
        /* this is an hash collision with the temporary selected node. Choose
         * the one with the lowest address
         */
-       if ((tmp_max == max) &&
+       if ((tmp_max == max) && max_orig_node &&
            (batadv_compare_eth(candidate->orig, max_orig_node->orig) > 0))
                goto out;
 
@@ -738,6 +738,7 @@ static uint16_t batadv_arp_get_type(struct batadv_priv *bat_priv,
        struct arphdr *arphdr;
        struct ethhdr *ethhdr;
        __be32 ip_src, ip_dst;
+       uint8_t *hw_src, *hw_dst;
        uint16_t type = 0;
 
        /* pull the ethernet header */
@@ -777,9 +778,23 @@ static uint16_t batadv_arp_get_type(struct batadv_priv *bat_priv,
        ip_src = batadv_arp_ip_src(skb, hdr_size);
        ip_dst = batadv_arp_ip_dst(skb, hdr_size);
        if (ipv4_is_loopback(ip_src) || ipv4_is_multicast(ip_src) ||
-           ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst))
+           ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst) ||
+           ipv4_is_zeronet(ip_src) || ipv4_is_lbcast(ip_src) ||
+           ipv4_is_zeronet(ip_dst) || ipv4_is_lbcast(ip_dst))
                goto out;
 
+       hw_src = batadv_arp_hw_src(skb, hdr_size);
+       if (is_zero_ether_addr(hw_src) || is_multicast_ether_addr(hw_src))
+               goto out;
+
+       /* we don't care about the destination MAC address in ARP requests */
+       if (arphdr->ar_op != htons(ARPOP_REQUEST)) {
+               hw_dst = batadv_arp_hw_dst(skb, hdr_size);
+               if (is_zero_ether_addr(hw_dst) ||
+                   is_multicast_ether_addr(hw_dst))
+                       goto out;
+       }
+
        type = ntohs(arphdr->ar_op);
 out:
        return type;
@@ -1012,6 +1027,8 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
         */
        ret = !batadv_is_my_client(bat_priv, hw_dst);
 out:
+       if (ret)
+               kfree_skb(skb);
        /* if ret == false -> packet has to be delivered to the interface */
        return ret;
 }
index 25bfce0666ebff70258019ddb1bba9b6f1eca967..4925a02ae7e4db7df4d4cdfc972e68375743861c 100644 (file)
@@ -249,12 +249,12 @@ static void hci_conn_disconnect(struct hci_conn *conn)
        __u8 reason = hci_proto_disconn_ind(conn);
 
        switch (conn->type) {
-       case ACL_LINK:
-               hci_acl_disconn(conn, reason);
-               break;
        case AMP_LINK:
                hci_amp_disconn(conn, reason);
                break;
+       default:
+               hci_acl_disconn(conn, reason);
+               break;
        }
 }
 
index 596660d37c5e56d6a0178b16b288a2d3c868f965..0f78e34220c9025aae08f3b38b49924b6c397ff6 100644 (file)
@@ -2810,14 +2810,6 @@ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
        if (conn) {
                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
 
-               hci_dev_lock(hdev);
-               if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
-                   !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
-                       mgmt_device_connected(hdev, &conn->dst, conn->type,
-                                             conn->dst_type, 0, NULL, 0,
-                                             conn->dev_class);
-               hci_dev_unlock(hdev);
-
                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);
                return;
index 705078a0cc393c023054ad1d55aea1a548e3da1d..81b44481d0d93a8dd8513dd15f97f9a21b8a32d6 100644 (file)
@@ -2688,7 +2688,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
        if (ev->opcode != HCI_OP_NOP)
                del_timer(&hdev->cmd_timer);
 
-       if (ev->ncmd) {
+       if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                if (!skb_queue_empty(&hdev->cmd_q))
                        queue_work(hdev->workqueue, &hdev->cmd_work);
index b2bcbe2dc328ba8473227035c39a7ff467ee5659..a7352ff3fd1e7884c2fc43142fba9bc0033063b8 100644 (file)
@@ -931,7 +931,7 @@ static int hidp_setup_hid(struct hidp_session *session,
        hid->version = req->version;
        hid->country = req->country;
 
-       strncpy(hid->name, req->name, 128);
+       strncpy(hid->name, req->name, sizeof(req->name) - 1);
 
        snprintf(hid->phys, sizeof(hid->phys), "%pMR",
                 &bt_sk(session->ctrl_sock->sk)->src);
index 2c78208d793eb8db838cf85a480e029a4dc3567b..22e658322845b9f16bcf8de52c1ba7c401bfef87 100644 (file)
@@ -3727,6 +3727,17 @@ sendresp:
 static int l2cap_connect_req(struct l2cap_conn *conn,
                             struct l2cap_cmd_hdr *cmd, u8 *data)
 {
+       struct hci_dev *hdev = conn->hcon->hdev;
+       struct hci_conn *hcon = conn->hcon;
+
+       hci_dev_lock(hdev);
+       if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
+           !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
+               mgmt_device_connected(hdev, &hcon->dst, hcon->type,
+                                     hcon->dst_type, 0, NULL, 0,
+                                     hcon->dev_class);
+       hci_dev_unlock(hdev);
+
        l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
        return 0;
 }
index 531a93d613d4f3f86eff5174cdbb1236b75cfb53..57f250c20e399851ca6998d9813dc60e3ffa3455 100644 (file)
@@ -352,7 +352,7 @@ static void __sco_sock_close(struct sock *sk)
 
        case BT_CONNECTED:
        case BT_CONFIG:
-               if (sco_pi(sk)->conn) {
+               if (sco_pi(sk)->conn->hcon) {
                        sk->sk_state = BT_DISCONN;
                        sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
                        hci_conn_put(sco_pi(sk)->conn->hcon);
index 68a9587c96945cdd3264cc310b68b7be95cfd953..5abefb12891d5144f97d1feacb0803937b0c9765 100644 (file)
@@ -859,6 +859,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
 
        skb_pull(skb, sizeof(code));
 
+       /*
+        * The SMP context must be initialized for all other PDUs except
+        * pairing and security requests. If we get any other PDU when
+        * not initialized simply disconnect (done if this function
+        * returns an error).
+        */
+       if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ &&
+           !conn->smp_chan) {
+               BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code);
+               kfree_skb(skb);
+               return -ENOTSUPP;
+       }
+
        switch (code) {
        case SMP_CMD_PAIRING_REQ:
                reason = smp_cmd_pairing_req(conn, skb);
index 7f884e3fb9554328b1e39650d5708a21b9f92a57..8660ea3be7054571defa59bcfbfc0cd0dd7ab99e 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/etherdevice.h>
 #include <linux/llc.h>
 #include <linux/slab.h>
+#include <linux/pkt_sched.h>
 #include <net/net_namespace.h>
 #include <net/llc.h>
 #include <net/llc_pdu.h>
@@ -40,6 +41,7 @@ static void br_send_bpdu(struct net_bridge_port *p,
 
        skb->dev = p->dev;
        skb->protocol = htons(ETH_P_802_2);
+       skb->priority = TC_PRIO_CONTROL;
 
        skb_reserve(skb, LLC_RESERVE);
        memcpy(__skb_put(skb, length), data, length);
index 0337e2b768628d353936dc09ba4fe483b2c5b638..368f9c3f9dc6505e693f56066da6dcb19ec2ebd6 100644 (file)
@@ -187,7 +187,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
                skb_queue_walk(queue, skb) {
                        *peeked = skb->peeked;
                        if (flags & MSG_PEEK) {
-                               if (*off >= skb->len) {
+                               if (*off >= skb->len && skb->len) {
                                        *off -= skb->len;
                                        continue;
                                }
index 515473ee52cbb6327a9310922d0fb1c3d763df2f..f64e439b4a00fe206fb634745a470475eb31cbb9 100644 (file)
@@ -6121,6 +6121,14 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
 
 static const struct ethtool_ops default_ethtool_ops;
 
+void netdev_set_default_ethtool_ops(struct net_device *dev,
+                                   const struct ethtool_ops *ops)
+{
+       if (dev->ethtool_ops == &default_ethtool_ops)
+               dev->ethtool_ops = ops;
+}
+EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
+
 /**
  *     alloc_netdev_mqs - allocate network device
  *     @sizeof_priv:   size of private data to allocate space for
index b29dacf900f9496a42a565a3b2c48ad9cf502c74..e6e1cbe863f57cc18dfc3e25ed2eeffca45e618b 100644 (file)
@@ -1781,10 +1781,13 @@ static ssize_t pktgen_thread_write(struct file *file,
                        return -EFAULT;
                i += len;
                mutex_lock(&pktgen_thread_lock);
-               pktgen_add_device(t, f);
+               ret = pktgen_add_device(t, f);
                mutex_unlock(&pktgen_thread_lock);
-               ret = count;
-               sprintf(pg_result, "OK: add_device=%s", f);
+               if (!ret) {
+                       ret = count;
+                       sprintf(pg_result, "OK: add_device=%s", f);
+               } else
+                       sprintf(pg_result, "ERROR: can not add device %s", f);
                goto out;
        }
 
index c31d9e8668c30346894adbf3be55eed4beeb1258..4425148d2b51592626b92a1451d9bc1208213fb8 100644 (file)
@@ -186,8 +186,6 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
        struct fastopen_queue *fastopenq =
            inet_csk(lsk)->icsk_accept_queue.fastopenq;
 
-       BUG_ON(!spin_is_locked(&sk->sk_lock.slock) && !sock_owned_by_user(sk));
-
        tcp_sk(sk)->fastopen_rsk = NULL;
        spin_lock_bh(&fastopenq->lock);
        fastopenq->qlen--;
index 57fb1ee6649f65a0994d86c6f8763fe6e14ba081..905dcc6ad1e3b480c01f87df5157f4e37de112a1 100644 (file)
@@ -35,6 +35,7 @@
 #include <net/sock.h>
 #include <net/compat.h>
 #include <net/scm.h>
+#include <net/cls_cgroup.h>
 
 
 /*
@@ -302,8 +303,10 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
                }
                /* Bump the usage count and install the file. */
                sock = sock_from_file(fp[i], &err);
-               if (sock)
+               if (sock) {
                        sock_update_netprioidx(sock->sk, current);
+                       sock_update_classid(sock->sk, current);
+               }
                fd_install(new_fd, get_file(fp[i]));
        }
 
index 3ab989b0de42a0bfe7905ee99d98901ee9e9a075..32443ebc3e890532810b94c6196b182ccdf558d4 100644 (file)
@@ -683,7 +683,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
        new->network_header     = old->network_header;
        new->mac_header         = old->mac_header;
        new->inner_transport_header = old->inner_transport_header;
-       new->inner_network_header = old->inner_transport_header;
+       new->inner_network_header = old->inner_network_header;
        skb_dst_copy(new, old);
        new->rxhash             = old->rxhash;
        new->ooo_okay           = old->ooo_okay;
@@ -1649,7 +1649,7 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
 
 static struct page *linear_to_page(struct page *page, unsigned int *len,
                                   unsigned int *offset,
-                                  struct sk_buff *skb, struct sock *sk)
+                                  struct sock *sk)
 {
        struct page_frag *pfrag = sk_page_frag(sk);
 
@@ -1682,14 +1682,14 @@ static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
 static bool spd_fill_page(struct splice_pipe_desc *spd,
                          struct pipe_inode_info *pipe, struct page *page,
                          unsigned int *len, unsigned int offset,
-                         struct sk_buff *skb, bool linear,
+                         bool linear,
                          struct sock *sk)
 {
        if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
                return true;
 
        if (linear) {
-               page = linear_to_page(page, len, &offset, skb, sk);
+               page = linear_to_page(page, len, &offset, sk);
                if (!page)
                        return true;
        }
@@ -1706,23 +1706,9 @@ static bool spd_fill_page(struct splice_pipe_desc *spd,
        return false;
 }
 
-static inline void __segment_seek(struct page **page, unsigned int *poff,
-                                 unsigned int *plen, unsigned int off)
-{
-       unsigned long n;
-
-       *poff += off;
-       n = *poff / PAGE_SIZE;
-       if (n)
-               *page = nth_page(*page, n);
-
-       *poff = *poff % PAGE_SIZE;
-       *plen -= off;
-}
-
 static bool __splice_segment(struct page *page, unsigned int poff,
                             unsigned int plen, unsigned int *off,
-                            unsigned int *len, struct sk_buff *skb,
+                            unsigned int *len,
                             struct splice_pipe_desc *spd, bool linear,
                             struct sock *sk,
                             struct pipe_inode_info *pipe)
@@ -1737,23 +1723,19 @@ static bool __splice_segment(struct page *page, unsigned int poff,
        }
 
        /* ignore any bits we already processed */
-       if (*off) {
-               __segment_seek(&page, &poff, &plen, *off);
-               *off = 0;
-       }
+       poff += *off;
+       plen -= *off;
+       *off = 0;
 
        do {
                unsigned int flen = min(*len, plen);
 
-               /* the linear region may spread across several pages  */
-               flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
-
-               if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
+               if (spd_fill_page(spd, pipe, page, &flen, poff,
+                                 linear, sk))
                        return true;
-
-               __segment_seek(&page, &poff, &plen, flen);
+               poff += flen;
+               plen -= flen;
                *len -= flen;
-
        } while (*len && plen);
 
        return false;
@@ -1777,7 +1759,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
        if (__splice_segment(virt_to_page(skb->data),
                             (unsigned long) skb->data & (PAGE_SIZE - 1),
                             skb_headlen(skb),
-                            offset, len, skb, spd,
+                            offset, len, spd,
                             skb_head_is_locked(skb),
                             sk, pipe))
                return true;
@@ -1790,7 +1772,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
 
                if (__splice_segment(skb_frag_page(f),
                                     f->page_offset, skb_frag_size(f),
-                                    offset, len, skb, spd, false, sk, pipe))
+                                    offset, len, spd, false, sk, pipe))
                        return true;
        }
 
index a0d8392491c3c3e947876c943457227e4ed4ad92..a69b4e4a02b5099043d98e5278355274e18ea72b 100644 (file)
@@ -269,7 +269,11 @@ static void ah_input_done(struct crypto_async_request *base, int err)
        skb->network_header += ah_hlen;
        memcpy(skb_network_header(skb), work_iph, ihl);
        __skb_pull(skb, ah_hlen + ihl);
-       skb_set_transport_header(skb, -ihl);
+
+       if (x->props.mode == XFRM_MODE_TUNNEL)
+               skb_reset_transport_header(skb);
+       else
+               skb_set_transport_header(skb, -ihl);
 out:
        kfree(AH_SKB_CB(skb)->tmp);
        xfrm_input_resume(skb, err);
@@ -381,7 +385,10 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
        skb->network_header += ah_hlen;
        memcpy(skb_network_header(skb), work_iph, ihl);
        __skb_pull(skb, ah_hlen + ihl);
-       skb_set_transport_header(skb, -ihl);
+       if (x->props.mode == XFRM_MODE_TUNNEL)
+               skb_reset_transport_header(skb);
+       else
+               skb_set_transport_header(skb, -ihl);
 
        err = nexthdr;
 
@@ -413,9 +420,12 @@ static void ah4_err(struct sk_buff *skb, u32 info)
        if (!x)
                return;
 
-       if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
+       if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
+               atomic_inc(&flow_cache_genid);
+               rt_genid_bump(net);
+
                ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
-       else
+       } else
                ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0);
        xfrm_state_put(x);
 }
index 9547a273b9e9829692e85d89627a66eee5d182b9..ded146b217f10dbd511da977fe1e146f848c8272 100644 (file)
@@ -928,24 +928,25 @@ static void parp_redo(struct sk_buff *skb)
 static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
                   struct packet_type *pt, struct net_device *orig_dev)
 {
-       struct arphdr *arp;
+       const struct arphdr *arp;
+
+       if (dev->flags & IFF_NOARP ||
+           skb->pkt_type == PACKET_OTHERHOST ||
+           skb->pkt_type == PACKET_LOOPBACK)
+               goto freeskb;
+
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (!skb)
+               goto out_of_mem;
 
        /* ARP header, plus 2 device addresses, plus 2 IP addresses.  */
        if (!pskb_may_pull(skb, arp_hdr_len(dev)))
                goto freeskb;
 
        arp = arp_hdr(skb);
-       if (arp->ar_hln != dev->addr_len ||
-           dev->flags & IFF_NOARP ||
-           skb->pkt_type == PACKET_OTHERHOST ||
-           skb->pkt_type == PACKET_LOOPBACK ||
-           arp->ar_pln != 4)
+       if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4)
                goto freeskb;
 
-       skb = skb_share_check(skb, GFP_ATOMIC);
-       if (skb == NULL)
-               goto out_of_mem;
-
        memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
 
        return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process);
index 424fafbc8cb02c49fb2dd3551f3583290f2fc330..b28e863fe0a7143b199bee51cfdf242c125f31cf 100644 (file)
@@ -85,3 +85,28 @@ out:
        return err;
 }
 EXPORT_SYMBOL(ip4_datagram_connect);
+
+void ip4_datagram_release_cb(struct sock *sk)
+{
+       const struct inet_sock *inet = inet_sk(sk);
+       const struct ip_options_rcu *inet_opt;
+       __be32 daddr = inet->inet_daddr;
+       struct flowi4 fl4;
+       struct rtable *rt;
+
+       if (! __sk_dst_get(sk) || __sk_dst_check(sk, 0))
+               return;
+
+       rcu_read_lock();
+       inet_opt = rcu_dereference(inet->inet_opt);
+       if (inet_opt && inet_opt->opt.srr)
+               daddr = inet_opt->opt.faddr;
+       rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr,
+                                  inet->inet_saddr, inet->inet_dport,
+                                  inet->inet_sport, sk->sk_protocol,
+                                  RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
+       if (!IS_ERR(rt))
+               __sk_dst_set(sk, &rt->dst);
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(ip4_datagram_release_cb);
index b61e9deb7c7ecc9cae92ab0365b871a773627cc4..3b4f0cd2e63edbd136683577873b288712a4b92a 100644 (file)
@@ -346,7 +346,10 @@ static int esp_input_done2(struct sk_buff *skb, int err)
 
        pskb_trim(skb, skb->len - alen - padlen - 2);
        __skb_pull(skb, hlen);
-       skb_set_transport_header(skb, -ihl);
+       if (x->props.mode == XFRM_MODE_TUNNEL)
+               skb_reset_transport_header(skb);
+       else
+               skb_set_transport_header(skb, -ihl);
 
        err = nexthdr[1];
 
@@ -499,9 +502,12 @@ static void esp4_err(struct sk_buff *skb, u32 info)
        if (!x)
                return;
 
-       if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
+       if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
+               atomic_inc(&flow_cache_genid);
+               rt_genid_bump(net);
+
                ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
-       else
+       } else
                ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
        xfrm_state_put(x);
 }
index 303012adf9e6e0b442268b6f53b50ac8aea745fe..e81b1caf2ea2a0ceaa20ef8e1847ebd4f71cd98e 100644 (file)
@@ -963,8 +963,12 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
                        ptr--;
                }
                if (tunnel->parms.o_flags&GRE_CSUM) {
+                       int offset = skb_transport_offset(skb);
+
                        *ptr = 0;
-                       *(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr));
+                       *(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset,
+                                                                skb->len - offset,
+                                                                0));
                }
        }
 
index 3c9d20880283de0f9b5244eae8184e76d9a20dcd..d9c4f113d7093bba7eba2beefc31cd0af4b9bb95 100644 (file)
@@ -590,7 +590,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
        case IP_TTL:
                if (optlen < 1)
                        goto e_inval;
-               if (val != -1 && (val < 0 || val > 255))
+               if (val != -1 && (val < 1 || val > 255))
                        goto e_inval;
                inet->uc_ttl = val;
                break;
index d3ab47e19a896277161c2bf1b87706c20ebd73be..9a46daed2f3c05be9ed82138edac731fcc571145 100644 (file)
@@ -47,9 +47,12 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
        if (!x)
                return;
 
-       if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
+       if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
+               atomic_inc(&flow_cache_genid);
+               rt_genid_bump(net);
+
                ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
-       else
+       } else
                ipv4_redirect(skb, net, 0, 0, IPPROTO_COMP, 0);
        xfrm_state_put(x);
 }
index 8f3d05424a3e8f2f318c36bf007fd17724533997..6f9c07268cf6d433379898421e978d74fa49952f 100644 (file)
@@ -738,6 +738,7 @@ struct proto ping_prot = {
        .recvmsg =      ping_recvmsg,
        .bind =         ping_bind,
        .backlog_rcv =  ping_queue_rcv_skb,
+       .release_cb =   ip4_datagram_release_cb,
        .hash =         ping_v4_hash,
        .unhash =       ping_v4_unhash,
        .get_port =     ping_v4_get_port,
index 73d1e4df4bf630f176f385b96639b4e803469458..6f08991409c3ad9d3620357d04d1f59005f7e2fa 100644 (file)
@@ -894,6 +894,7 @@ struct proto raw_prot = {
        .recvmsg           = raw_recvmsg,
        .bind              = raw_bind,
        .backlog_rcv       = raw_rcv_skb,
+       .release_cb        = ip4_datagram_release_cb,
        .hash              = raw_hash_sk,
        .unhash            = raw_unhash_sk,
        .obj_size          = sizeof(struct raw_sock),
index 844a9ef60dbd89f459515101ebb0db6e7cfaa8a3..a0fcc47fee732744baf42bf1e61772faad87982d 100644 (file)
@@ -912,6 +912,9 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
        struct dst_entry *dst = &rt->dst;
        struct fib_result res;
 
+       if (dst_metric_locked(dst, RTAX_MTU))
+               return;
+
        if (dst->dev->mtu < mtu)
                return;
 
@@ -962,7 +965,7 @@ void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
 }
 EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
 
-void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
+static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 {
        const struct iphdr *iph = (const struct iphdr *) skb->data;
        struct flowi4 fl4;
@@ -975,6 +978,53 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
                ip_rt_put(rt);
        }
 }
+
+void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
+{
+       const struct iphdr *iph = (const struct iphdr *) skb->data;
+       struct flowi4 fl4;
+       struct rtable *rt;
+       struct dst_entry *dst;
+       bool new = false;
+
+       bh_lock_sock(sk);
+       rt = (struct rtable *) __sk_dst_get(sk);
+
+       if (sock_owned_by_user(sk) || !rt) {
+               __ipv4_sk_update_pmtu(skb, sk, mtu);
+               goto out;
+       }
+
+       __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+
+       if (!__sk_dst_check(sk, 0)) {
+               rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
+               if (IS_ERR(rt))
+                       goto out;
+
+               new = true;
+       }
+
+       __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
+
+       dst = dst_check(&rt->dst, 0);
+       if (!dst) {
+               if (new)
+                       dst_release(&rt->dst);
+
+               rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
+               if (IS_ERR(rt))
+                       goto out;
+
+               new = true;
+       }
+
+       if (new)
+               __sk_dst_set(sk, &rt->dst);
+
+out:
+       bh_unlock_sock(sk);
+}
 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
 
 void ipv4_redirect(struct sk_buff *skb, struct net *net,
@@ -1120,7 +1170,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
        if (!mtu || time_after_eq(jiffies, rt->dst.expires))
                mtu = dst_metric_raw(dst, RTAX_MTU);
 
-       if (mtu && rt_is_output_route(rt))
+       if (mtu)
                return mtu;
 
        mtu = dst->dev->mtu;
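
The new ipv4_sk_update_pmtu() above follows a lock, validate, rebuild, publish shape: lock the socket, fall back to the generic path if it is busy, otherwise check the cached route and rebuild it when the check fails. A loose userspace sketch of that cache-revalidation shape with a generation counter follows; every name in it is invented for illustration.

#include <pthread.h>
#include <stdio.h>

static int global_gen = 1;              /* bumped whenever cached entries go stale */

struct cache_entry { int gen; int value; };

struct owner {
        pthread_mutex_t lock;
        struct cache_entry entry;
        int has_entry;
};

static struct cache_entry build_entry(int value)
{
        return (struct cache_entry){ .gen = global_gen, .value = value };
}

static int lookup(struct owner *o, int fresh_value)
{
        int v;

        pthread_mutex_lock(&o->lock);
        if (!o->has_entry || o->entry.gen != global_gen) {
                /* Stale or missing: rebuild while still holding the lock. */
                o->entry = build_entry(fresh_value);
                o->has_entry = 1;
        }
        v = o->entry.value;
        pthread_mutex_unlock(&o->lock);
        return v;
}

int main(void)
{
        struct owner o = { .lock = PTHREAD_MUTEX_INITIALIZER };

        printf("%d\n", lookup(&o, 42));  /* builds and caches 42 */
        global_gen++;                    /* invalidate the cache */
        printf("%d\n", lookup(&o, 7));   /* rebuilt: prints 7    */
        return 0;
}
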
index 1ca253635f7acdda23624c28ae1ead4645bd0dd5..2aa69c8ae60c1990c76e69fe5aef46b990b6da6e 100644 (file)
@@ -1428,12 +1428,12 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
 }
 #endif
 
-static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
+static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 {
        struct sk_buff *skb;
        u32 offset;
 
-       skb_queue_walk(&sk->sk_receive_queue, skb) {
+       while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
                offset = seq - TCP_SKB_CB(skb)->seq;
                if (tcp_hdr(skb)->syn)
                        offset--;
@@ -1441,6 +1441,11 @@ static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
                        *off = offset;
                        return skb;
                }
+               /* This looks weird, but this can happen if TCP collapsing
+                * splitted a fat GRO packet, while we released socket lock
+                * in skb_splice_bits()
+                */
+               sk_eat_skb(sk, skb, false);
        }
        return NULL;
 }
@@ -1482,7 +1487,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                                        break;
                        }
                        used = recv_actor(desc, skb, offset, len);
-                       if (used < 0) {
+                       if (used <= 0) {
                                if (!copied)
                                        copied = used;
                                break;
@@ -1520,8 +1525,10 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
        tcp_rcv_space_adjust(sk);
 
        /* Clean up data we have read: This will do ACK frames. */
-       if (copied > 0)
+       if (copied > 0) {
+               tcp_recv_skb(sk, seq, &offset);
                tcp_cleanup_rbuf(sk, copied);
+       }
        return copied;
 }
 EXPORT_SYMBOL(tcp_read_sock);
index 291f2ed7cc311649a72700d65098b6d70ba338b5..cdf2e707bb100500511d6b2d46ece69e19163cfa 100644 (file)
@@ -310,6 +310,12 @@ void tcp_slow_start(struct tcp_sock *tp)
 {
        int cnt; /* increase in packets */
        unsigned int delta = 0;
+       u32 snd_cwnd = tp->snd_cwnd;
+
+       if (unlikely(!snd_cwnd)) {
+               pr_err_once("snd_cwnd is nul, please report this bug.\n");
+               snd_cwnd = 1U;
+       }
 
        /* RFC3465: ABC Slow start
         * Increase only after a full MSS of bytes is acked
@@ -324,7 +330,7 @@ void tcp_slow_start(struct tcp_sock *tp)
        if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
                cnt = sysctl_tcp_max_ssthresh >> 1;     /* limited slow start */
        else
-               cnt = tp->snd_cwnd;                     /* exponential increase */
+               cnt = snd_cwnd;                         /* exponential increase */
 
        /* RFC3465: ABC
         * We MAY increase by 2 if discovered delayed ack
@@ -334,11 +340,11 @@ void tcp_slow_start(struct tcp_sock *tp)
        tp->bytes_acked = 0;
 
        tp->snd_cwnd_cnt += cnt;
-       while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-               tp->snd_cwnd_cnt -= tp->snd_cwnd;
+       while (tp->snd_cwnd_cnt >= snd_cwnd) {
+               tp->snd_cwnd_cnt -= snd_cwnd;
                delta++;
        }
-       tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp);
+       tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp);
 }
 EXPORT_SYMBOL_GPL(tcp_slow_start);
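
The tcp_slow_start() change above snapshots snd_cwnd once, substitutes 1 if it is ever zero, and only then converts the accumulated ACK counter into whole-segment window growth. A standalone sketch of that counter arithmetic follows, with plain integers standing in for the tcp_sock fields; it simplifies the real function and is illustrative only.

#include <stdio.h>

struct cc_state {
        unsigned int snd_cwnd;          /* congestion window, in segments        */
        unsigned int snd_cwnd_cnt;      /* ACK counter toward the next increase  */
        unsigned int snd_cwnd_clamp;    /* upper bound on snd_cwnd               */
};

static void slow_start(struct cc_state *tp, unsigned int acked_segments)
{
        unsigned int snd_cwnd = tp->snd_cwnd;   /* snapshot, as in the fix */
        unsigned int delta = 0;

        if (snd_cwnd == 0)              /* defensive, mirrors the pr_err_once() path */
                snd_cwnd = 1;

        /* One window's worth of ACKs buys one extra segment of cwnd. */
        tp->snd_cwnd_cnt += acked_segments;
        while (tp->snd_cwnd_cnt >= snd_cwnd) {
                tp->snd_cwnd_cnt -= snd_cwnd;
                delta++;
        }
        tp->snd_cwnd = snd_cwnd + delta < tp->snd_cwnd_clamp ?
                       snd_cwnd + delta : tp->snd_cwnd_clamp;
}

int main(void)
{
        struct cc_state tp = { .snd_cwnd = 10, .snd_cwnd_cnt = 0,
                               .snd_cwnd_clamp = 64 };

        slow_start(&tp, 10);                    /* a full window of ACKs ...  */
        printf("cwnd=%u\n", tp.snd_cwnd);       /* ... grows cwnd to 11       */
        return 0;
}
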
 
index a28e4db8a952b15cc2f06578f12acbe6353c162a..ad70a962c20e1daa7637a190d1c81208d6656263 100644 (file)
@@ -3504,6 +3504,11 @@ static bool tcp_process_frto(struct sock *sk, int flag)
                }
        } else {
                if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
+                       if (!tcp_packets_in_flight(tp)) {
+                               tcp_enter_frto_loss(sk, 2, flag);
+                               return true;
+                       }
+
                        /* Prevent sending of new data. */
                        tp->snd_cwnd = min(tp->snd_cwnd,
                                           tcp_packets_in_flight(tp));
@@ -5543,7 +5548,7 @@ slow_path:
        if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
                goto csum_error;
 
-       if (!th->ack)
+       if (!th->ack && !th->rst)
                goto discard;
 
        /*
@@ -5649,8 +5654,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
         * the remote receives only the retransmitted (regular) SYNs: either
         * the original SYN-data or the corresponding SYN-ACK is lost.
         */
-       syn_drop = (cookie->len <= 0 && data &&
-                   inet_csk(sk)->icsk_retransmits);
+       syn_drop = (cookie->len <= 0 && data && tp->total_retrans);
 
        tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
 
@@ -5988,7 +5992,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                        goto discard;
        }
 
-       if (!th->ack)
+       if (!th->ack && !th->rst)
                goto discard;
 
        if (!tcp_validate_incoming(sk, skb, th, 0))
index 54139fa514e6ee2d82c4cf9ca8528e012b546a90..eadb693eef55dffdaacf538b36ced145b09fa191 100644 (file)
@@ -369,11 +369,10 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
         * We do take care of PMTU discovery (RFC1191) special case :
         * we can receive locally generated ICMP messages while socket is held.
         */
-       if (sock_owned_by_user(sk) &&
-           type != ICMP_DEST_UNREACH &&
-           code != ICMP_FRAG_NEEDED)
-               NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
-
+       if (sock_owned_by_user(sk)) {
+               if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
+                       NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+       }
        if (sk->sk_state == TCP_CLOSE)
                goto out;
 
@@ -497,6 +496,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                 * errors returned from accept().
                 */
                inet_csk_reqsk_queue_drop(sk, req, prev);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
                goto out;
 
        case TCP_SYN_SENT:
@@ -1501,8 +1501,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
         * clogging syn queue with openreqs with exponentially increasing
         * timeout.
         */
-       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
                goto drop;
+       }
 
        req = inet_reqsk_alloc(&tcp_request_sock_ops);
        if (!req)
@@ -1667,6 +1669,7 @@ drop_and_release:
 drop_and_free:
        reqsk_free(req);
 drop:
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return 0;
 }
 EXPORT_SYMBOL(tcp_v4_conn_request);
index 79c8dbe59b5474bdc3e23ba8adc227e1bee016a4..1f4d405eafba746d2b8085132a740b17a6658c80 100644 (file)
@@ -1952,6 +1952,7 @@ struct proto udp_prot = {
        .recvmsg           = udp_recvmsg,
        .sendpage          = udp_sendpage,
        .backlog_rcv       = __udp_queue_rcv_skb,
+       .release_cb        = ip4_datagram_release_cb,
        .hash              = udp_lib_hash,
        .unhash            = udp_lib_unhash,
        .rehash            = udp_v4_rehash,
index 408cac4ae00a41129e4bf89506e96f646c607380..1b5d8cb9b123dff17c85d59f6d1c8e44cfdefa4e 100644 (file)
@@ -154,6 +154,11 @@ static void addrconf_type_change(struct net_device *dev,
                                 unsigned long event);
 static int addrconf_ifdown(struct net_device *dev, int how);
 
+static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
+                                                 int plen,
+                                                 const struct net_device *dev,
+                                                 u32 flags, u32 noflags);
+
 static void addrconf_dad_start(struct inet6_ifaddr *ifp);
 static void addrconf_dad_timer(unsigned long data);
 static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
@@ -250,12 +255,6 @@ static inline bool addrconf_qdisc_ok(const struct net_device *dev)
        return !qdisc_tx_is_noop(dev);
 }
 
-/* Check if a route is valid prefix route */
-static inline int addrconf_is_prefix_route(const struct rt6_info *rt)
-{
-       return (rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0;
-}
-
 static void addrconf_del_timer(struct inet6_ifaddr *ifp)
 {
        if (del_timer(&ifp->timer))
@@ -941,17 +940,15 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
        if ((ifp->flags & IFA_F_PERMANENT) && onlink < 1) {
                struct in6_addr prefix;
                struct rt6_info *rt;
-               struct net *net = dev_net(ifp->idev->dev);
-               struct flowi6 fl6 = {};
 
                ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len);
-               fl6.flowi6_oif = ifp->idev->dev->ifindex;
-               fl6.daddr = prefix;
-               rt = (struct rt6_info *)ip6_route_lookup(net, &fl6,
-                                                        RT6_LOOKUP_F_IFACE);
 
-               if (rt != net->ipv6.ip6_null_entry &&
-                   addrconf_is_prefix_route(rt)) {
+               rt = addrconf_get_prefix_route(&prefix,
+                                              ifp->prefix_len,
+                                              ifp->idev->dev,
+                                              0, RTF_GATEWAY | RTF_DEFAULT);
+
+               if (rt) {
                        if (onlink == 0) {
                                ip6_del_rt(rt);
                                rt = NULL;
@@ -1663,6 +1660,7 @@ static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
        if (dev->addr_len != IEEE802154_ADDR_LEN)
                return -1;
        memcpy(eui, dev->dev_addr, 8);
+       eui[0] ^= 2;
        return 0;
 }
 
@@ -1877,7 +1875,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
                        continue;
                if ((rt->rt6i_flags & flags) != flags)
                        continue;
-               if ((noflags != 0) && ((rt->rt6i_flags & flags) != 0))
+               if ((rt->rt6i_flags & noflags) != 0)
                        continue;
                dst_hold(&rt->dst);
                break;
index ecc35b93314bb1b73c70df219c9cf8f125c77908..384233188ac1e38468b5edf71c105c21e6bf38bc 100644 (file)
@@ -472,7 +472,10 @@ static void ah6_input_done(struct crypto_async_request *base, int err)
        skb->network_header += ah_hlen;
        memcpy(skb_network_header(skb), work_iph, hdr_len);
        __skb_pull(skb, ah_hlen + hdr_len);
-       skb_set_transport_header(skb, -hdr_len);
+       if (x->props.mode == XFRM_MODE_TUNNEL)
+               skb_reset_transport_header(skb);
+       else
+               skb_set_transport_header(skb, -hdr_len);
 out:
        kfree(AH_SKB_CB(skb)->tmp);
        xfrm_input_resume(skb, err);
@@ -593,9 +596,13 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
 
        skb->network_header += ah_hlen;
        memcpy(skb_network_header(skb), work_iph, hdr_len);
-       skb->transport_header = skb->network_header;
        __skb_pull(skb, ah_hlen + hdr_len);
 
+       if (x->props.mode == XFRM_MODE_TUNNEL)
+               skb_reset_transport_header(skb);
+       else
+               skb_set_transport_header(skb, -hdr_len);
+
        err = nexthdr;
 
 out_free:
index 8edf2601065af07790500809b8dee9fad0fc8eaa..7a778b9a7b859586219a37c7f80bd6ffeefe27c6 100644 (file)
@@ -380,7 +380,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
                if (skb->protocol == htons(ETH_P_IPV6)) {
                        sin->sin6_addr = ipv6_hdr(skb)->saddr;
                        if (np->rxopt.all)
-                               datagram_recv_ctl(sk, msg, skb);
+                               ip6_datagram_recv_ctl(sk, msg, skb);
                        if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
                                sin->sin6_scope_id = IP6CB(skb)->iif;
                } else {
@@ -468,7 +468,8 @@ out:
 }
 
 
-int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
+int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
+                         struct sk_buff *skb)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct inet6_skb_parm *opt = IP6CB(skb);
@@ -597,11 +598,12 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
        }
        return 0;
 }
+EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl);
 
-int datagram_send_ctl(struct net *net, struct sock *sk,
-                     struct msghdr *msg, struct flowi6 *fl6,
-                     struct ipv6_txoptions *opt,
-                     int *hlimit, int *tclass, int *dontfrag)
+int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
+                         struct msghdr *msg, struct flowi6 *fl6,
+                         struct ipv6_txoptions *opt,
+                         int *hlimit, int *tclass, int *dontfrag)
 {
        struct in6_pktinfo *src_info;
        struct cmsghdr *cmsg;
@@ -871,4 +873,4 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
 exit_f:
        return err;
 }
-EXPORT_SYMBOL_GPL(datagram_send_ctl);
+EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
index 282f3723ee194704ab7fa0757e2c032254903300..40ffd72243a4f2d1ec9e5da494b3f2650dd027a6 100644 (file)
@@ -300,7 +300,10 @@ static int esp_input_done2(struct sk_buff *skb, int err)
 
        pskb_trim(skb, skb->len - alen - padlen - 2);
        __skb_pull(skb, hlen);
-       skb_set_transport_header(skb, -hdr_len);
+       if (x->props.mode == XFRM_MODE_TUNNEL)
+               skb_reset_transport_header(skb);
+       else
+               skb_set_transport_header(skb, -hdr_len);
 
        err = nexthdr[1];
 
index b4a9fd51dae74bd8143b2d32e791e089f1d8562f..fff5bdd8b6800d56679957a62e201e84fd846d09 100644 (file)
@@ -81,10 +81,22 @@ static inline struct sock *icmpv6_sk(struct net *net)
        return net->ipv6.icmp_sk[smp_processor_id()];
 }
 
+static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+                      u8 type, u8 code, int offset, __be32 info)
+{
+       struct net *net = dev_net(skb->dev);
+
+       if (type == ICMPV6_PKT_TOOBIG)
+               ip6_update_pmtu(skb, net, info, 0, 0);
+       else if (type == NDISC_REDIRECT)
+               ip6_redirect(skb, net, 0, 0);
+}
+
 static int icmpv6_rcv(struct sk_buff *skb);
 
 static const struct inet6_protocol icmpv6_protocol = {
        .handler        =       icmpv6_rcv,
+       .err_handler    =       icmpv6_err,
        .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
index 29124b7a04c8dfb335df69014b5348e22b3fb6ab..d6de4b447250b752ec810c5e21b07f667ef95a8c 100644 (file)
@@ -365,8 +365,8 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
                msg.msg_control = (void*)(fl->opt+1);
                memset(&flowi6, 0, sizeof(flowi6));
 
-               err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk,
-                                       &junk, &junk);
+               err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
+                                           &junk, &junk, &junk);
                if (err)
                        goto done;
                err = -EINVAL;
index c727e471275199ef27039f79b95755803435211a..131dd097736d174800cff6c9d554d274a4951884 100644 (file)
@@ -960,7 +960,7 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
        int ret;
 
        if (!ip6_tnl_xmit_ctl(t))
-               return -1;
+               goto tx_err;
 
        switch (skb->protocol) {
        case htons(ETH_P_IP):
index 5552d13ae92f8554c04ec912b82317dfeef065a5..0c7c03d50dc0342c7caad25c9d24c3931c50438b 100644 (file)
@@ -1213,10 +1213,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                if (dst_allfrag(rt->dst.path))
                        cork->flags |= IPCORK_ALLFRAG;
                cork->length = 0;
-               exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len;
+               exthdrlen = (opt ? opt->opt_flen : 0);
                length += exthdrlen;
                transhdrlen += exthdrlen;
-               dst_exthdrlen = rt->dst.header_len;
+               dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
        } else {
                rt = (struct rt6_info *)cork->dst;
                fl6 = &inet->cork.fl.u.ip6;
index 26dcdec9e3a5f6cbbe799402c39bb5d09c9d561d..8fd154e5f07966dd049bcdb8e93d9f7b8ab5452e 100644 (file)
@@ -1710,6 +1710,9 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
                        return -EINVAL;
                if (get_user(v, (u32 __user *)optval))
                        return -EFAULT;
+               /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
+               if (v != RT_TABLE_DEFAULT && v >= 100000000)
+                       return -EINVAL;
                if (sk == mrt->mroute6_sk)
                        return -EBUSY;
 
index ee94d31c9d4d494cdfe2dce1ac812e4e094fe65f..d1e2e8ef29c54abd86064728ec145f0478f360af 100644 (file)
@@ -476,8 +476,8 @@ sticky_done:
                msg.msg_controllen = optlen;
                msg.msg_control = (void*)(opt+1);
 
-               retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk,
-                                        &junk);
+               retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk,
+                                            &junk, &junk);
                if (retv)
                        goto done;
 update:
@@ -1002,7 +1002,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                release_sock(sk);
 
                if (skb) {
-                       int err = datagram_recv_ctl(sk, &msg, skb);
+                       int err = ip6_datagram_recv_ctl(sk, &msg, skb);
                        kfree_skb(skb);
                        if (err)
                                return err;
index 7302b0b7b642e91257b119c5b89938f4ac2846e6..83acc1405a18dcef218625e8517431978393ac12 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/ipv6.h>
+#include <net/ipv6.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv6.h>
 #include <linux/netfilter_ipv6/ip6t_NPT.h>
@@ -18,11 +19,20 @@ static int ip6t_npt_checkentry(const struct xt_tgchk_param *par)
 {
        struct ip6t_npt_tginfo *npt = par->targinfo;
        __wsum src_sum = 0, dst_sum = 0;
+       struct in6_addr pfx;
        unsigned int i;
 
        if (npt->src_pfx_len > 64 || npt->dst_pfx_len > 64)
                return -EINVAL;
 
+       /* Ensure that the address bits beyond the prefix length are zero */
+       ipv6_addr_prefix(&pfx, &npt->src_pfx.in6, npt->src_pfx_len);
+       if (!ipv6_addr_equal(&pfx, &npt->src_pfx.in6))
+               return -EINVAL;
+       ipv6_addr_prefix(&pfx, &npt->dst_pfx.in6, npt->dst_pfx_len);
+       if (!ipv6_addr_equal(&pfx, &npt->dst_pfx.in6))
+               return -EINVAL;
+
        for (i = 0; i < ARRAY_SIZE(npt->src_pfx.in6.s6_addr16); i++) {
                src_sum = csum_add(src_sum,
                                (__force __wsum)npt->src_pfx.in6.s6_addr16[i]);
@@ -30,7 +40,7 @@ static int ip6t_npt_checkentry(const struct xt_tgchk_param *par)
                                (__force __wsum)npt->dst_pfx.in6.s6_addr16[i]);
        }
 
-       npt->adjustment = (__force __sum16) csum_sub(src_sum, dst_sum);
+       npt->adjustment = ~csum_fold(csum_sub(src_sum, dst_sum));
        return 0;
 }
 
@@ -51,7 +61,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
 
                idx = i / 32;
                addr->s6_addr32[idx] &= mask;
-               addr->s6_addr32[idx] |= npt->dst_pfx.in6.s6_addr32[idx];
+               addr->s6_addr32[idx] |= ~mask & npt->dst_pfx.in6.s6_addr32[idx];
        }
 
        if (pfx_len <= 48)
@@ -66,8 +76,8 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
                        return false;
        }
 
-       sum = (__force __sum16) csum_add((__force __wsum)addr->s6_addr16[idx],
-                        npt->adjustment);
+       sum = ~csum_fold(csum_add(csum_unfold((__force __sum16)addr->s6_addr16[idx]),
+                                 csum_unfold(npt->adjustment)));
        if (sum == CSUM_MANGLED_0)
                sum = 0;
        *(__force __sum16 *)&addr->s6_addr16[idx] = sum;
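The new ipv6_addr_prefix() test rejects rules whose source or destination prefix has bits set beyond the stated length: 2001:db8::/64 is accepted, while 2001:db8::1/64 is refused. A stand-alone sketch of an equivalent check (illustrative only; like the target itself it is meant for prefix lengths of at most 64):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>

/* True if no address bits are set beyond the first plen bits. */
static bool host_bits_clear(const struct in6_addr *a, unsigned int plen)
{
        unsigned int i;

        for (i = 0; i < 16; i++) {
                unsigned int bits = plen >= 8 ? 8 : plen;
                unsigned char mask = bits ? 0xff << (8 - bits) : 0;

                if (a->s6_addr[i] & (unsigned char)~mask)
                        return false;
                plen -= bits;
        }
        return true;
}

int main(void)
{
        struct in6_addr a;

        inet_pton(AF_INET6, "2001:db8::", &a);
        printf("%d\n", host_bits_clear(&a, 64));    /* 1: accepted */
        inet_pton(AF_INET6, "2001:db8::1", &a);
        printf("%d\n", host_bits_clear(&a, 64));    /* 0: rejected */
        return 0;
}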
index 6cd29b1e8b926e26a7bc0df5b5c5263fc2e23d84..70fa8144999780cef19f5102247882ba6d24f6f6 100644 (file)
@@ -507,7 +507,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
        sock_recv_ts_and_drops(msg, sk, skb);
 
        if (np->rxopt.all)
-               datagram_recv_ctl(sk, msg, skb);
+               ip6_datagram_recv_ctl(sk, msg, skb);
 
        err = copied;
        if (flags & MSG_TRUNC)
@@ -822,8 +822,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(struct ipv6_txoptions);
 
-               err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
-                                       &hlimit, &tclass, &dontfrag);
+               err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+                                           &hlimit, &tclass, &dontfrag);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
index e229a3bc345dc4138a188282c4ab4f1717882832..363d8b7772e8d23f180a8db6c0ae707225ad8a63 100644 (file)
@@ -928,7 +928,7 @@ restart:
        dst_hold(&rt->dst);
        read_unlock_bh(&table->tb6_lock);
 
-       if (!rt->n && !(rt->rt6i_flags & RTF_NONEXTHOP))
+       if (!rt->n && !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL)))
                nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
        else if (!(rt->dst.flags & DST_HOST))
                nrt = rt6_alloc_clone(rt, &fl6->daddr);
index 93825dd3a7c070be4dd1e294a155b409525337e2..4f43537197ef805927e84a182ae1e703edb0f2c8 100644 (file)
@@ -423,6 +423,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                }
 
                inet_csk_reqsk_queue_drop(sk, req, prev);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
                goto out;
 
        case TCP_SYN_SENT:
@@ -958,8 +959,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                        goto drop;
        }
 
-       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
                goto drop;
+       }
 
        req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
        if (req == NULL)
@@ -1108,6 +1111,7 @@ drop_and_release:
 drop_and_free:
        reqsk_free(req);
 drop:
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return 0; /* don't send reset */
 }
 
index dfaa29b8b2939c03fef13fa416f6fdaef031bf5d..fb083295ff0bde8029040b3f867be6ca46bf0901 100644 (file)
@@ -443,7 +443,7 @@ try_again:
                        ip_cmsg_recv(msg, skb);
        } else {
                if (np->rxopt.all)
-                       datagram_recv_ctl(sk, msg, skb);
+                       ip6_datagram_recv_ctl(sk, msg, skb);
        }
 
        err = copied;
@@ -1153,8 +1153,8 @@ do_udp_sendmsg:
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(*opt);
 
-               err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
-                                       &hlimit, &tclass, &dontfrag);
+               err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+                                           &hlimit, &tclass, &dontfrag);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
index 3ad1f9db5f8b697167c8ed1ed249567f36ccf008..df082508362d892e623a80b57c732504e0b78581 100644 (file)
@@ -1806,7 +1806,7 @@ static void iucv_external_interrupt(struct ext_code ext_code,
        struct iucv_irq_data *p;
        struct iucv_irq_list *work;
 
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_IUC]++;
+       inc_irq_stat(IRQEXT_IUC);
        p = iucv_irq_data[smp_processor_id()];
        if (p->ippathid >= iucv_max_pathid) {
                WARN_ON(p->ippathid >= iucv_max_pathid);
index 1a9f3723c13cb45b608bbc07fe4a9803df926523..2ac884d0e89bd06ff820073351004509b0838902 100644 (file)
@@ -168,6 +168,51 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
 
 }
 
+/* Look up the tunnel socket, possibly involving the fs code if the socket is
+ * owned by userspace.  A struct sock returned from this function must be
+ * released using l2tp_tunnel_sock_put once you're done with it.
+ */
+struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
+{
+       int err = 0;
+       struct socket *sock = NULL;
+       struct sock *sk = NULL;
+
+       if (!tunnel)
+               goto out;
+
+       if (tunnel->fd >= 0) {
+               /* Socket is owned by userspace, who might be in the process
+                * of closing it.  Look the socket up using the fd to ensure
+                * consistency.
+                */
+               sock = sockfd_lookup(tunnel->fd, &err);
+               if (sock)
+                       sk = sock->sk;
+       } else {
+               /* Socket is owned by kernelspace */
+               sk = tunnel->sock;
+       }
+
+out:
+       return sk;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup);
+
+/* Drop a reference to a tunnel socket obtained via l2tp_tunnel_sock_lookup */
+void l2tp_tunnel_sock_put(struct sock *sk)
+{
+       struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+       if (tunnel) {
+               if (tunnel->fd >= 0) {
+                       /* Socket is owned by userspace */
+                       sockfd_put(sk->sk_socket);
+               }
+               sock_put(sk);
+       }
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put);
+
 /* Lookup a session by id in the global session list
  */
 static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
@@ -1123,8 +1168,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
        struct udphdr *uh;
        struct inet_sock *inet;
        __wsum csum;
-       int old_headroom;
-       int new_headroom;
        int headroom;
        int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
        int udp_len;
@@ -1136,16 +1179,12 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
         */
        headroom = NET_SKB_PAD + sizeof(struct iphdr) +
                uhlen + hdr_len;
-       old_headroom = skb_headroom(skb);
        if (skb_cow_head(skb, headroom)) {
                kfree_skb(skb);
                return NET_XMIT_DROP;
        }
 
-       new_headroom = skb_headroom(skb);
        skb_orphan(skb);
-       skb->truesize += new_headroom - old_headroom;
-
        /* Setup L2TP header */
        session->build_header(session, __skb_push(skb, hdr_len));
 
@@ -1607,6 +1646,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
        tunnel->old_sk_destruct = sk->sk_destruct;
        sk->sk_destruct = &l2tp_tunnel_destruct;
        tunnel->sock = sk;
+       tunnel->fd = fd;
        lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
 
        sk->sk_allocation = GFP_ATOMIC;
@@ -1642,24 +1682,32 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
  */
 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
 {
-       int err = 0;
-       struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL;
+       int err = -EBADF;
+       struct socket *sock = NULL;
+       struct sock *sk = NULL;
+
+       sk = l2tp_tunnel_sock_lookup(tunnel);
+       if (!sk)
+               goto out;
+
+       sock = sk->sk_socket;
+       BUG_ON(!sock);
 
        /* Force the tunnel socket to close. This will eventually
         * cause the tunnel to be deleted via the normal socket close
         * mechanisms when userspace closes the tunnel socket.
         */
-       if (sock != NULL) {
-               err = inet_shutdown(sock, 2);
+       err = inet_shutdown(sock, 2);
 
-               /* If the tunnel's socket was created by the kernel,
-                * close the socket here since the socket was not
-                * created by userspace.
-                */
-               if (sock->file == NULL)
-                       err = inet_release(sock);
-       }
+       /* If the tunnel's socket was created by the kernel,
+        * close the socket here since the socket was not
+        * created by userspace.
+        */
+       if (sock->file == NULL)
+               err = inet_release(sock);
 
+       l2tp_tunnel_sock_put(sk);
+out:
        return err;
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
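l2tp_tunnel_delete() above establishes the pattern for the new helpers: look the socket up (taking the fd reference when userspace owns it), use it, then drop it. A minimal sketch of a caller following the same pattern (the function name is hypothetical; kernel context assumed):

/* Illustrative only: pair l2tp_tunnel_sock_lookup() with
 * l2tp_tunnel_sock_put(), as l2tp_tunnel_delete() does above. */
static void example_with_tunnel_sock(struct l2tp_tunnel *tunnel)
{
        struct sock *sk = l2tp_tunnel_sock_lookup(tunnel);

        if (!sk)
                return;

        /* sk (and, for userspace-owned tunnels, the fd reference) is
         * held here, so sk->sk_socket can be used safely. */

        l2tp_tunnel_sock_put(sk);
}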
index 56d583e083a7baf7b0cf707a76882a88701d8025..e62204cad4fe00c91e88f791c7c7896c130249fa 100644 (file)
@@ -188,7 +188,8 @@ struct l2tp_tunnel {
        int (*recv_payload_hook)(struct sk_buff *skb);
        void (*old_sk_destruct)(struct sock *);
        struct sock             *sock;          /* Parent socket */
-       int                     fd;
+       int                     fd;             /* Parent fd, if tunnel socket
+                                                * was created by userspace */
 
        uint8_t                 priv[0];        /* private data */
 };
@@ -228,6 +229,8 @@ out:
        return tunnel;
 }
 
+extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel);
+extern void l2tp_tunnel_sock_put(struct sock *sk);
 extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);
 extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
 extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
index 927547171bc7119f57a7b8afecf453ec5eeabf61..8ee4a86ae996ca624e06a1422e4e07a2e957c584 100644 (file)
@@ -554,8 +554,8 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
                memset(opt, 0, sizeof(struct ipv6_txoptions));
                opt->tot_len = sizeof(struct ipv6_txoptions);
 
-               err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
-                                       &hlimit, &tclass, &dontfrag);
+               err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+                                           &hlimit, &tclass, &dontfrag);
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
@@ -646,7 +646,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
                            struct msghdr *msg, size_t len, int noblock,
                            int flags, int *addr_len)
 {
-       struct inet_sock *inet = inet_sk(sk);
+       struct ipv6_pinfo *np = inet6_sk(sk);
        struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name;
        size_t copied = 0;
        int err = -EOPNOTSUPP;
@@ -688,8 +688,8 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
                        lsa->l2tp_scope_id = IP6CB(skb)->iif;
        }
 
-       if (inet->cmsg_flags)
-               ip_cmsg_recv(msg, skb);
+       if (np->rxopt.all)
+               ip6_datagram_recv_ctl(sk, msg, skb);
 
        if (flags & MSG_TRUNC)
                copied = skb->len;
index 286366ef8930a08981f04952d4459f2b9a413071..716605c241f482e4793e86bdb1dc61d1c3596602 100644 (file)
@@ -388,8 +388,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
        struct l2tp_session *session;
        struct l2tp_tunnel *tunnel;
        struct pppol2tp_session *ps;
-       int old_headroom;
-       int new_headroom;
        int uhlen, headroom;
 
        if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
@@ -408,7 +406,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
        if (tunnel == NULL)
                goto abort_put_sess;
 
-       old_headroom = skb_headroom(skb);
        uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
        headroom = NET_SKB_PAD +
                   sizeof(struct iphdr) + /* IP header */
@@ -418,9 +415,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
        if (skb_cow_head(skb, headroom))
                goto abort_put_sess_tun;
 
-       new_headroom = skb_headroom(skb);
-       skb->truesize += new_headroom - old_headroom;
-
        /* Setup PPP header */
        __skb_push(skb, sizeof(ppph));
        skb->data[0] = ppph[0];
index 5c61677487cf831324994e24e22528d15f35aa3a..0479c64aa83cc8beb27a90dedd1340cbeb2065a2 100644 (file)
@@ -164,7 +164,17 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
                        sta = sta_info_get(sdata, mac_addr);
                else
                        sta = sta_info_get_bss(sdata, mac_addr);
-               if (!sta) {
+               /*
+                * The ASSOC test makes sure the driver is ready to
+                * receive the key. When wpa_supplicant has roamed
+                * using FT, it attempts to set the key before
+                * association has completed; this rejects that attempt
+                * so it will set the key again after association.
+                *
+                * TODO: accept the key if we have a station entry and
+                *       add it to the device after adding the station.
+                */
+               if (!sta || !test_sta_flag(sta, WLAN_STA_ASSOC)) {
                        ieee80211_key_free(sdata->local, key);
                        err = -ENOENT;
                        goto out_unlock;
@@ -1009,6 +1019,8 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
        if (old_probe_resp)
                kfree_rcu(old_probe_resp, rcu_head);
 
+       list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+               sta_info_flush(local, vlan);
        sta_info_flush(local, sdata);
        ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
 
@@ -1992,7 +2004,8 @@ static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-       memcpy(sdata->vif.bss_conf.mcast_rate, rate, sizeof(rate));
+       memcpy(sdata->vif.bss_conf.mcast_rate, rate,
+              sizeof(int) * IEEE80211_NUM_BANDS);
 
        return 0;
 }
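The old memcpy length, sizeof(rate), is the size of a pointer rather than of the whole table, because an array parameter decays to a pointer inside the callee; hence the explicit sizeof(int) * IEEE80211_NUM_BANDS. A stand-alone illustration of the pitfall (the 8/16 values assume a 64-bit build):

#include <stdio.h>

static void takes_array(int rate[4])
{
        /* "rate" is really an int *, so this is sizeof(int *), not 16 */
        printf("callee: %zu\n", sizeof(rate));      /* 8 on a 64-bit build */
}

int main(void)
{
        int rate[4] = { 0 };

        printf("caller: %zu\n", sizeof(rate));      /* 16 */
        takes_array(rate);
        return 0;
}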
index 53f03120db55dc3d2c1e8c775a054182d21dac69..80e55527504b91cd71b6d33a946507ad10b59c7e 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/nl80211.h>
 #include <linux/export.h>
+#include <linux/rtnetlink.h>
 #include <net/cfg80211.h>
 #include "ieee80211_i.h"
 #include "driver-ops.h"
@@ -197,6 +198,15 @@ static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
 
        ctx = container_of(conf, struct ieee80211_chanctx, conf);
 
+       if (sdata->vif.type == NL80211_IFTYPE_AP) {
+               struct ieee80211_sub_if_data *vlan;
+
+               /* for the VLAN list */
+               ASSERT_RTNL();
+               list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+                       rcu_assign_pointer(vlan->vif.chanctx_conf, NULL);
+       }
+
        ieee80211_unassign_vif_chanctx(sdata, ctx);
        if (ctx->refcount == 0)
                ieee80211_free_chanctx(local, ctx);
@@ -316,6 +326,15 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
                goto out;
        }
 
+       if (sdata->vif.type == NL80211_IFTYPE_AP) {
+               struct ieee80211_sub_if_data *vlan;
+
+               /* for the VLAN list */
+               ASSERT_RTNL();
+               list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+                       rcu_assign_pointer(vlan->vif.chanctx_conf, &ctx->conf);
+       }
+
        ieee80211_recalc_smps_chanctx(local, ctx);
  out:
        mutex_unlock(&local->chanctx_mtx);
@@ -331,6 +350,25 @@ void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
        mutex_unlock(&sdata->local->chanctx_mtx);
 }
 
+void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_sub_if_data *ap;
+       struct ieee80211_chanctx_conf *conf;
+
+       if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->bss))
+               return;
+
+       ap = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);
+
+       mutex_lock(&local->chanctx_mtx);
+
+       conf = rcu_dereference_protected(ap->vif.chanctx_conf,
+                                        lockdep_is_held(&local->chanctx_mtx));
+       rcu_assign_pointer(sdata->vif.chanctx_conf, conf);
+       mutex_unlock(&local->chanctx_mtx);
+}
+
 void ieee80211_iter_chan_contexts_atomic(
        struct ieee80211_hw *hw,
        void (*iter)(struct ieee80211_hw *hw,
index 8881fc77fb1324c50d79ca6dcc2dd5423d7a18c9..6b7644e818d8f30629513318d4f9a17e4e622606 100644 (file)
@@ -703,8 +703,8 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
        sdata_info(sdata,
                   "No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n");
 
-       ieee80211_request_internal_scan(sdata,
-                       ifibss->ssid, ifibss->ssid_len, NULL);
+       ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len,
+                                   NULL);
 }
 
 static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
@@ -802,9 +802,8 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
                                        IEEE80211_SCAN_INTERVAL)) {
                sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
 
-               ieee80211_request_internal_scan(sdata,
-                               ifibss->ssid, ifibss->ssid_len,
-                               ifibss->fixed_channel ? ifibss->channel : NULL);
+               ieee80211_request_ibss_scan(sdata, ifibss->ssid,
+                                           ifibss->ssid_len, chan);
        } else {
                int interval = IEEE80211_SCAN_INTERVAL;
 
index 42d0d026773090633aec6cecf21d5c6065a4b69d..2ed065c095629403c42574872fec6a8b85819871 100644 (file)
@@ -92,8 +92,6 @@ struct ieee80211_bss {
 
        u32 device_ts;
 
-       u8 dtim_period;
-
        bool wmm_used;
        bool uapsd_supported;
 
@@ -140,7 +138,6 @@ enum ieee80211_bss_corrupt_data_flags {
 
 /**
  * enum ieee80211_valid_data_flags - BSS valid data flags
- * @IEEE80211_BSS_VALID_DTIM: DTIM data was gathered from non-corrupt IE
  * @IEEE80211_BSS_VALID_WMM: WMM/UAPSD data was gathered from non-corrupt IE
  * @IEEE80211_BSS_VALID_RATES: Supported rates were gathered from non-corrupt IE
  * @IEEE80211_BSS_VALID_ERP: ERP flag was gathered from non-corrupt IE
@@ -151,7 +148,6 @@ enum ieee80211_bss_corrupt_data_flags {
  * beacon/probe response.
  */
 enum ieee80211_bss_valid_data_flags {
-       IEEE80211_BSS_VALID_DTIM                = BIT(0),
        IEEE80211_BSS_VALID_WMM                 = BIT(1),
        IEEE80211_BSS_VALID_RATES               = BIT(2),
        IEEE80211_BSS_VALID_ERP                 = BIT(3)
@@ -440,6 +436,7 @@ struct ieee80211_if_managed {
        unsigned long timers_running; /* used for quiesce/restart */
        bool powersave; /* powersave requested for this iface */
        bool broken_ap; /* AP is broken -- turn off powersave */
+       u8 dtim_period;
        enum ieee80211_smps_mode req_smps, /* requested smps mode */
                                 driver_smps_mode; /* smps mode request */
 
@@ -773,6 +770,10 @@ struct ieee80211_sub_if_data {
                u32 mntr_flags;
        } u;
 
+       spinlock_t cleanup_stations_lock;
+       struct list_head cleanup_stations;
+       struct work_struct cleanup_stations_wk;
+
 #ifdef CONFIG_MAC80211_DEBUGFS
        struct {
                struct dentry *dir;
@@ -1329,9 +1330,9 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 
 /* scan/BSS handling */
 void ieee80211_scan_work(struct work_struct *work);
-int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
-                                   const u8 *ssid, u8 ssid_len,
-                                   struct ieee80211_channel *chan);
+int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
+                               const u8 *ssid, u8 ssid_len,
+                               struct ieee80211_channel *chan);
 int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
                           struct cfg80211_scan_request *req);
 void ieee80211_scan_cancel(struct ieee80211_local *local);
@@ -1357,10 +1358,8 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);
 void ieee80211_sched_scan_stopped_work(struct work_struct *work);
 
 /* off-channel helpers */
-void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
-                                   bool offchannel_ps_enable);
-void ieee80211_offchannel_return(struct ieee80211_local *local,
-                                bool offchannel_ps_disable);
+void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local);
+void ieee80211_offchannel_return(struct ieee80211_local *local);
 void ieee80211_roc_setup(struct ieee80211_local *local);
 void ieee80211_start_next_roc(struct ieee80211_local *local);
 void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata);
@@ -1628,6 +1627,7 @@ ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
                          const struct cfg80211_chan_def *chandef,
                          enum ieee80211_chanctx_mode mode);
 void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata);
+void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata);
 
 void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
                                   struct ieee80211_chanctx *chanctx);
index 09a80b55cf5a1ffaf06fdbe892ac469f6790b2bf..8be854e86cd987d61e01cc7eabc8f7ddb068eff3 100644 (file)
@@ -207,17 +207,8 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)
 
 static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
 {
-       int meshhdrlen;
-       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
-       meshhdrlen = (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) ? 5 : 0;
-
-       /* FIX: what would be proper limits for MTU?
-        * This interface uses 802.3 frames. */
-       if (new_mtu < 256 ||
-           new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
+       if (new_mtu < 256 || new_mtu > IEEE80211_MAX_DATA_LEN)
                return -EINVAL;
-       }
 
        dev->mtu = new_mtu;
        return 0;
@@ -586,11 +577,13 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 
        switch (sdata->vif.type) {
        case NL80211_IFTYPE_AP_VLAN:
-               /* no need to tell driver, but set carrier */
-               if (rtnl_dereference(sdata->bss->beacon))
+               /* no need to tell driver, but set carrier and chanctx */
+               if (rtnl_dereference(sdata->bss->beacon)) {
+                       ieee80211_vif_vlan_copy_chanctx(sdata);
                        netif_carrier_on(dev);
-               else
+               } else {
                        netif_carrier_off(dev);
+               }
                break;
        case NL80211_IFTYPE_MONITOR:
                if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
@@ -839,6 +832,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
        switch (sdata->vif.type) {
        case NL80211_IFTYPE_AP_VLAN:
                list_del(&sdata->u.vlan.list);
+               rcu_assign_pointer(sdata->vif.chanctx_conf, NULL);
                /* no need to tell driver */
                break;
        case NL80211_IFTYPE_MONITOR:
@@ -865,20 +859,11 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                cancel_work_sync(&sdata->work);
                /*
                 * When we get here, the interface is marked down.
-                * Call rcu_barrier() to wait both for the RX path
+                * Call synchronize_rcu() to wait for the RX path
                 * should it be using the interface and enqueuing
-                * frames at this very time on another CPU, and
-                * for the sta free call_rcu callbacks.
-                */
-               rcu_barrier();
-
-               /*
-                * free_sta_rcu() enqueues a work for the actual
-                * sta cleanup, so we need to flush it while
-                * sdata is still valid.
+                * frames at this very time on another CPU.
                 */
-               flush_workqueue(local->workqueue);
-
+               synchronize_rcu();
                skb_queue_purge(&sdata->skb_queue);
 
                /*
@@ -1498,6 +1483,15 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
        mutex_unlock(&local->iflist_mtx);
 }
 
+static void ieee80211_cleanup_sdata_stas_wk(struct work_struct *wk)
+{
+       struct ieee80211_sub_if_data *sdata;
+
+       sdata = container_of(wk, struct ieee80211_sub_if_data, cleanup_stations_wk);
+
+       ieee80211_cleanup_sdata_stas(sdata);
+}
+
 int ieee80211_if_add(struct ieee80211_local *local, const char *name,
                     struct wireless_dev **new_wdev, enum nl80211_iftype type,
                     struct vif_params *params)
@@ -1573,6 +1567,10 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
        INIT_LIST_HEAD(&sdata->key_list);
 
+       spin_lock_init(&sdata->cleanup_stations_lock);
+       INIT_LIST_HEAD(&sdata->cleanup_stations);
+       INIT_WORK(&sdata->cleanup_stations_wk, ieee80211_cleanup_sdata_stas_wk);
+
        for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
                struct ieee80211_supported_band *sband;
                sband = local->hw.wiphy->bands[i];
index 1bf03f9ff3ba74394fb26201dfba3b3fd2e86a01..649ad513547f99728d43dbeeabcd2b58d7862c30 100644 (file)
@@ -163,7 +163,7 @@ int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
                return -ENOMEM;
        sdata->u.mesh.rmc->idx_mask = RMC_BUCKETS - 1;
        for (i = 0; i < RMC_BUCKETS; i++)
-               INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i].list);
+               INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i]);
        return 0;
 }
 
@@ -177,7 +177,7 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata)
                return;
 
        for (i = 0; i < RMC_BUCKETS; i++)
-               list_for_each_entry_safe(p, n, &rmc->bucket[i].list, list) {
+               list_for_each_entry_safe(p, n, &rmc->bucket[i], list) {
                        list_del(&p->list);
                        kmem_cache_free(rm_cache, p);
                }
@@ -210,7 +210,7 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
        /* Don't care about endianness since only match matters */
        memcpy(&seqnum, &mesh_hdr->seqnum, sizeof(mesh_hdr->seqnum));
        idx = le32_to_cpu(mesh_hdr->seqnum) & rmc->idx_mask;
-       list_for_each_entry_safe(p, n, &rmc->bucket[idx].list, list) {
+       list_for_each_entry_safe(p, n, &rmc->bucket[idx], list) {
                ++entries;
                if (time_after(jiffies, p->exp_time) ||
                                (entries == RMC_QUEUE_MAX_LEN)) {
@@ -229,7 +229,7 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
        p->seqnum = seqnum;
        p->exp_time = jiffies + RMC_TIMEOUT;
        memcpy(p->sa, sa, ETH_ALEN);
-       list_add(&p->list, &rmc->bucket[idx].list);
+       list_add(&p->list, &rmc->bucket[idx]);
        return 0;
 }
 
index 7c9215fb2ac84ec5c17d3ee3d372a7a6bb4feedc..84c28c6101cdd2ef832920677d05b7f02b27ada2 100644 (file)
@@ -184,7 +184,7 @@ struct rmc_entry {
 };
 
 struct mesh_rmc {
-       struct rmc_entry bucket[RMC_BUCKETS];
+       struct list_head bucket[RMC_BUCKETS];
        u32 idx_mask;
 };
 
index 47aeee2d8db160f6fa9eb62c32131bbd686acc29..2659e428b80c86cf367a75b034c8d89a856f28a1 100644 (file)
@@ -215,6 +215,7 @@ static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
        skb->priority = 7;
 
        info->control.vif = &sdata->vif;
+       info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
        ieee80211_set_qos_hdr(sdata, skb);
 }
 
@@ -246,11 +247,13 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
                return -EAGAIN;
 
        skb = dev_alloc_skb(local->tx_headroom +
+                           IEEE80211_ENCRYPT_HEADROOM +
+                           IEEE80211_ENCRYPT_TAILROOM +
                            hdr_len +
                            2 + 15 /* PERR IE */);
        if (!skb)
                return -1;
-       skb_reserve(skb, local->tx_headroom);
+       skb_reserve(skb, local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM);
        mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
        memset(mgmt, 0, hdr_len);
        mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
index 7753a9ca98a6c67870f68bd40b22f3af453a3945..5107248af7fbd87fae600240c48e570c591abc5f 100644 (file)
@@ -1074,12 +1074,8 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
                if (beaconint_us > latency) {
                        local->ps_sdata = NULL;
                } else {
-                       struct ieee80211_bss *bss;
                        int maxslp = 1;
-                       u8 dtimper;
-
-                       bss = (void *)found->u.mgd.associated->priv;
-                       dtimper = bss->dtim_period;
+                       u8 dtimper = found->u.mgd.dtim_period;
 
                        /* If the TIM IE is invalid, pretend the value is 1 */
                        if (!dtimper)
@@ -1410,10 +1406,17 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_led_assoc(local, 1);
 
-       if (local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD)
-               bss_conf->dtim_period = bss->dtim_period;
-       else
+       if (local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) {
+               /*
+                * If the AP is buggy we may get here with no DTIM period
+                * known, so assume it's 1 which is the only safe assumption
+                * in that case, although if the TIM IE is broken powersave
+                * probably just won't work at all.
+                */
+               bss_conf->dtim_period = sdata->u.mgd.dtim_period ?: 1;
+       } else {
                bss_conf->dtim_period = 0;
+       }
 
        bss_conf->assoc = 1;
 
@@ -1562,6 +1565,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 
        sdata->u.mgd.timers_running = 0;
 
+       sdata->vif.bss_conf.dtim_period = 0;
+
        ifmgd->flags = 0;
        ieee80211_vif_release_channel(sdata);
 }
@@ -2373,11 +2378,18 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_channel *channel;
        bool need_ps = false;
 
-       if (sdata->u.mgd.associated &&
-           ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) {
-               bss = (void *)sdata->u.mgd.associated->priv;
+       if ((sdata->u.mgd.associated &&
+            ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) ||
+           (sdata->u.mgd.assoc_data &&
+            ether_addr_equal(mgmt->bssid,
+                             sdata->u.mgd.assoc_data->bss->bssid))) {
                /* not previously set so we may need to recalc */
-               need_ps = !bss->dtim_period;
+               need_ps = sdata->u.mgd.associated && !sdata->u.mgd.dtim_period;
+
+               if (elems->tim && !elems->parse_error) {
+                       struct ieee80211_tim_ie *tim_ie = elems->tim;
+                       sdata->u.mgd.dtim_period = tim_ie->dtim_period;
+               }
        }
 
        if (elems->ds_params && elems->ds_params_len == 1)
@@ -3388,6 +3400,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
 
        ret = 0;
 
+out:
        while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
                                        IEEE80211_CHAN_DISABLED)) {
                if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
@@ -3396,14 +3409,13 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
                        goto out;
                }
 
-               ret = chandef_downgrade(chandef);
+               ret |= chandef_downgrade(chandef);
        }
 
        if (chandef->width != vht_chandef.width)
                sdata_info(sdata,
-                          "local regulatory prevented using AP HT/VHT configuration, downgraded\n");
+                          "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
 
-out:
        WARN_ON_ONCE(!cfg80211_chandef_valid(chandef));
        return ret;
 }
@@ -3517,8 +3529,11 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
         */
        ret = ieee80211_vif_use_channel(sdata, &chandef,
                                        IEEE80211_CHANCTX_SHARED);
-       while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
+       while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
                ifmgd->flags |= chandef_downgrade(&chandef);
+               ret = ieee80211_vif_use_channel(sdata, &chandef,
+                                               IEEE80211_CHANCTX_SHARED);
+       }
        return ret;
 }
 
@@ -3896,20 +3911,41 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        /* kick off associate process */
 
        ifmgd->assoc_data = assoc_data;
+       ifmgd->dtim_period = 0;
 
        err = ieee80211_prep_connection(sdata, req->bss, true);
        if (err)
                goto err_clear;
 
-       if (!bss->dtim_period &&
-           sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) {
-               /*
-                * Wait up to one beacon interval ...
-                * should this be more if we miss one?
-                */
-               sdata_info(sdata, "waiting for beacon from %pM\n",
-                          ifmgd->bssid);
-               assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval);
+       if (sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD) {
+               const struct cfg80211_bss_ies *beacon_ies;
+
+               rcu_read_lock();
+               beacon_ies = rcu_dereference(req->bss->beacon_ies);
+               if (!beacon_ies) {
+                       /*
+                        * Wait up to one beacon interval ...
+                        * should this be more if we miss one?
+                        */
+                       sdata_info(sdata, "waiting for beacon from %pM\n",
+                                  ifmgd->bssid);
+                       assoc_data->timeout =
+                               TU_TO_EXP_TIME(req->bss->beacon_interval);
+               } else {
+                       const u8 *tim_ie = cfg80211_find_ie(WLAN_EID_TIM,
+                                                           beacon_ies->data,
+                                                           beacon_ies->len);
+                       if (tim_ie && tim_ie[1] >=
+                                       sizeof(struct ieee80211_tim_ie)) {
+                               const struct ieee80211_tim_ie *tim;
+                               tim = (void *)(tim_ie + 2);
+                               ifmgd->dtim_period = tim->dtim_period;
+                       }
+                       assoc_data->have_beacon = true;
+                       assoc_data->sent_assoc = false;
+                       assoc_data->timeout = jiffies;
+               }
+               rcu_read_unlock();
        } else {
                assoc_data->have_beacon = true;
                assoc_data->sent_assoc = false;
index a5379aea7d09d2410c70a3b467a1f336aa32dc43..a3ad4c3c80a34229801bdbdd1124201485a2f671 100644 (file)
@@ -102,8 +102,7 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
        ieee80211_sta_reset_conn_monitor(sdata);
 }
 
-void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
-                                   bool offchannel_ps_enable)
+void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
 {
        struct ieee80211_sub_if_data *sdata;
 
@@ -134,8 +133,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
 
                if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
                        netif_tx_stop_all_queues(sdata->dev);
-                       if (offchannel_ps_enable &&
-                           (sdata->vif.type == NL80211_IFTYPE_STATION) &&
+                       if (sdata->vif.type == NL80211_IFTYPE_STATION &&
                            sdata->u.mgd.associated)
                                ieee80211_offchannel_ps_enable(sdata);
                }
@@ -143,8 +141,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
        mutex_unlock(&local->iflist_mtx);
 }
 
-void ieee80211_offchannel_return(struct ieee80211_local *local,
-                                bool offchannel_ps_disable)
+void ieee80211_offchannel_return(struct ieee80211_local *local)
 {
        struct ieee80211_sub_if_data *sdata;
 
@@ -163,11 +160,9 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
                        continue;
 
                /* Tell AP we're back */
-               if (offchannel_ps_disable &&
-                   sdata->vif.type == NL80211_IFTYPE_STATION) {
-                       if (sdata->u.mgd.associated)
-                               ieee80211_offchannel_ps_disable(sdata);
-               }
+               if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+                   sdata->u.mgd.associated)
+                       ieee80211_offchannel_ps_disable(sdata);
 
                if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
                        /*
@@ -385,7 +380,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
                        local->tmp_channel = NULL;
                        ieee80211_hw_config(local, 0);
 
-                       ieee80211_offchannel_return(local, true);
+                       ieee80211_offchannel_return(local);
                }
 
                ieee80211_recalc_idle(local);
index 8ed83dcc149fc7f38ca9beaf83a2c67a82c336ec..bf82e69d0601bf9210e817837b2af7725a9f7d87 100644 (file)
@@ -113,18 +113,6 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
                        bss->valid_data |= IEEE80211_BSS_VALID_ERP;
        }
 
-       if (elems->tim && (!elems->parse_error ||
-                          !(bss->valid_data & IEEE80211_BSS_VALID_DTIM))) {
-               struct ieee80211_tim_ie *tim_ie = elems->tim;
-               bss->dtim_period = tim_ie->dtim_period;
-               if (!elems->parse_error)
-                       bss->valid_data |= IEEE80211_BSS_VALID_DTIM;
-       }
-
-       /* If the beacon had no TIM IE, or it was invalid, use 1 */
-       if (beacon && !bss->dtim_period)
-               bss->dtim_period = 1;
-
        /* replace old supported rates if we get new values */
        if (!elems->parse_error ||
            !(bss->valid_data & IEEE80211_BSS_VALID_RATES)) {
@@ -304,7 +292,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
        if (!was_hw_scan) {
                ieee80211_configure_filter(local);
                drv_sw_scan_complete(local);
-               ieee80211_offchannel_return(local, true);
+               ieee80211_offchannel_return(local);
        }
 
        ieee80211_recalc_idle(local);
@@ -353,7 +341,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
        local->next_scan_state = SCAN_DECISION;
        local->scan_channel_idx = 0;
 
-       ieee80211_offchannel_stop_vifs(local, true);
+       ieee80211_offchannel_stop_vifs(local);
 
        ieee80211_configure_filter(local);
 
@@ -690,12 +678,8 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
        local->scan_channel = NULL;
        ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 
-       /*
-        * Re-enable vifs and beaconing.  Leave PS
-        * in off-channel state..will put that back
-        * on-channel at the end of scanning.
-        */
-       ieee80211_offchannel_return(local, false);
+       /* disable PS */
+       ieee80211_offchannel_return(local);
 
        *next_delay = HZ / 5;
        /* afterwards, resume scan & go to next channel */
@@ -705,8 +689,7 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
 static void ieee80211_scan_state_resume(struct ieee80211_local *local,
                                        unsigned long *next_delay)
 {
-       /* PS already is in off-channel mode */
-       ieee80211_offchannel_stop_vifs(local, false);
+       ieee80211_offchannel_stop_vifs(local);
 
        if (local->ops->flush) {
                drv_flush(local, false);
@@ -832,9 +815,9 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
        return res;
 }
 
-int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
-                                   const u8 *ssid, u8 ssid_len,
-                                   struct ieee80211_channel *chan)
+int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
+                               const u8 *ssid, u8 ssid_len,
+                               struct ieee80211_channel *chan)
 {
        struct ieee80211_local *local = sdata->local;
        int ret = -EBUSY;
@@ -848,22 +831,36 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
 
        /* fill internal scan request */
        if (!chan) {
-               int i, nchan = 0;
+               int i, max_n;
+               int n_ch = 0;
 
                for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
                        if (!local->hw.wiphy->bands[band])
                                continue;
-                       for (i = 0;
-                            i < local->hw.wiphy->bands[band]->n_channels;
-                            i++) {
-                               local->int_scan_req->channels[nchan] =
+
+                       max_n = local->hw.wiphy->bands[band]->n_channels;
+                       for (i = 0; i < max_n; i++) {
+                               struct ieee80211_channel *tmp_ch =
                                    &local->hw.wiphy->bands[band]->channels[i];
-                               nchan++;
+
+                               if (tmp_ch->flags & (IEEE80211_CHAN_NO_IBSS |
+                                                    IEEE80211_CHAN_DISABLED))
+                                       continue;
+
+                               local->int_scan_req->channels[n_ch] = tmp_ch;
+                               n_ch++;
                        }
                }
 
-               local->int_scan_req->n_channels = nchan;
+               if (WARN_ON_ONCE(n_ch == 0))
+                       goto unlock;
+
+               local->int_scan_req->n_channels = n_ch;
        } else {
+               if (WARN_ON_ONCE(chan->flags & (IEEE80211_CHAN_NO_IBSS |
+                                               IEEE80211_CHAN_DISABLED)))
+                       goto unlock;
+
                local->int_scan_req->channels[0] = chan;
                local->int_scan_req->n_channels = 1;
        }
index f3e502502fee27374dc4c183733cbcc95217aed5..ca9fde1981889a559efa6b49385f071f1475a06f 100644 (file)
@@ -91,9 +91,8 @@ static int sta_info_hash_del(struct ieee80211_local *local,
        return -ENOENT;
 }
 
-static void free_sta_work(struct work_struct *wk)
+static void cleanup_single_sta(struct sta_info *sta)
 {
-       struct sta_info *sta = container_of(wk, struct sta_info, free_sta_wk);
        int ac, i;
        struct tid_ampdu_tx *tid_tx;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -153,11 +152,35 @@ static void free_sta_work(struct work_struct *wk)
        sta_info_free(local, sta);
 }
 
+void ieee80211_cleanup_sdata_stas(struct ieee80211_sub_if_data *sdata)
+{
+       struct sta_info *sta;
+
+       spin_lock_bh(&sdata->cleanup_stations_lock);
+       while (!list_empty(&sdata->cleanup_stations)) {
+               sta = list_first_entry(&sdata->cleanup_stations,
+                                      struct sta_info, list);
+               list_del(&sta->list);
+               spin_unlock_bh(&sdata->cleanup_stations_lock);
+
+               cleanup_single_sta(sta);
+
+               spin_lock_bh(&sdata->cleanup_stations_lock);
+       }
+
+       spin_unlock_bh(&sdata->cleanup_stations_lock);
+}
+
 static void free_sta_rcu(struct rcu_head *h)
 {
        struct sta_info *sta = container_of(h, struct sta_info, rcu_head);
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
 
-       ieee80211_queue_work(&sta->local->hw, &sta->free_sta_wk);
+       spin_lock(&sdata->cleanup_stations_lock);
+       list_add_tail(&sta->list, &sdata->cleanup_stations);
+       spin_unlock(&sdata->cleanup_stations_lock);
+
+       ieee80211_queue_work(&sdata->local->hw, &sdata->cleanup_stations_wk);
 }
 
 /* protected by RCU */
@@ -310,7 +333,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 
        spin_lock_init(&sta->lock);
        INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
-       INIT_WORK(&sta->free_sta_wk, free_sta_work);
        INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
        mutex_init(&sta->ampdu_mlme.mtx);
 
@@ -862,7 +884,7 @@ void sta_info_init(struct ieee80211_local *local)
 
 void sta_info_stop(struct ieee80211_local *local)
 {
-       del_timer(&local->sta_cleanup);
+       del_timer_sync(&local->sta_cleanup);
        sta_info_flush(local, NULL);
 }
 
@@ -891,6 +913,20 @@ int sta_info_flush(struct ieee80211_local *local,
        }
        mutex_unlock(&local->sta_mtx);
 
+       rcu_barrier();
+
+       if (sdata) {
+               ieee80211_cleanup_sdata_stas(sdata);
+               cancel_work_sync(&sdata->cleanup_stations_wk);
+       } else {
+               mutex_lock(&local->iflist_mtx);
+               list_for_each_entry(sdata, &local->interfaces, list) {
+                       ieee80211_cleanup_sdata_stas(sdata);
+                       cancel_work_sync(&sdata->cleanup_stations_wk);
+               }
+               mutex_unlock(&local->iflist_mtx);
+       }
+
        return ret;
 }
 
index 1489bca9ea97c19ee525ea44c0d1af794ec94cca..37c1889afd3ae02f9d9d5f2573738a28d26c266e 100644 (file)
@@ -299,7 +299,6 @@ struct sta_info {
        spinlock_t lock;
 
        struct work_struct drv_unblock_wk;
-       struct work_struct free_sta_wk;
 
        u16 listen_interval;
 
@@ -563,4 +562,6 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta);
 void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta);
 void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta);
 
+void ieee80211_cleanup_sdata_stas(struct ieee80211_sub_if_data *sdata);
+
 #endif /* STA_INFO_H */
index e9eadc40c09cc20bce9ad37531ef45ae767f62e8..467c1d1b66f2f5615182abd689349027b88abe64 100644 (file)
@@ -1673,10 +1673,13 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
                        chanctx_conf =
                                rcu_dereference(tmp_sdata->vif.chanctx_conf);
        }
-       if (!chanctx_conf)
-               goto fail_rcu;
 
-       chan = chanctx_conf->def.chan;
+       if (chanctx_conf)
+               chan = chanctx_conf->def.chan;
+       else if (!local->use_chanctx)
+               chan = local->_oper_channel;
+       else
+               goto fail_rcu;
 
        /*
         * Frame injection is not allowed if beaconing is not allowed
index 746048b13ef3aff9659b7161199d2298960662ec..ae8ec6f2768888eec1fb8b8c68b4242eff033525 100644 (file)
@@ -61,14 +61,27 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
        return 1;
 }
 
+static void sctp_nat_csum(struct sk_buff *skb, sctp_sctphdr_t *sctph,
+                         unsigned int sctphoff)
+{
+       __u32 crc32;
+       struct sk_buff *iter;
+
+       crc32 = sctp_start_cksum((__u8 *)sctph, skb_headlen(skb) - sctphoff);
+       skb_walk_frags(skb, iter)
+               crc32 = sctp_update_cksum((u8 *) iter->data,
+                                         skb_headlen(iter), crc32);
+       sctph->checksum = sctp_end_cksum(crc32);
+
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
 static int
 sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
                  struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
 {
        sctp_sctphdr_t *sctph;
        unsigned int sctphoff = iph->len;
-       struct sk_buff *iter;
-       __be32 crc32;
 
 #ifdef CONFIG_IP_VS_IPV6
        if (cp->af == AF_INET6 && iph->fragoffs)
@@ -92,13 +105,7 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
        sctph = (void *) skb_network_header(skb) + sctphoff;
        sctph->source = cp->vport;
 
-       /* Calculate the checksum */
-       crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff);
-       skb_walk_frags(skb, iter)
-               crc32 = sctp_update_cksum((u8 *) iter->data, skb_headlen(iter),
-                                         crc32);
-       crc32 = sctp_end_cksum(crc32);
-       sctph->checksum = crc32;
+       sctp_nat_csum(skb, sctph, sctphoff);
 
        return 1;
 }
@@ -109,8 +116,6 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
 {
        sctp_sctphdr_t *sctph;
        unsigned int sctphoff = iph->len;
-       struct sk_buff *iter;
-       __be32 crc32;
 
 #ifdef CONFIG_IP_VS_IPV6
        if (cp->af == AF_INET6 && iph->fragoffs)
@@ -134,13 +139,7 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
        sctph = (void *) skb_network_header(skb) + sctphoff;
        sctph->dest = cp->dport;
 
-       /* Calculate the checksum */
-       crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff);
-       skb_walk_frags(skb, iter)
-               crc32 = sctp_update_cksum((u8 *) iter->data, skb_headlen(iter),
-                                         crc32);
-       crc32 = sctp_end_cksum(crc32);
-       sctph->checksum = crc32;
+       sctp_nat_csum(skb, sctph, sctphoff);
 
        return 1;
 }
index effa10c9e4e325bc3d5538f2d94988c191d7ba56..44fd10c539ac42dfaa967974051bdfacb17099fb 100644 (file)
@@ -1795,6 +1795,8 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
                                             GFP_KERNEL);
                        if (!tinfo->buf)
                                goto outtinfo;
+               } else {
+                       tinfo->buf = NULL;
                }
                tinfo->id = id;
 
index 016d95ead930cba8e6d2e9335cd2b64f473c09d1..e4a0c4fb3a7cef64d1f15c9173d5a3e62ad616b4 100644 (file)
@@ -1376,11 +1376,12 @@ void nf_conntrack_cleanup(struct net *net)
        synchronize_net();
        nf_conntrack_proto_fini(net);
        nf_conntrack_cleanup_net(net);
+}
 
-       if (net_eq(net, &init_net)) {
-               RCU_INIT_POINTER(nf_ct_destroy, NULL);
-               nf_conntrack_cleanup_init_net();
-       }
+void nf_conntrack_cleanup_end(void)
+{
+       RCU_INIT_POINTER(nf_ct_destroy, NULL);
+       nf_conntrack_cleanup_init_net();
 }
 
 void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
index 363285d544a1c7402152e6a7da3a7129d94b83ae..e7185c68481659445b571ab5e7866728dd3947ec 100644 (file)
@@ -575,6 +575,7 @@ static int __init nf_conntrack_standalone_init(void)
 static void __exit nf_conntrack_standalone_fini(void)
 {
        unregister_pernet_subsys(&nf_conntrack_net_ops);
+       nf_conntrack_cleanup_end();
 }
 
 module_init(nf_conntrack_standalone_init);
index 8d987c3573fd4ca9e1708e188d487e53ebd7dfd7..7b3a9e5999c0664cebe524fefba7130a6df003ce 100644 (file)
@@ -345,19 +345,27 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target,
 }
 EXPORT_SYMBOL_GPL(xt_find_revision);
 
-static char *textify_hooks(char *buf, size_t size, unsigned int mask)
+static char *
+textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
 {
-       static const char *const names[] = {
+       static const char *const inetbr_names[] = {
                "PREROUTING", "INPUT", "FORWARD",
                "OUTPUT", "POSTROUTING", "BROUTING",
        };
-       unsigned int i;
+       static const char *const arp_names[] = {
+               "INPUT", "FORWARD", "OUTPUT",
+       };
+       const char *const *names;
+       unsigned int i, max;
        char *p = buf;
        bool np = false;
        int res;
 
+       names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
+       max   = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
+                                          ARRAY_SIZE(inetbr_names);
        *p = '\0';
-       for (i = 0; i < ARRAY_SIZE(names); ++i) {
+       for (i = 0; i < max; ++i) {
                if (!(mask & (1 << i)))
                        continue;
                res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
@@ -402,8 +410,10 @@ int xt_check_match(struct xt_mtchk_param *par,
                pr_err("%s_tables: %s match: used from hooks %s, but only "
                       "valid from %s\n",
                       xt_prefix[par->family], par->match->name,
-                      textify_hooks(used, sizeof(used), par->hook_mask),
-                      textify_hooks(allow, sizeof(allow), par->match->hooks));
+                      textify_hooks(used, sizeof(used), par->hook_mask,
+                                    par->family),
+                      textify_hooks(allow, sizeof(allow), par->match->hooks,
+                                    par->family));
                return -EINVAL;
        }
        if (par->match->proto && (par->match->proto != proto || inv_proto)) {
@@ -575,8 +585,10 @@ int xt_check_target(struct xt_tgchk_param *par,
                pr_err("%s_tables: %s target: used from hooks %s, but only "
                       "usable from %s\n",
                       xt_prefix[par->family], par->target->name,
-                      textify_hooks(used, sizeof(used), par->hook_mask),
-                      textify_hooks(allow, sizeof(allow), par->target->hooks));
+                      textify_hooks(used, sizeof(used), par->hook_mask,
+                                    par->family),
+                      textify_hooks(allow, sizeof(allow), par->target->hooks,
+                                    par->family));
                return -EINVAL;
        }
        if (par->target->proto && (par->target->proto != proto || inv_proto)) {
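With the extra nfproto argument, the same hook bit decodes to a different name depending on the table family, since arptables has its own, shorter hook list. Using the arrays defined in the hunk above:

        char buf[64];

        /* bit 0 is NF_ARP_IN for arptables, NF_INET_PRE_ROUTING elsewhere */
        textify_hooks(buf, sizeof(buf), 1 << 0, NFPROTO_ARP);   /* buf = "INPUT" */
        textify_hooks(buf, sizeof(buf), 1 << 0, NFPROTO_IPV4);  /* buf = "PREROUTING" */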
index 2a0843081840c0cd937a3fb0e749965bf9e28d0c..bde009ed8d3bf36d6defdbdbad8aca991c539281 100644 (file)
@@ -109,7 +109,7 @@ static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
        struct xt_ct_target_info *info = par->targinfo;
        struct nf_conntrack_tuple t;
        struct nf_conn *ct;
-       int ret;
+       int ret = -EOPNOTSUPP;
 
        if (info->flags & ~XT_CT_NOTRACK)
                return -EINVAL;
@@ -247,7 +247,7 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
        struct xt_ct_target_info_v1 *info = par->targinfo;
        struct nf_conntrack_tuple t;
        struct nf_conn *ct;
-       int ret;
+       int ret = -EOPNOTSUPP;
 
        if (info->flags & ~XT_CT_NOTRACK)
                return -EINVAL;
index a9327e2e48ce8450212feef7085f91e446766faa..670cbc3518ded5930f900eaca7e0677835cf750b 100644 (file)
 /* Must be called with rcu_read_lock. */
 static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
 {
-       if (unlikely(!vport)) {
-               kfree_skb(skb);
-               return;
-       }
+       if (unlikely(!vport))
+               goto error;
+
+       if (unlikely(skb_warn_if_lro(skb)))
+               goto error;
 
        /* Make our own copy of the packet.  Otherwise we will mangle the
         * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
@@ -50,6 +51,10 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
 
        skb_push(skb, ETH_HLEN);
        ovs_vport_receive(vport, skb);
+       return;
+
+error:
+       kfree_skb(skb);
 }
 
 /* Called with rcu_read_lock and bottom-halves disabled. */
@@ -169,9 +174,6 @@ static int netdev_send(struct vport *vport, struct sk_buff *skb)
                goto error;
        }
 
-       if (unlikely(skb_warn_if_lro(skb)))
-               goto error;
-
        skb->dev = netdev_vport->dev;
        len = skb->len;
        dev_queue_xmit(skb);
index e639645e8fec8ca365c8fc1bc4fb3650db04b5f4..c111bd0e083a5576d86dfc8f9ad91ab320c46efa 100644 (file)
@@ -2361,13 +2361,15 @@ static int packet_release(struct socket *sock)
 
        packet_flush_mclist(sk);
 
-       memset(&req_u, 0, sizeof(req_u));
-
-       if (po->rx_ring.pg_vec)
+       if (po->rx_ring.pg_vec) {
+               memset(&req_u, 0, sizeof(req_u));
                packet_set_ring(sk, &req_u, 1, 0);
+       }
 
-       if (po->tx_ring.pg_vec)
+       if (po->tx_ring.pg_vec) {
+               memset(&req_u, 0, sizeof(req_u));
                packet_set_ring(sk, &req_u, 1, 1);
+       }
 
        fanout_release(sk);
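packet_set_ring() with tp_block_nr left at zero releases an existing ring rather than creating one, so each branch now zeroes the request union immediately before its own teardown call and stays independent of whatever the other call may have done with it. The closing call pattern, in isolation:

        union tpacket_req_u req_u;

        memset(&req_u, 0, sizeof(req_u));
        packet_set_ring(sk, &req_u, 1 /* closing */, 0 /* rx ring, 1 for tx */);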
 
index c9d931e7ffecf1134e3743f4fc3920a10bb1f9cc..b85107b5ef62e3b9f38c65fc0591695d4d222369 100644 (file)
@@ -148,11 +148,9 @@ static unsigned long rfkill_ratelimit(const unsigned long last)
 
 static void rfkill_schedule_ratelimited(void)
 {
-       if (delayed_work_pending(&rfkill_op_work))
-               return;
-       schedule_delayed_work(&rfkill_op_work,
-                             rfkill_ratelimit(rfkill_last_scheduled));
-       rfkill_last_scheduled = jiffies;
+       if (schedule_delayed_work(&rfkill_op_work,
+                                 rfkill_ratelimit(rfkill_last_scheduled)))
+               rfkill_last_scheduled = jiffies;
 }
 
 static void rfkill_schedule_global_op(enum rfkill_sched_op op)
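The rewritten helper relies on schedule_delayed_work() returning false when the work item is already pending, so the ratelimit timestamp only advances when a delayed run is actually armed. The same pattern written generically (names here are illustrative, not taken from rfkill):

        static void schedule_ratelimited(struct delayed_work *work,
                                         unsigned long *last_scheduled,
                                         unsigned long delay)
        {
                /* true only if the work was not already queued */
                if (schedule_delayed_work(work, delay))
                        *last_scheduled = jiffies;
        }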
index 51561eafcb72fb75546b2565be0b6f442160b808..79e8ed4ac7ce7de355895b63fc06991be0b6af26 100644 (file)
@@ -1135,9 +1135,9 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
        memset(&opt, 0, sizeof(opt));
 
        opt.rate.rate = cl->rate.rate_bps >> 3;
-       opt.buffer = cl->buffer;
+       opt.buffer = PSCHED_NS2TICKS(cl->buffer);
        opt.ceil.rate = cl->ceil.rate_bps >> 3;
-       opt.cbuffer = cl->cbuffer;
+       opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
        opt.quantum = cl->quantum;
        opt.prio = cl->prio;
        opt.level = cl->level;
index 298c0ddfb57e3e5625a69d412c548d198005fd3b..3d2acc7a9c8099e677412276eeafe5a8b3f90fcb 100644 (file)
@@ -438,18 +438,18 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                if (q->rate) {
                        struct sk_buff_head *list = &sch->q;
 
-                       delay += packet_len_2_sched_time(skb->len, q);
-
                        if (!skb_queue_empty(list)) {
                                /*
-                                * Last packet in queue is reference point (now).
-                                * First packet in queue is already in flight,
-                                * calculate this time bonus and substract
+                                * Last packet in queue is reference point (now),
+                                * calculate this time bonus and subtract
                                 * from delay.
                                 */
-                               delay -= now - netem_skb_cb(skb_peek(list))->time_to_send;
+                               delay -= netem_skb_cb(skb_peek_tail(list))->time_to_send - now;
+                               delay = max_t(psched_tdiff_t, 0, delay);
                                now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
                        }
+
+                       delay += packet_len_2_sched_time(skb->len, q);
                }
 
                cb->time_to_send = now + delay;
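Reordering the rate-limiting arithmetic keeps a newly enqueued packet from being scheduled ahead of the packet already at the tail of the queue. With t_tail standing for the tail packet's time_to_send, the corrected sequence is effectively:

        delay  = max(0, delay - (t_tail - now));        /* never negative          */
        now    = t_tail;                                /* tail is the reference   */
        delay += packet_len_2_sched_time(skb->len, q);  /* this packet's tx time   */
        cb->time_to_send = now + delay;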
index 7521d944c0fb1c57de545938320c3b7ce4439a3e..cf4852814e0c667b85be0e7b763bd7fe8964f546 100644 (file)
@@ -3,8 +3,8 @@
 #
 
 menuconfig IP_SCTP
-       tristate "The SCTP Protocol (EXPERIMENTAL)"
-       depends on INET && EXPERIMENTAL
+       tristate "The SCTP Protocol"
+       depends on INET
        depends on IPV6 || IPV6=n
        select CRYPTO
        select CRYPTO_HMAC
index 159b9bc5d63300e53560cf6495f8f65b9fd06449..d8420ae614dcaa2b12dfbe5cef78a39c7bfda8ca 100644 (file)
@@ -71,7 +71,7 @@ void sctp_auth_key_put(struct sctp_auth_bytes *key)
                return;
 
        if (atomic_dec_and_test(&key->refcnt)) {
-               kfree(key);
+               kzfree(key);
                SCTP_DBG_OBJCNT_DEC(keys);
        }
 }
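kzfree() behaves like kfree() except that it scrubs the allocation first, so expired authentication keys do not linger in freed slab memory. Roughly (a simplified sketch, not the exact mm implementation):

        void kzfree(const void *p)
        {
                void *mem = (void *)p;

                if (ZERO_OR_NULL_PTR(mem))
                        return;
                memset(mem, 0, ksize(mem));     /* wipe the whole usable size */
                kfree(mem);
        }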
index 17a001bac2cc3c81ab052c2b603b02091bd924e0..1a9c5fb77310a22f609d07918ed5629c0d7cc7d7 100644 (file)
@@ -249,6 +249,8 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
 /* Final destructor for endpoint.  */
 static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
 {
+       int i;
+
        SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
 
        /* Free up the HMAC transform. */
@@ -271,6 +273,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
        sctp_inq_free(&ep->base.inqueue);
        sctp_bind_addr_free(&ep->base.bind_addr);
 
+       for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i)
+               memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE);
+
        /* Remove and free the port */
        if (sctp_sk(ep->base.sk)->bind_hash)
                sctp_put_port(ep->base.sk);
index f3f0f4dc31dd3adf33033e071e02cb891b982719..391a245d520316c865aad51424b94b9fe6fb8bf7 100644 (file)
@@ -326,9 +326,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
         */
        rcu_read_lock();
        list_for_each_entry_rcu(laddr, &bp->address_list, list) {
-               if (!laddr->valid && laddr->state != SCTP_ADDR_SRC)
+               if (!laddr->valid)
                        continue;
-               if ((laddr->a.sa.sa_family == AF_INET6) &&
+               if ((laddr->state == SCTP_ADDR_SRC) &&
+                   (laddr->a.sa.sa_family == AF_INET6) &&
                    (scope <= sctp_scope(&laddr->a))) {
                        bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
                        if (!baddr || (matchlen < bmatchlen)) {
index 379c81dee9d1256b3a1af84b84bc23b28092f12b..9bcdbd02d77713a5973dd9afd71a9c4783919648 100644 (file)
@@ -224,7 +224,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
 
 /* Free the outqueue structure and any related pending chunks.
  */
-void sctp_outq_teardown(struct sctp_outq *q)
+static void __sctp_outq_teardown(struct sctp_outq *q)
 {
        struct sctp_transport *transport;
        struct list_head *lchunk, *temp;
@@ -277,8 +277,6 @@ void sctp_outq_teardown(struct sctp_outq *q)
                sctp_chunk_free(chunk);
        }
 
-       q->error = 0;
-
        /* Throw away any leftover control chunks. */
        list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
                list_del_init(&chunk->list);
@@ -286,11 +284,17 @@ void sctp_outq_teardown(struct sctp_outq *q)
        }
 }
 
+void sctp_outq_teardown(struct sctp_outq *q)
+{
+       __sctp_outq_teardown(q);
+       sctp_outq_init(q->asoc, q);
+}
+
 /* Free the outqueue structure and any related pending chunks.  */
 void sctp_outq_free(struct sctp_outq *q)
 {
        /* Throw away leftover chunks. */
-       sctp_outq_teardown(q);
+       __sctp_outq_teardown(q);
 
        /* If we were kmalloc()'d, free the memory.  */
        if (q->malloced)
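The split gives the two entry points different semantics: restart paths keep calling sctp_outq_teardown(), which now flushes and immediately re-initializes the queue so the association can continue using it, while sctp_outq_free() goes through the internal helper and skips the re-init because the queue is going away. In usage terms (a sketch, assuming the association's embedded outqueue):

        /* association restart: flush, queue stays usable afterwards */
        sctp_outq_teardown(&asoc->outqueue);

        /* association destruction: flush only, nothing will reuse the queue */
        sctp_outq_free(&asoc->outqueue);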
index 618ec7e216cae9bb17038c07ff5d87a75c8f6208..5131fcfedb03c0b09e8c7e451341cebd82e49692 100644 (file)
@@ -1779,8 +1779,10 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
 
        /* Update the content of current association. */
        sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
-       sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
        sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
+       sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+                       SCTP_STATE(SCTP_STATE_ESTABLISHED));
+       sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
        return SCTP_DISPOSITION_CONSUME;
 
 nomem_ev:
index 9e65758cb03814f9372eafbf86c4821af124be00..cedd9bf67b8c5942aa0cbf5441342be62abd5e69 100644 (file)
@@ -3390,7 +3390,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
 
        ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
 out:
-       kfree(authkey);
+       kzfree(authkey);
        return ret;
 }
 
index 043889ac86c0419b3d368481b202864077034efa..bf3c6e8fc4017a64f93ff91c0ce96c8debbf44f4 100644 (file)
@@ -366,7 +366,11 @@ int sctp_sysctl_net_register(struct net *net)
 
 void sctp_sysctl_net_unregister(struct net *net)
 {
+       struct ctl_table *table;
+
+       table = net->sctp.sysctl_header->ctl_table_arg;
        unregister_net_sysctl_table(net->sctp.sysctl_header);
+       kfree(table);
 }
 
 static struct ctl_table_header * sctp_sysctl_header;
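Each network namespace registers its own duplicated copy of the SCTP sysctl table, so the unregister path has to recover that duplicate through ctl_table_arg and free it, otherwise the copy leaks. The pairing looks roughly like this (the register side is reconstructed from the usual per-net pattern and is not part of this hunk):

        /* register: hand a per-net duplicate of the template to sysctl */
        table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
        net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table);

        /* unregister: recover and free that same duplicate */
        table = net->sctp.sysctl_header->ctl_table_arg;
        unregister_net_sysctl_table(net->sctp.sysctl_header);
        kfree(table);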
index 1915ffe598e3faf6e0aff577f530fab85106c7c4..507b5e84fbdb26f88fab789e37f65a976ef6f198 100644 (file)
@@ -555,7 +555,7 @@ EXPORT_SYMBOL_GPL(rpc_clone_client);
  * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth
  *
  * @clnt: RPC client whose parameters are copied
- * @auth: security flavor for new client
+ * @flavor: security flavor for new client
  *
  * Returns a fresh RPC client or an ERR_PTR.
  */
index b4133bd13915317a96d94de9dcf2e5d48551870e..fb20f25ddec9b70c805ad65d675441d02b62055b 100644 (file)
@@ -98,9 +98,25 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
        list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
 }
 
+static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
+{
+       struct list_head *q = &queue->tasks[queue->priority];
+       struct rpc_task *task;
+
+       if (!list_empty(q)) {
+               task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
+               if (task->tk_owner == queue->owner)
+                       list_move_tail(&task->u.tk_wait.list, q);
+       }
+}
+
 static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
 {
-       queue->priority = priority;
+       if (queue->priority != priority) {
+               /* Fairness: rotate the list when changing priority */
+               rpc_rotate_queue_owner(queue);
+               queue->priority = priority;
+       }
 }
 
 static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
@@ -972,8 +988,7 @@ static void rpc_async_release(struct work_struct *work)
 
 static void rpc_release_resources_task(struct rpc_task *task)
 {
-       if (task->tk_rqstp)
-               xprt_release(task);
+       xprt_release(task);
        if (task->tk_msg.rpc_cred) {
                put_rpccred(task->tk_msg.rpc_cred);
                task->tk_msg.rpc_cred = NULL;
index 0a148c9d2a5ce7b3915df8c10f5e5f0913b99c25..0f679df7d072794094ece1aa48988e834395d849 100644 (file)
@@ -465,7 +465,7 @@ static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,
 }
 
 /*
- * See net/ipv6/datagram.c : datagram_recv_ctl
+ * See net/ipv6/datagram.c : ip6_datagram_recv_ctl
  */
 static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
                                     struct cmsghdr *cmh)
index bd462a532acfa695906887db429896ae569eac26..33811db8788a2e74e5781a0c20bdbceb829b5018 100644 (file)
@@ -1136,10 +1136,18 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
 void xprt_release(struct rpc_task *task)
 {
        struct rpc_xprt *xprt;
-       struct rpc_rqst *req;
+       struct rpc_rqst *req = task->tk_rqstp;
 
-       if (!(req = task->tk_rqstp))
+       if (req == NULL) {
+               if (task->tk_client) {
+                       rcu_read_lock();
+                       xprt = rcu_dereference(task->tk_client->cl_xprt);
+                       if (xprt->snd_task == task)
+                               xprt_release_write(xprt, task);
+                       rcu_read_unlock();
+               }
                return;
+       }
 
        xprt = req->rq_xprt;
        if (task->tk_ops->rpc_count_stats != NULL)
index 14d990400354200056e99d14132d6e7ab2504ccb..b677eab55b68465494bffae68a2c38c2321f18ca 100644 (file)
@@ -866,8 +866,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                /* allow mac80211 to determine the timeout */
                wdev->ps_timeout = -1;
 
-               if (!dev->ethtool_ops)
-                       dev->ethtool_ops = &cfg80211_ethtool_ops;
+               netdev_set_default_ethtool_ops(dev, &cfg80211_ethtool_ops);
 
                if ((wdev->iftype == NL80211_IFTYPE_STATION ||
                     wdev->iftype == NL80211_IFTYPE_P2P_CLIENT ||
index 01592d7d4789e389fd565e7cf9fb194dc7ee7bb0..45f1618c8e239c2db21692cfa5358f460918b76d 100644 (file)
@@ -1358,7 +1358,7 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info,
                                                  &iwe, IW_EV_UINT_LEN);
        }
 
-       buf = kmalloc(30, GFP_ATOMIC);
+       buf = kmalloc(31, GFP_ATOMIC);
        if (buf) {
                memset(&iwe, 0, sizeof(iwe));
                iwe.cmd = IWEVCUSTOM;
index 41eabc46f110d9cb607cb24f8af68054b91bcccd..07c585756d2a52530dc3fd1f5b14192b1a3c4cd6 100644 (file)
@@ -2656,7 +2656,7 @@ static void xfrm_policy_fini(struct net *net)
                WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
 
                htab = &net->xfrm.policy_bydst[dir];
-               sz = (htab->hmask + 1);
+               sz = (htab->hmask + 1) * sizeof(struct hlist_head);
                WARN_ON(!hlist_empty(htab->table));
                xfrm_hash_free(htab->table, sz);
        }
index 765f6fe951ebc7ed8fabaed3a4dc6ea4299aebfa..35754cc8a9e5b9c51cdaf52693128ee2098718f7 100644 (file)
@@ -242,11 +242,13 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
        u32 diff;
        struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
        u32 seq = ntohl(net_seq);
-       u32 pos = (replay_esn->seq - 1) % replay_esn->replay_window;
+       u32 pos;
 
        if (!replay_esn->replay_window)
                return;
 
+       pos = (replay_esn->seq - 1) % replay_esn->replay_window;
+
        if (seq > replay_esn->seq) {
                diff = seq - replay_esn->seq;
 
index 7b6792a18c051a94438493c55513f0c47c4d3483..6181c2cc9ca06221047025771950bfc37eb8ca7e 100644 (file)
@@ -5,12 +5,6 @@ menuconfig SAMPLES
 
 if SAMPLES
 
-config SAMPLE_TRACEPOINTS
-       tristate "Build tracepoints examples -- loadable modules only"
-       depends on TRACEPOINTS && m
-       help
-         This build tracepoints example modules.
-
 config SAMPLE_TRACE_EVENTS
        tristate "Build trace_events examples -- loadable modules only"
        depends on EVENT_TRACING && m
index 5ef08bba96ceaf817adff74aa4969e3bfdf3c37f..1a60c62e20454eeb8961fb9be7f80d2eaabef3fa 100644 (file)
@@ -1,4 +1,4 @@
 # Makefile for Linux samples code
 
-obj-$(CONFIG_SAMPLES)  += kobject/ kprobes/ tracepoints/ trace_events/ \
+obj-$(CONFIG_SAMPLES)  += kobject/ kprobes/ trace_events/ \
                           hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/
index bbbd276659ba5edad0e01b915d03639c7980803f..7203e66dcd6f9e035a0710147ec179bab9acabb9 100644 (file)
@@ -19,6 +19,7 @@ bpf-direct-objs := bpf-direct.o
 
 # Try to match the kernel target.
 ifndef CONFIG_64BIT
+ifndef CROSS_COMPILE
 
 # s390 has -m31 flag to build 31 bit binaries
 ifndef CONFIG_S390
@@ -35,6 +36,7 @@ HOSTLOADLIBES_bpf-direct += $(MFLAG)
 HOSTLOADLIBES_bpf-fancy += $(MFLAG)
 HOSTLOADLIBES_dropper += $(MFLAG)
 endif
+endif
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
diff --git a/samples/tracepoints/Makefile b/samples/tracepoints/Makefile
deleted file mode 100644 (file)
index 36479ad..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-# builds the tracepoint example kernel modules;
-# then to use one (as root):  insmod <module_name.ko>
-
-obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-sample.o
-obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-probe-sample.o
-obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-probe-sample2.o
diff --git a/samples/tracepoints/tp-samples-trace.h b/samples/tracepoints/tp-samples-trace.h
deleted file mode 100644 (file)
index 4d46be9..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _TP_SAMPLES_TRACE_H
-#define _TP_SAMPLES_TRACE_H
-
-#include <linux/proc_fs.h>     /* for struct inode and struct file */
-#include <linux/tracepoint.h>
-
-DECLARE_TRACE(subsys_event,
-       TP_PROTO(struct inode *inode, struct file *file),
-       TP_ARGS(inode, file));
-DECLARE_TRACE_NOARGS(subsys_eventb);
-#endif
diff --git a/samples/tracepoints/tracepoint-probe-sample.c b/samples/tracepoints/tracepoint-probe-sample.c
deleted file mode 100644 (file)
index 744c0b9..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * tracepoint-probe-sample.c
- *
- * sample tracepoint probes.
- */
-
-#include <linux/module.h>
-#include <linux/file.h>
-#include <linux/dcache.h>
-#include "tp-samples-trace.h"
-
-/*
- * Here the caller only guarantees locking for struct file and struct inode.
- * Locking must therefore be done in the probe to use the dentry.
- */
-static void probe_subsys_event(void *ignore,
-                              struct inode *inode, struct file *file)
-{
-       path_get(&file->f_path);
-       dget(file->f_path.dentry);
-       printk(KERN_INFO "Event is encountered with filename %s\n",
-               file->f_path.dentry->d_name.name);
-       dput(file->f_path.dentry);
-       path_put(&file->f_path);
-}
-
-static void probe_subsys_eventb(void *ignore)
-{
-       printk(KERN_INFO "Event B is encountered\n");
-}
-
-static int __init tp_sample_trace_init(void)
-{
-       int ret;
-
-       ret = register_trace_subsys_event(probe_subsys_event, NULL);
-       WARN_ON(ret);
-       ret = register_trace_subsys_eventb(probe_subsys_eventb, NULL);
-       WARN_ON(ret);
-
-       return 0;
-}
-
-module_init(tp_sample_trace_init);
-
-static void __exit tp_sample_trace_exit(void)
-{
-       unregister_trace_subsys_eventb(probe_subsys_eventb, NULL);
-       unregister_trace_subsys_event(probe_subsys_event, NULL);
-       tracepoint_synchronize_unregister();
-}
-
-module_exit(tp_sample_trace_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mathieu Desnoyers");
-MODULE_DESCRIPTION("Tracepoint Probes Samples");
diff --git a/samples/tracepoints/tracepoint-probe-sample2.c b/samples/tracepoints/tracepoint-probe-sample2.c
deleted file mode 100644 (file)
index 9fcf990..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * tracepoint-probe-sample2.c
- *
- * 2nd sample tracepoint probes.
- */
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include "tp-samples-trace.h"
-
-/*
- * Here the caller only guarantees locking for struct file and struct inode.
- * Locking must therefore be done in the probe to use the dentry.
- */
-static void probe_subsys_event(void *ignore,
-                              struct inode *inode, struct file *file)
-{
-       printk(KERN_INFO "Event is encountered with inode number %lu\n",
-               inode->i_ino);
-}
-
-static int __init tp_sample_trace_init(void)
-{
-       int ret;
-
-       ret = register_trace_subsys_event(probe_subsys_event, NULL);
-       WARN_ON(ret);
-
-       return 0;
-}
-
-module_init(tp_sample_trace_init);
-
-static void __exit tp_sample_trace_exit(void)
-{
-       unregister_trace_subsys_event(probe_subsys_event, NULL);
-       tracepoint_synchronize_unregister();
-}
-
-module_exit(tp_sample_trace_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mathieu Desnoyers");
-MODULE_DESCRIPTION("Tracepoint Probes Samples");
diff --git a/samples/tracepoints/tracepoint-sample.c b/samples/tracepoints/tracepoint-sample.c
deleted file mode 100644 (file)
index f4d89e0..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/* tracepoint-sample.c
- *
- * Executes a tracepoint when /proc/tracepoint-sample is opened.
- *
- * (C) Copyright 2007 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
- *
- * This file is released under the GPLv2.
- * See the file COPYING for more details.
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/proc_fs.h>
-#include "tp-samples-trace.h"
-
-DEFINE_TRACE(subsys_event);
-DEFINE_TRACE(subsys_eventb);
-
-struct proc_dir_entry *pentry_sample;
-
-static int my_open(struct inode *inode, struct file *file)
-{
-       int i;
-
-       trace_subsys_event(inode, file);
-       for (i = 0; i < 10; i++)
-               trace_subsys_eventb();
-       return -EPERM;
-}
-
-static const struct file_operations mark_ops = {
-       .open = my_open,
-       .llseek = noop_llseek,
-};
-
-static int __init sample_init(void)
-{
-       printk(KERN_ALERT "sample init\n");
-       pentry_sample = proc_create("tracepoint-sample", 0444, NULL,
-               &mark_ops);
-       if (!pentry_sample)
-               return -EPERM;
-       return 0;
-}
-
-static void __exit sample_exit(void)
-{
-       printk(KERN_ALERT "sample exit\n");
-       remove_proc_entry("tracepoint-sample", NULL);
-}
-
-module_init(sample_init)
-module_exit(sample_exit)
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mathieu Desnoyers");
-MODULE_DESCRIPTION("Tracepoint sample");
index 4d2c7dfdaabd44abd21e53072eee8f8f884890e8..2bb08a962ce3168a7fe1c01d3f397349e0143bc5 100755 (executable)
@@ -230,12 +230,12 @@ our $Inline       = qr{inline|__always_inline|noinline};
 our $Member    = qr{->$Ident|\.$Ident|\[[^]]*\]};
 our $Lval      = qr{$Ident(?:$Member)*};
 
-our $Float_hex = qr{(?i:0x[0-9a-f]+p-?[0-9]+[fl]?)};
-our $Float_dec = qr{(?i:((?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?))};
-our $Float_int = qr{(?i:[0-9]+e-?[0-9]+[fl]?)};
+our $Float_hex = qr{(?i)0x[0-9a-f]+p-?[0-9]+[fl]?};
+our $Float_dec = qr{(?i)(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?};
+our $Float_int = qr{(?i)[0-9]+e-?[0-9]+[fl]?};
 our $Float     = qr{$Float_hex|$Float_dec|$Float_int};
-our $Constant  = qr{(?:$Float|(?i:(?:0x[0-9a-f]+|[0-9]+)[ul]*))};
-our $Assignment        = qr{(?:\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=)};
+our $Constant  = qr{$Float|(?i)(?:0x[0-9a-f]+|[0-9]+)[ul]*};
+our $Assignment        = qr{\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=};
 our $Compare    = qr{<=|>=|==|!=|<|>};
 our $Operators = qr{
                        <=|>=|==|!=|
index 0fe5a026aef8d22ac83619dd0bbc2ba50965b63e..579775088967fdb11a044beb75c70de48d572ad6 100644 (file)
@@ -709,16 +709,31 @@ static void cap_req_classify_flow(const struct request_sock *req,
 {
 }
 
+static int cap_tun_dev_alloc_security(void **security)
+{
+       return 0;
+}
+
+static void cap_tun_dev_free_security(void *security)
+{
+}
+
 static int cap_tun_dev_create(void)
 {
        return 0;
 }
 
-static void cap_tun_dev_post_create(struct sock *sk)
+static int cap_tun_dev_attach_queue(void *security)
+{
+       return 0;
+}
+
+static int cap_tun_dev_attach(struct sock *sk, void *security)
 {
+       return 0;
 }
 
-static int cap_tun_dev_attach(struct sock *sk)
+static int cap_tun_dev_open(void *security)
 {
        return 0;
 }
@@ -1050,8 +1065,11 @@ void __init security_fixup_ops(struct security_operations *ops)
        set_to_cap_if_null(ops, secmark_refcount_inc);
        set_to_cap_if_null(ops, secmark_refcount_dec);
        set_to_cap_if_null(ops, req_classify_flow);
+       set_to_cap_if_null(ops, tun_dev_alloc_security);
+       set_to_cap_if_null(ops, tun_dev_free_security);
        set_to_cap_if_null(ops, tun_dev_create);
-       set_to_cap_if_null(ops, tun_dev_post_create);
+       set_to_cap_if_null(ops, tun_dev_open);
+       set_to_cap_if_null(ops, tun_dev_attach_queue);
        set_to_cap_if_null(ops, tun_dev_attach);
 #endif /* CONFIG_SECURITY_NETWORK */
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
index 19ecc8de9e6bfd837e2589546fdfe49b04b23ed0..d794abcc4b3b7e5e87173ba8ca45deca5b6d09d8 100644 (file)
@@ -215,7 +215,9 @@ static void devcgroup_css_free(struct cgroup *cgroup)
        struct dev_cgroup *dev_cgroup;
 
        dev_cgroup = cgroup_to_devcgroup(cgroup);
+       mutex_lock(&devcgroup_mutex);
        dev_exception_clean(dev_cgroup);
+       mutex_unlock(&devcgroup_mutex);
        kfree(dev_cgroup);
 }
 
index dfb26918699c5be5cffcfe4f37295914561096d4..7dd538ef5b8319e645465eb62294158f3dbe271a 100644 (file)
@@ -205,9 +205,9 @@ int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
                rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM,
                                           &xattr_data,
                                           sizeof(xattr_data), 0);
-       }
-       else if (rc == -ENODATA)
+       } else if (rc == -ENODATA && inode->i_op->removexattr) {
                rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM);
+       }
        return rc;
 }
 
index daa97f4ac9d13515909028bf1ffa6903d8aed0c7..7b88c6aeaed43e5f37b449fd8ec87610b3b69a93 100644 (file)
@@ -1254,24 +1254,42 @@ void security_secmark_refcount_dec(void)
 }
 EXPORT_SYMBOL(security_secmark_refcount_dec);
 
+int security_tun_dev_alloc_security(void **security)
+{
+       return security_ops->tun_dev_alloc_security(security);
+}
+EXPORT_SYMBOL(security_tun_dev_alloc_security);
+
+void security_tun_dev_free_security(void *security)
+{
+       security_ops->tun_dev_free_security(security);
+}
+EXPORT_SYMBOL(security_tun_dev_free_security);
+
 int security_tun_dev_create(void)
 {
        return security_ops->tun_dev_create();
 }
 EXPORT_SYMBOL(security_tun_dev_create);
 
-void security_tun_dev_post_create(struct sock *sk)
+int security_tun_dev_attach_queue(void *security)
 {
-       return security_ops->tun_dev_post_create(sk);
+       return security_ops->tun_dev_attach_queue(security);
 }
-EXPORT_SYMBOL(security_tun_dev_post_create);
+EXPORT_SYMBOL(security_tun_dev_attach_queue);
 
-int security_tun_dev_attach(struct sock *sk)
+int security_tun_dev_attach(struct sock *sk, void *security)
 {
-       return security_ops->tun_dev_attach(sk);
+       return security_ops->tun_dev_attach(sk, security);
 }
 EXPORT_SYMBOL(security_tun_dev_attach);
 
+int security_tun_dev_open(void *security)
+{
+       return security_ops->tun_dev_open(security);
+}
+EXPORT_SYMBOL(security_tun_dev_open);
+
 #endif /* CONFIG_SECURITY_NETWORK */
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
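The expanded hook set lets an LSM attach its state to the tun device itself instead of to whichever task's socket happens to be current at attach time. A driver-side lifecycle would look roughly like this (the tun/tfile field names are illustrative; the drivers/net/tun.c wiring is not part of this hunk):

        err = security_tun_dev_alloc_security(&tun->security);          /* device created         */
        err = security_tun_dev_open(tun->security);                     /* TUNSETIFF on existing  */
        err = security_tun_dev_attach_queue(tun->security);             /* re-attach a queue fd   */
        err = security_tun_dev_attach(tfile->socket.sk, tun->security); /* label the queue socket */
        security_tun_dev_free_security(tun->security);                  /* device destroyed       */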
index 61a53367d0292600fbb2d11da95c21118164fca4..ef26e9611ffbab91ad50faefa81a56a425cefb93 100644 (file)
@@ -4399,6 +4399,24 @@ static void selinux_req_classify_flow(const struct request_sock *req,
        fl->flowi_secid = req->secid;
 }
 
+static int selinux_tun_dev_alloc_security(void **security)
+{
+       struct tun_security_struct *tunsec;
+
+       tunsec = kzalloc(sizeof(*tunsec), GFP_KERNEL);
+       if (!tunsec)
+               return -ENOMEM;
+       tunsec->sid = current_sid();
+
+       *security = tunsec;
+       return 0;
+}
+
+static void selinux_tun_dev_free_security(void *security)
+{
+       kfree(security);
+}
+
 static int selinux_tun_dev_create(void)
 {
        u32 sid = current_sid();
@@ -4414,8 +4432,17 @@ static int selinux_tun_dev_create(void)
                            NULL);
 }
 
-static void selinux_tun_dev_post_create(struct sock *sk)
+static int selinux_tun_dev_attach_queue(void *security)
 {
+       struct tun_security_struct *tunsec = security;
+
+       return avc_has_perm(current_sid(), tunsec->sid, SECCLASS_TUN_SOCKET,
+                           TUN_SOCKET__ATTACH_QUEUE, NULL);
+}
+
+static int selinux_tun_dev_attach(struct sock *sk, void *security)
+{
+       struct tun_security_struct *tunsec = security;
        struct sk_security_struct *sksec = sk->sk_security;
 
        /* we don't currently perform any NetLabel based labeling here and it
@@ -4425,20 +4452,19 @@ static void selinux_tun_dev_post_create(struct sock *sk)
         * cause confusion to the TUN user that had no idea network labeling
         * protocols were being used */
 
-       /* see the comments in selinux_tun_dev_create() about why we don't use
-        * the sockcreate SID here */
-
-       sksec->sid = current_sid();
+       sksec->sid = tunsec->sid;
        sksec->sclass = SECCLASS_TUN_SOCKET;
+
+       return 0;
 }
 
-static int selinux_tun_dev_attach(struct sock *sk)
+static int selinux_tun_dev_open(void *security)
 {
-       struct sk_security_struct *sksec = sk->sk_security;
+       struct tun_security_struct *tunsec = security;
        u32 sid = current_sid();
        int err;
 
-       err = avc_has_perm(sid, sksec->sid, SECCLASS_TUN_SOCKET,
+       err = avc_has_perm(sid, tunsec->sid, SECCLASS_TUN_SOCKET,
                           TUN_SOCKET__RELABELFROM, NULL);
        if (err)
                return err;
@@ -4446,8 +4472,7 @@ static int selinux_tun_dev_attach(struct sock *sk)
                           TUN_SOCKET__RELABELTO, NULL);
        if (err)
                return err;
-
-       sksec->sid = sid;
+       tunsec->sid = sid;
 
        return 0;
 }
@@ -5642,9 +5667,12 @@ static struct security_operations selinux_ops = {
        .secmark_refcount_inc =         selinux_secmark_refcount_inc,
        .secmark_refcount_dec =         selinux_secmark_refcount_dec,
        .req_classify_flow =            selinux_req_classify_flow,
+       .tun_dev_alloc_security =       selinux_tun_dev_alloc_security,
+       .tun_dev_free_security =        selinux_tun_dev_free_security,
        .tun_dev_create =               selinux_tun_dev_create,
-       .tun_dev_post_create =          selinux_tun_dev_post_create,
+       .tun_dev_attach_queue =         selinux_tun_dev_attach_queue,
        .tun_dev_attach =               selinux_tun_dev_attach,
+       .tun_dev_open =                 selinux_tun_dev_open,
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
        .xfrm_policy_alloc_security =   selinux_xfrm_policy_alloc,
index df2de54a958debfab56caf3483cd5d93856e5be4..14d04e63b1f0e09ef15b4cf64057bd7cc861ed1d 100644 (file)
@@ -150,6 +150,6 @@ struct security_class_mapping secclass_map[] = {
            NULL } },
        { "kernel_service", { "use_as_override", "create_files_as", NULL } },
        { "tun_socket",
-         { COMMON_SOCK_PERMS, NULL } },
+         { COMMON_SOCK_PERMS, "attach_queue", NULL } },
        { NULL }
   };
index 26c7eee1c309b0a2f45a23c8b70d2157841c1670..aa47bcabb5f65e728aadbaa39cdecfa55d20aa16 100644 (file)
@@ -110,6 +110,10 @@ struct sk_security_struct {
        u16 sclass;                     /* sock security class */
 };
 
+struct tun_security_struct {
+       u32 sid;                        /* SID for the tun device sockets */
+};
+
 struct key_security_struct {
        u32 sid;        /* SID of key */
 };
index 6fc0ae90e5b1de99992b288d35035e2a0d061df2..fff7753e35c15ba6f82612594fbda19e7203b66e 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/io.h>
+#include <linux/gpio.h>
 
 #include <sound/ac97_codec.h>
 #include <sound/pxa2xx-lib.h>
@@ -148,6 +149,8 @@ static inline void pxa_ac97_warm_pxa27x(void)
 
 static inline void pxa_ac97_cold_pxa27x(void)
 {
+       unsigned int timeout;
+
        GCR &=  GCR_COLD_RST;  /* clear everything but nCRST */
        GCR &= ~GCR_COLD_RST;  /* then assert nCRST */
 
@@ -157,8 +160,10 @@ static inline void pxa_ac97_cold_pxa27x(void)
        clk_enable(ac97conf_clk);
        udelay(5);
        clk_disable(ac97conf_clk);
-       GCR = GCR_COLD_RST;
-       udelay(50);
+       GCR = GCR_COLD_RST | GCR_WARM_RST;
+       timeout = 100;     /* wait for the codec-ready bit to be set */
+       while (!((GSR | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
+               mdelay(1);
 }
 #endif
 
@@ -340,8 +345,21 @@ int pxa2xx_ac97_hw_probe(struct platform_device *dev)
        }
 
        if (cpu_is_pxa27x()) {
-               /* Use GPIO 113 as AC97 Reset on Bulverde */
+               /*
+                * This gpio is needed for a work-around to a bug in the ac97
+                * controller during warm reset.  The direction and level is set
+                * here so that it is an output driven high when switching from
+                * AC97_nRESET alt function to generic gpio.
+                */
+               ret = gpio_request_one(reset_gpio, GPIOF_OUT_INIT_HIGH,
+                                      "pxa27x ac97 reset");
+               if (ret < 0) {
+                       pr_err("%s: gpio_request_one() failed: %d\n",
+                              __func__, ret);
+                       goto err_conf;
+               }
                pxa27x_assert_ac97reset(reset_gpio, 0);
+
                ac97conf_clk = clk_get(&dev->dev, "AC97CONFCLK");
                if (IS_ERR(ac97conf_clk)) {
                        ret = PTR_ERR(ac97conf_clk);
@@ -384,6 +402,8 @@ EXPORT_SYMBOL_GPL(pxa2xx_ac97_hw_probe);
 
 void pxa2xx_ac97_hw_remove(struct platform_device *dev)
 {
+       if (cpu_is_pxa27x())
+               gpio_free(reset_gpio);
        GCR |= GCR_ACLINK_OFF;
        free_irq(IRQ_AC97, NULL);
        if (ac97conf_clk) {
index b8fb0a5adb9b54a67c687d5da6b142a917bcb261..822df971972c1ff54ea7886a3524356ff92fca50 100644 (file)
@@ -3654,6 +3654,7 @@ static void hda_call_codec_resume(struct hda_codec *codec)
        hda_set_power_state(codec, AC_PWRST_D0);
        restore_shutup_pins(codec);
        hda_exec_init_verbs(codec);
+       snd_hda_jack_set_dirty_all(codec);
        if (codec->patch_ops.resume)
                codec->patch_ops.resume(codec);
        else {
@@ -3665,10 +3666,8 @@ static void hda_call_codec_resume(struct hda_codec *codec)
 
        if (codec->jackpoll_interval)
                hda_jackpoll_work(&codec->jackpoll_work.work);
-       else {
-               snd_hda_jack_set_dirty_all(codec);
+       else
                snd_hda_jack_report_sync(codec);
-       }
 
        codec->in_pm = 0;
        snd_hda_power_down(codec); /* flag down before returning */
index cca87277baf088b273279c6b1621aa52a981a47e..c78286f6e5d8133e070d34e3927a12c70be626f1 100644 (file)
@@ -573,9 +573,12 @@ enum {
 #define AZX_DCAPS_PM_RUNTIME   (1 << 26)       /* runtime PM support */
 
 /* quirks for Intel PCH */
-#define AZX_DCAPS_INTEL_PCH \
+#define AZX_DCAPS_INTEL_PCH_NOPM \
        (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \
-        AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_PM_RUNTIME)
+        AZX_DCAPS_COUNT_LPIB_DELAY)
+
+#define AZX_DCAPS_INTEL_PCH \
+       (AZX_DCAPS_INTEL_PCH_NOPM | AZX_DCAPS_PM_RUNTIME)
 
 /* quirks for ATI SB / AMD Hudson */
 #define AZX_DCAPS_PRESET_ATI_SB \
@@ -653,29 +656,43 @@ static char *driver_short_names[] = {
 #define get_azx_dev(substream) (substream->runtime->private_data)
 
 #ifdef CONFIG_X86
-static void __mark_pages_wc(struct azx *chip, void *addr, size_t size, bool on)
+static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool on)
 {
+       int pages;
+
        if (azx_snoop(chip))
                return;
-       if (addr && size) {
-               int pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       if (!dmab || !dmab->area || !dmab->bytes)
+               return;
+
+#ifdef CONFIG_SND_DMA_SGBUF
+       if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG) {
+               struct snd_sg_buf *sgbuf = dmab->private_data;
                if (on)
-                       set_memory_wc((unsigned long)addr, pages);
+                       set_pages_array_wc(sgbuf->page_table, sgbuf->pages);
                else
-                       set_memory_wb((unsigned long)addr, pages);
+                       set_pages_array_wb(sgbuf->page_table, sgbuf->pages);
+               return;
        }
+#endif
+
+       pages = (dmab->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       if (on)
+               set_memory_wc((unsigned long)dmab->area, pages);
+       else
+               set_memory_wb((unsigned long)dmab->area, pages);
 }
 
 static inline void mark_pages_wc(struct azx *chip, struct snd_dma_buffer *buf,
                                 bool on)
 {
-       __mark_pages_wc(chip, buf->area, buf->bytes, on);
+       __mark_pages_wc(chip, buf, on);
 }
 static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev,
-                                  struct snd_pcm_runtime *runtime, bool on)
+                                  struct snd_pcm_substream *substream, bool on)
 {
        if (azx_dev->wc_marked != on) {
-               __mark_pages_wc(chip, runtime->dma_area, runtime->dma_bytes, on);
+               __mark_pages_wc(chip, snd_pcm_get_dma_buf(substream), on);
                azx_dev->wc_marked = on;
        }
 }
@@ -686,7 +703,7 @@ static inline void mark_pages_wc(struct azx *chip, struct snd_dma_buffer *buf,
 {
 }
 static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev,
-                                  struct snd_pcm_runtime *runtime, bool on)
+                                  struct snd_pcm_substream *substream, bool on)
 {
 }
 #endif
@@ -1965,11 +1982,10 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
 {
        struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
        struct azx *chip = apcm->chip;
-       struct snd_pcm_runtime *runtime = substream->runtime;
        struct azx_dev *azx_dev = get_azx_dev(substream);
        int ret;
 
-       mark_runtime_wc(chip, azx_dev, runtime, false);
+       mark_runtime_wc(chip, azx_dev, substream, false);
        azx_dev->bufsize = 0;
        azx_dev->period_bytes = 0;
        azx_dev->format_val = 0;
@@ -1977,7 +1993,7 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
                                        params_buffer_bytes(hw_params));
        if (ret < 0)
                return ret;
-       mark_runtime_wc(chip, azx_dev, runtime, true);
+       mark_runtime_wc(chip, azx_dev, substream, true);
        return ret;
 }
 
@@ -1986,7 +2002,6 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
        struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
        struct azx_dev *azx_dev = get_azx_dev(substream);
        struct azx *chip = apcm->chip;
-       struct snd_pcm_runtime *runtime = substream->runtime;
        struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
 
        /* reset BDL address */
@@ -1999,7 +2014,7 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
 
        snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
 
-       mark_runtime_wc(chip, azx_dev, runtime, false);
+       mark_runtime_wc(chip, azx_dev, substream, false);
        return snd_pcm_lib_free_pages(substream);
 }
 
@@ -3586,13 +3601,13 @@ static void azx_remove(struct pci_dev *pci)
 static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
        /* CPT */
        { PCI_DEVICE(0x8086, 0x1c20),
-         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
        /* PBG */
        { PCI_DEVICE(0x8086, 0x1d20),
-         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
        /* Panther Point */
        { PCI_DEVICE(0x8086, 0x1e20),
-         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM },
        /* Lynx Point */
        { PCI_DEVICE(0x8086, 0x8c20),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
@@ -3610,13 +3625,12 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
        /* 5 Series/3400 */
        { PCI_DEVICE(0x8086, 0x3b56),
          .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH },
-       /* SCH */
+       /* Poulsbo */
        { PCI_DEVICE(0x8086, 0x811b),
-         .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
-         AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_LPIB }, /* Poulsbo */
+         .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
+       /* Oaktrail */
        { PCI_DEVICE(0x8086, 0x080a),
-         .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
-         AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_LPIB }, /* Oaktrail */
+         .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM },
        /* ICH */
        { PCI_DEVICE(0x8086, 0x2668),
          .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
index 60890bfecc196600ffa15ab5979c06aef8891f9b..009b77a693cf3326a3cdec7b6fa335a4c51c80be 100644 (file)
@@ -558,24 +558,12 @@ static int conexant_build_controls(struct hda_codec *codec)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int conexant_suspend(struct hda_codec *codec)
-{
-       snd_hda_shutup_pins(codec);
-       return 0;
-}
-#endif
-
 static const struct hda_codec_ops conexant_patch_ops = {
        .build_controls = conexant_build_controls,
        .build_pcms = conexant_build_pcms,
        .init = conexant_init,
        .free = conexant_free,
        .set_power_state = conexant_set_power,
-#ifdef CONFIG_PM
-       .suspend = conexant_suspend,
-#endif
-       .reboot_notify = snd_hda_shutup_pins,
 };
 
 #ifdef CONFIG_SND_HDA_INPUT_BEEP
@@ -4405,10 +4393,6 @@ static const struct hda_codec_ops cx_auto_patch_ops = {
        .init = cx_auto_init,
        .free = conexant_free,
        .unsol_event = snd_hda_jack_unsol_event,
-#ifdef CONFIG_PM
-       .suspend = conexant_suspend,
-#endif
-       .reboot_notify = snd_hda_shutup_pins,
 };
 
 /*
@@ -4652,6 +4636,12 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = {
          .patch = patch_conexant_auto },
        { .id = 0x14f15111, .name = "CX20753/4",
          .patch = patch_conexant_auto },
+       { .id = 0x14f15113, .name = "CX20755",
+         .patch = patch_conexant_auto },
+       { .id = 0x14f15114, .name = "CX20756",
+         .patch = patch_conexant_auto },
+       { .id = 0x14f15115, .name = "CX20757",
+         .patch = patch_conexant_auto },
        {} /* terminator */
 };
 
@@ -4675,6 +4665,9 @@ MODULE_ALIAS("snd-hda-codec-id:14f150b9");
 MODULE_ALIAS("snd-hda-codec-id:14f1510f");
 MODULE_ALIAS("snd-hda-codec-id:14f15110");
 MODULE_ALIAS("snd-hda-codec-id:14f15111");
+MODULE_ALIAS("snd-hda-codec-id:14f15113");
+MODULE_ALIAS("snd-hda-codec-id:14f15114");
+MODULE_ALIAS("snd-hda-codec-id:14f15115");
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Conexant HD-audio codec");
index b6c21ea187ca00df7233416f516111567b6b9ca7..807a2aa1ff384e5a54f64a6f951f8d8e44e3061b 100644 (file)
@@ -1502,7 +1502,7 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
        ctl_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
        substream = snd_pcm_chmap_substream(info, ctl_idx);
        if (!substream || !substream->runtime)
-               return -EBADFD;
+               return 0; /* just for avoiding error from alsactl restore */
        switch (substream->runtime->status->state) {
        case SNDRV_PCM_STATE_OPEN:
        case SNDRV_PCM_STATE_SETUP:
index 71ae23dd71035edd0214fb8a4f209959cde932d1..5faaad219a7f37e588e8b5a27b298cbb7adfa69d 100644 (file)
@@ -4694,6 +4694,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1584, 0x9077, "Uniwill P53", ALC880_FIXUP_VOL_KNOB),
        SND_PCI_QUIRK(0x161f, 0x203d, "W810", ALC880_FIXUP_W810),
        SND_PCI_QUIRK(0x161f, 0x205d, "Medion Rim 2150", ALC880_FIXUP_MEDION_RIM),
+       SND_PCI_QUIRK(0x1631, 0xe011, "PB 13201056", ALC880_FIXUP_6ST),
        SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_FIXUP_F1734),
        SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FIXUP_FUJITSU),
        SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_FIXUP_F1734),
@@ -5708,6 +5709,7 @@ static const struct alc_model_fixup alc268_fixup_models[] = {
 };
 
 static const struct snd_pci_quirk alc268_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x1025, 0x015b, "Acer AOA 150 (ZG5)", ALC268_FIXUP_INV_DMIC),
        /* below is codec SSID since multiple Toshiba laptops have the
         * same PCI SSID 1179:ff00
         */
@@ -5817,6 +5819,9 @@ enum {
        ALC269_TYPE_ALC269VB,
        ALC269_TYPE_ALC269VC,
        ALC269_TYPE_ALC269VD,
+       ALC269_TYPE_ALC280,
+       ALC269_TYPE_ALC282,
+       ALC269_TYPE_ALC284,
 };
 
 /*
@@ -5833,10 +5838,13 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
        switch (spec->codec_variant) {
        case ALC269_TYPE_ALC269VA:
        case ALC269_TYPE_ALC269VC:
+       case ALC269_TYPE_ALC280:
+       case ALC269_TYPE_ALC284:
                ssids = alc269va_ssids;
                break;
        case ALC269_TYPE_ALC269VB:
        case ALC269_TYPE_ALC269VD:
+       case ALC269_TYPE_ALC282:
                ssids = alc269_ssids;
                break;
        default:
@@ -6245,6 +6253,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_MIC2_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x1972, "HP Pavilion 17", ALC269_FIXUP_MIC1_MUTE_LED),
+       SND_PCI_QUIRK(0x103c, 0x1977, "HP Pavilion 14", ALC269_FIXUP_MIC1_MUTE_LED),
        SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_DMIC),
        SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_DMIC),
        SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
@@ -6259,6 +6268,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
        SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
+       SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
        SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
        SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
        SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
@@ -6400,7 +6410,8 @@ static int patch_alc269(struct hda_codec *codec)
 
        alc_auto_parse_customize_define(codec);
 
-       if (codec->vendor_id == 0x10ec0269) {
+       switch (codec->vendor_id) {
+       case 0x10ec0269:
                spec->codec_variant = ALC269_TYPE_ALC269VA;
                switch (alc_get_coef0(codec) & 0x00f0) {
                case 0x0010:
@@ -6425,6 +6436,20 @@ static int patch_alc269(struct hda_codec *codec)
                        goto error;
                spec->init_hook = alc269_fill_coef;
                alc269_fill_coef(codec);
+               break;
+
+       case 0x10ec0280:
+       case 0x10ec0290:
+               spec->codec_variant = ALC269_TYPE_ALC280;
+               break;
+       case 0x10ec0282:
+       case 0x10ec0283:
+               spec->codec_variant = ALC269_TYPE_ALC282;
+               break;
+       case 0x10ec0284:
+       case 0x10ec0292:
+               spec->codec_variant = ALC269_TYPE_ALC284;
+               break;
        }
 
        /* automatic parse from the BIOS config */
@@ -7129,6 +7154,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
        { .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 },
        { .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 },
        { .id = 0x10ec0283, .name = "ALC283", .patch = patch_alc269 },
+       { .id = 0x10ec0284, .name = "ALC284", .patch = patch_alc269 },
        { .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 },
        { .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 },
        { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
index 6e02e064d7b43df1159554899f1007a9f223bc7b..223c3d9cc69efa1af38e6dee6c45ffc0f3733c55 100644 (file)
@@ -441,6 +441,7 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
 */
 /* status */
 #define HDSPM_AES32_wcLock     0x0200000
+#define HDSPM_AES32_wcSync     0x0100000
 #define HDSPM_AES32_wcFreq_bit  22
 /* (status >> HDSPM_AES32_wcFreq_bit) & 0xF gives WC frequency (cf function
   HDSPM_bit2freq */
@@ -3467,10 +3468,12 @@ static int hdspm_wc_sync_check(struct hdspm *hdspm)
        switch (hdspm->io_type) {
        case AES32:
                status = hdspm_read(hdspm, HDSPM_statusRegister);
-               if (status & HDSPM_wcSync)
-                       return 2;
-               else if (status & HDSPM_wcLock)
-                       return 1;
+               if (status & HDSPM_AES32_wcLock) {
+                       if (status & HDSPM_AES32_wcSync)
+                               return 2;
+                       else
+                               return 1;
+               }
                return 0;
                break;
 
@@ -4658,6 +4661,7 @@ snd_hdspm_proc_read_aes32(struct snd_info_entry * entry,
        unsigned int status;
        unsigned int status2;
        unsigned int timecode;
+       unsigned int wcLock, wcSync;
        int pref_syncref;
        char *autosync_ref;
        int x;
@@ -4751,8 +4755,11 @@ snd_hdspm_proc_read_aes32(struct snd_info_entry * entry,
 
        snd_iprintf(buffer, "--- Status:\n");
 
+       wcLock = status & HDSPM_AES32_wcLock;
+       wcSync = wcLock && (status & HDSPM_AES32_wcSync);
+
        snd_iprintf(buffer, "Word: %s  Frequency: %d\n",
-                   (status & HDSPM_AES32_wcLock) ? "Sync   " : "No Lock",
+                   (wcLock) ? (wcSync ? "Sync   " : "Lock   ") : "No Lock",
                    HDSPM_bit2freq((status >> HDSPM_AES32_wcFreq_bit) & 0xF));
 
        for (x = 0; x < 8; x++) {
index adf397b9d0e650eb19768e7bc6c592352014a595..ef62c435848eb776421776d7e80e90527e3f2b72 100644 (file)
@@ -446,15 +446,9 @@ static int arizona_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        case SND_SOC_DAIFMT_DSP_A:
                mode = 0;
                break;
-       case SND_SOC_DAIFMT_DSP_B:
-               mode = 1;
-               break;
        case SND_SOC_DAIFMT_I2S:
                mode = 2;
                break;
-       case SND_SOC_DAIFMT_LEFT_J:
-               mode = 3;
-               break;
        default:
                arizona_aif_err(dai, "Unsupported DAI format %d\n",
                                fmt & SND_SOC_DAIFMT_FORMAT_MASK);
@@ -691,7 +685,7 @@ static int arizona_hw_params(struct snd_pcm_substream *substream,
        }
        sr_val = i;
 
-       lrclk = snd_soc_params_to_bclk(params) / params_rate(params);
+       lrclk = rates[bclk] / params_rate(params);
 
        arizona_aif_dbg(dai, "BCLK %dHz LRCLK %dHz\n",
                        rates[bclk], rates[bclk] / lrclk);
@@ -714,7 +708,8 @@ static int arizona_hw_params(struct snd_pcm_substream *substream,
                snd_soc_update_bits(codec, ARIZONA_ASYNC_SAMPLE_RATE_1,
                                    ARIZONA_ASYNC_SAMPLE_RATE_MASK, sr_val);
                snd_soc_update_bits(codec, base + ARIZONA_AIF_RATE_CTRL,
-                                   ARIZONA_AIF1_RATE_MASK, 8);
+                                   ARIZONA_AIF1_RATE_MASK,
+                                   8 << ARIZONA_AIF1_RATE_SHIFT);
                break;
        default:
                arizona_aif_err(dai, "Invalid clock %d\n", dai_priv->clk);
@@ -1087,6 +1082,9 @@ int arizona_init_fll(struct arizona *arizona, int id, int base, int lock_irq,
                        id, ret);
        }
 
+       regmap_update_bits(arizona->regmap, fll->base + 1,
+                          ARIZONA_FLL1_FREERUN, 0);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(arizona_init_fll);
index 41dae1ed3b714a4734448b1d3c99938af1fe9d9a..4deebeb0717754085edceb38e7c00b141f4502fb 100644 (file)
 
 #define ARIZONA_FLL_SRC_MCLK1      0
 #define ARIZONA_FLL_SRC_MCLK2      1
-#define ARIZONA_FLL_SRC_SLIMCLK    2
-#define ARIZONA_FLL_SRC_FLL1       3
-#define ARIZONA_FLL_SRC_FLL2       4
-#define ARIZONA_FLL_SRC_AIF1BCLK   5
-#define ARIZONA_FLL_SRC_AIF2BCLK   6
-#define ARIZONA_FLL_SRC_AIF3BCLK   7
-#define ARIZONA_FLL_SRC_AIF1LRCLK  8
-#define ARIZONA_FLL_SRC_AIF2LRCLK  9
-#define ARIZONA_FLL_SRC_AIF3LRCLK 10
+#define ARIZONA_FLL_SRC_SLIMCLK    3
+#define ARIZONA_FLL_SRC_FLL1       4
+#define ARIZONA_FLL_SRC_FLL2       5
+#define ARIZONA_FLL_SRC_AIF1BCLK   8
+#define ARIZONA_FLL_SRC_AIF2BCLK   9
+#define ARIZONA_FLL_SRC_AIF3BCLK  10
+#define ARIZONA_FLL_SRC_AIF1LRCLK 12
+#define ARIZONA_FLL_SRC_AIF2LRCLK 13
+#define ARIZONA_FLL_SRC_AIF3LRCLK 14
 
 #define ARIZONA_MIXER_VOL_MASK             0x00FE
 #define ARIZONA_MIXER_VOL_SHIFT                 1
index 4f1127935fdf5f9578970b13fcf73b8815d469e5..ac8742a1f25ab7c69fcca2b22458ae02acf3d67e 100644 (file)
@@ -474,16 +474,16 @@ static int cs4271_probe(struct snd_soc_codec *codec)
        struct cs4271_platform_data *cs4271plat = codec->dev->platform_data;
        int ret;
        int gpio_nreset = -EINVAL;
-       int amutec_eq_bmutec = 0;
+       bool amutec_eq_bmutec = false;
 
 #ifdef CONFIG_OF
        if (of_match_device(cs4271_dt_ids, codec->dev)) {
                gpio_nreset = of_get_named_gpio(codec->dev->of_node,
                                                "reset-gpio", 0);
 
-               if (!of_get_property(codec->dev->of_node,
+               if (of_get_property(codec->dev->of_node,
                                     "cirrus,amutec-eq-bmutec", NULL))
-                       amutec_eq_bmutec = 1;
+                       amutec_eq_bmutec = true;
        }
 #endif
 
index 99bb1c69499e42a6791ae12c55dde2d53efb7f49..9811a5478c87649da6c42806a3a43bfcc5a5767e 100644 (file)
@@ -737,7 +737,7 @@ static const struct cs42l52_clk_para clk_map_table[] = {
 
 static int cs42l52_get_clk(int mclk, int rate)
 {
-       int i, ret = 0;
+       int i, ret = -EINVAL;
        u_int mclk1, mclk2 = 0;
 
        for (i = 0; i < ARRAY_SIZE(clk_map_table); i++) {
@@ -749,8 +749,6 @@ static int cs42l52_get_clk(int mclk, int rate)
                        }
                }
        }
-       if (ret > ARRAY_SIZE(clk_map_table))
-               return -EINVAL;
        return ret;
 }
 
index d75257d40a496d26e9bb513f6fd000abe6f13a76..e19490cfb3a8bde2036e61d877b3a8c407cc510c 100644 (file)
@@ -111,9 +111,9 @@ static struct reg_default lm49453_reg_defs[] = {
        { 101, 0x00 },
        { 102, 0x00 },
        { 103, 0x01 },
-       { 105, 0x01 },
-       { 106, 0x00 },
-       { 107, 0x01 },
+       { 104, 0x01 },
+       { 105, 0x00 },
+       { 106, 0x01 },
        { 107, 0x00 },
        { 108, 0x00 },
        { 109, 0x00 },
@@ -163,56 +163,25 @@ static struct reg_default lm49453_reg_defs[] = {
        { 184, 0x00 },
        { 185, 0x00 },
        { 186, 0x00 },
-       { 189, 0x00 },
+       { 187, 0x00 },
        { 188, 0x00 },
-       { 194, 0x00 },
-       { 195, 0x00 },
-       { 196, 0x00 },
-       { 197, 0x00 },
-       { 200, 0x00 },
-       { 201, 0x00 },
-       { 202, 0x00 },
-       { 203, 0x00 },
-       { 204, 0x00 },
-       { 205, 0x00 },
-       { 208, 0x00 },
+       { 189, 0x00 },
+       { 208, 0x06 },
        { 209, 0x00 },
-       { 210, 0x00 },
-       { 211, 0x00 },
-       { 213, 0x00 },
-       { 214, 0x00 },
-       { 215, 0x00 },
-       { 216, 0x00 },
-       { 217, 0x00 },
-       { 218, 0x00 },
-       { 219, 0x00 },
+       { 210, 0x08 },
+       { 211, 0x54 },
+       { 212, 0x14 },
+       { 213, 0x0d },
+       { 214, 0x0d },
+       { 215, 0x14 },
+       { 216, 0x60 },
        { 221, 0x00 },
        { 222, 0x00 },
+       { 223, 0x00 },
        { 224, 0x00 },
-       { 225, 0x00 },
-       { 226, 0x00 },
-       { 227, 0x00 },
-       { 228, 0x00 },
-       { 229, 0x00 },
-       { 230, 0x13 },
-       { 231, 0x00 },
-       { 232, 0x80 },
-       { 233, 0x0C },
-       { 234, 0xDD },
-       { 235, 0x00 },
-       { 236, 0x04 },
-       { 237, 0x00 },
-       { 238, 0x00 },
-       { 239, 0x00 },
-       { 240, 0x00 },
-       { 241, 0x00 },
-       { 242, 0x00 },
-       { 243, 0x00 },
-       { 244, 0x00 },
-       { 245, 0x00 },
        { 248, 0x00 },
        { 249, 0x00 },
-       { 254, 0x00 },
+       { 250, 0x00 },
        { 255, 0x00 },
 };
 
@@ -525,36 +494,41 @@ SOC_DAPM_SINGLE("Port2_2 Switch", LM49453_P0_PORT2_TX2_REG, 7, 1, 0),
 };
 
 /* TLV Declarations */
-static const DECLARE_TLV_DB_SCALE(digital_tlv, -7650, 150, 1);
-static const DECLARE_TLV_DB_SCALE(port_tlv, 0, 600, 0);
+static const DECLARE_TLV_DB_SCALE(adc_dac_tlv, -7650, 150, 1);
+static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 200, 1);
+static const DECLARE_TLV_DB_SCALE(port_tlv, -1800, 600, 0);
+static const DECLARE_TLV_DB_SCALE(stn_tlv, -7200, 150, 0);
 
 static const struct snd_kcontrol_new lm49453_sidetone_mixer_controls[] = {
 /* Sidetone supports mono only */
 SOC_DAPM_SINGLE_TLV("Sidetone ADCL Volume", LM49453_P0_STN_VOL_ADCL_REG,
-                    0, 0x3F, 0, digital_tlv),
+                    0, 0x3F, 0, stn_tlv),
 SOC_DAPM_SINGLE_TLV("Sidetone ADCR Volume", LM49453_P0_STN_VOL_ADCR_REG,
-                    0, 0x3F, 0, digital_tlv),
+                    0, 0x3F, 0, stn_tlv),
 SOC_DAPM_SINGLE_TLV("Sidetone DMIC1L Volume", LM49453_P0_STN_VOL_DMIC1L_REG,
-                    0, 0x3F, 0, digital_tlv),
+                    0, 0x3F, 0, stn_tlv),
 SOC_DAPM_SINGLE_TLV("Sidetone DMIC1R Volume", LM49453_P0_STN_VOL_DMIC1R_REG,
-                    0, 0x3F, 0, digital_tlv),
+                    0, 0x3F, 0, stn_tlv),
 SOC_DAPM_SINGLE_TLV("Sidetone DMIC2L Volume", LM49453_P0_STN_VOL_DMIC2L_REG,
-                    0, 0x3F, 0, digital_tlv),
+                    0, 0x3F, 0, stn_tlv),
 SOC_DAPM_SINGLE_TLV("Sidetone DMIC2R Volume", LM49453_P0_STN_VOL_DMIC2R_REG,
-                    0, 0x3F, 0, digital_tlv),
+                    0, 0x3F, 0, stn_tlv),
 };
 
 static const struct snd_kcontrol_new lm49453_snd_controls[] = {
        /* mic1 and mic2 supports mono only */
-       SOC_SINGLE_TLV("Mic1 Volume", LM49453_P0_ADC_LEVELL_REG, 0, 6,
-                       0, digital_tlv),
-       SOC_SINGLE_TLV("Mic2 Volume", LM49453_P0_ADC_LEVELR_REG, 0, 6,
-                       0, digital_tlv),
+       SOC_SINGLE_TLV("Mic1 Volume", LM49453_P0_MICL_REG, 0, 15, 0, mic_tlv),
+       SOC_SINGLE_TLV("Mic2 Volume", LM49453_P0_MICR_REG, 0, 15, 0, mic_tlv),
+
+       SOC_SINGLE_TLV("ADCL Volume", LM49453_P0_ADC_LEVELL_REG, 0, 63,
+                       0, adc_dac_tlv),
+       SOC_SINGLE_TLV("ADCR Volume", LM49453_P0_ADC_LEVELR_REG, 0, 63,
+                       0, adc_dac_tlv),
 
        SOC_DOUBLE_R_TLV("DMIC1 Volume", LM49453_P0_DMIC1_LEVELL_REG,
-                         LM49453_P0_DMIC1_LEVELR_REG, 0, 6, 0, digital_tlv),
+                         LM49453_P0_DMIC1_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
        SOC_DOUBLE_R_TLV("DMIC2 Volume", LM49453_P0_DMIC2_LEVELL_REG,
-                         LM49453_P0_DMIC2_LEVELR_REG, 0, 6, 0, digital_tlv),
+                         LM49453_P0_DMIC2_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
 
        SOC_DAPM_ENUM("Mic2Mode", lm49453_mic2mode_enum),
        SOC_DAPM_ENUM("DMIC12 SRC", lm49453_dmic12_cfg_enum),
@@ -569,16 +543,16 @@ static const struct snd_kcontrol_new lm49453_snd_controls[] = {
                                          2, 1, 0),
 
        SOC_DOUBLE_R_TLV("DAC HP Volume", LM49453_P0_DAC_HP_LEVELL_REG,
-                         LM49453_P0_DAC_HP_LEVELR_REG, 0, 6, 0, digital_tlv),
+                         LM49453_P0_DAC_HP_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
        SOC_DOUBLE_R_TLV("DAC LO Volume", LM49453_P0_DAC_LO_LEVELL_REG,
-                         LM49453_P0_DAC_LO_LEVELR_REG, 0, 6, 0, digital_tlv),
+                         LM49453_P0_DAC_LO_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
        SOC_DOUBLE_R_TLV("DAC LS Volume", LM49453_P0_DAC_LS_LEVELL_REG,
-                         LM49453_P0_DAC_LS_LEVELR_REG, 0, 6, 0, digital_tlv),
+                         LM49453_P0_DAC_LS_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
        SOC_DOUBLE_R_TLV("DAC HA Volume", LM49453_P0_DAC_HA_LEVELL_REG,
-                         LM49453_P0_DAC_HA_LEVELR_REG, 0, 6, 0, digital_tlv),
+                         LM49453_P0_DAC_HA_LEVELR_REG, 0, 63, 0, adc_dac_tlv),
 
        SOC_SINGLE_TLV("EP Volume", LM49453_P0_DAC_LS_LEVELL_REG,
-                       0, 6, 0, digital_tlv),
+                       0, 63, 0, adc_dac_tlv),
 
        SOC_SINGLE_TLV("PORT1_1_RX_LVL Volume", LM49453_P0_PORT1_RX_LVL1_REG,
                        0, 3, 0, port_tlv),
@@ -1218,7 +1192,7 @@ static int lm49453_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
        }
 
        snd_soc_update_bits(codec, LM49453_P0_AUDIO_PORT1_BASIC_REG,
-                           LM49453_AUDIO_PORT1_BASIC_FMT_MASK|BIT(1)|BIT(5),
+                           LM49453_AUDIO_PORT1_BASIC_FMT_MASK|BIT(0)|BIT(5),
                            (aif_val | mode | clk_phase));
 
        snd_soc_write(codec, LM49453_P0_AUDIO_PORT1_RX_MSB_REG, clk_shift);
index cb1675cd8e1c5ffd9b404b9d12ee9c404f6f2efd..92bbfec9b107a0ec5ee55205b9272ce9e67caa74 100644 (file)
@@ -401,7 +401,7 @@ static const struct snd_kcontrol_new sgtl5000_snd_controls[] = {
                        5, 1, 0),
 
        SOC_SINGLE_TLV("Mic Volume", SGTL5000_CHIP_MIC_CTRL,
-                       0, 4, 0, mic_gain_tlv),
+                       0, 3, 0, mic_gain_tlv),
 };
 
 /* mute the codec used by alsa core */
@@ -1344,7 +1344,7 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
                        SGTL5000_HP_ZCD_EN |
                        SGTL5000_ADC_ZCD_EN);
 
-       snd_soc_write(codec, SGTL5000_CHIP_MIC_CTRL, 0);
+       snd_soc_write(codec, SGTL5000_CHIP_MIC_CTRL, 2);
 
        /*
         * disable DAP
index ab355c4f0b2de0aa786e754360a6a877a0855756..40c07be9b5814f170f73fa1b3670d3813101bb67 100644 (file)
                                SNDRV_PCM_FMTBIT_S32_LE)
 #define        S2PC_VALUE              0x98
 #define CLOCK_OUT              0x60
-#define LEFT_J_DATA_FORMAT     0x10
-#define I2S_DATA_FORMAT                0x12
-#define RIGHT_J_DATA_FORMAT    0x14
+#define DATA_FORMAT_MSK                0x0E
+#define LEFT_J_DATA_FORMAT     0x00
+#define I2S_DATA_FORMAT                0x02
+#define RIGHT_J_DATA_FORMAT    0x04
 #define CODEC_MUTE_VAL         0x80
 
 #define POWER_CNTLMSAK         0x40
@@ -289,7 +290,7 @@ static int sta529_set_dai_fmt(struct snd_soc_dai *codec_dai, u32 fmt)
                return -EINVAL;
        }
 
-       snd_soc_update_bits(codec, STA529_S2PCFG0, 0x0D, mode);
+       snd_soc_update_bits(codec, STA529_S2PCFG0, DATA_FORMAT_MSK, mode);
 
        return 0;
 }
index 1cbe88f01d634137e0d7e85218556e1c81710d59..12bcae63a7f020f2dab718b535ad8b3d41d49ccd 100644 (file)
@@ -209,9 +209,9 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
 
        ret = wm2000_read(i2c, WM2000_REG_SPEECH_CLARITY);
        if (wm2000->speech_clarity)
-               ret &= ~WM2000_SPEECH_CLARITY;
-       else
                ret |= WM2000_SPEECH_CLARITY;
+       else
+               ret &= ~WM2000_SPEECH_CLARITY;
        wm2000_write(i2c, WM2000_REG_SPEECH_CLARITY, ret);
 
        wm2000_write(i2c, WM2000_REG_SYS_START0, 0x33);
index afcf31df77e06efe19c709630f53990741348227..d8c65f5746583474b4cdfb286d339c512f3562d6 100644 (file)
@@ -1019,8 +1019,6 @@ static const char *wm2200_mixer_texts[] = {
        "EQR",
        "LHPF1",
        "LHPF2",
-       "LHPF3",
-       "LHPF4",
        "DSP1.1",
        "DSP1.2",
        "DSP1.3",
@@ -1053,7 +1051,6 @@ static int wm2200_mixer_values[] = {
        0x25,
        0x50,   /* EQ */
        0x51,
-       0x52,
        0x60,   /* LHPF1 */
        0x61,   /* LHPF2 */
        0x68,   /* DSP1 */
@@ -1566,15 +1563,9 @@ static int wm2200_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        case SND_SOC_DAIFMT_DSP_A:
                fmt_val = 0;
                break;
-       case SND_SOC_DAIFMT_DSP_B:
-               fmt_val = 1;
-               break;
        case SND_SOC_DAIFMT_I2S:
                fmt_val = 2;
                break;
-       case SND_SOC_DAIFMT_LEFT_J:
-               fmt_val = 3;
-               break;
        default:
                dev_err(codec->dev, "Unsupported DAI format %d\n",
                        fmt & SND_SOC_DAIFMT_FORMAT_MASK);
@@ -1626,7 +1617,7 @@ static int wm2200_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
                            WM2200_AIF1TX_LRCLK_MSTR | WM2200_AIF1TX_LRCLK_INV,
                            lrclk);
        snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_5,
-                           WM2200_AIF1_FMT_MASK << 1, fmt_val << 1);
+                           WM2200_AIF1_FMT_MASK, fmt_val);
 
        return 0;
 }
index 5a5f3693623568d2d6268e9bc10e7055664c5d3d..54397a50807379bf2e625b4f980773ad72515164 100644 (file)
@@ -1279,15 +1279,9 @@ static int wm5100_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        case SND_SOC_DAIFMT_DSP_A:
                mask = 0;
                break;
-       case SND_SOC_DAIFMT_DSP_B:
-               mask = 1;
-               break;
        case SND_SOC_DAIFMT_I2S:
                mask = 2;
                break;
-       case SND_SOC_DAIFMT_LEFT_J:
-               mask = 3;
-               break;
        default:
                dev_err(codec->dev, "Unsupported DAI format %d\n",
                        fmt & SND_SOC_DAIFMT_FORMAT_MASK);
index 688ade0805897557baac586aeb6387a70361fd2c..988e817dca05fc15849391c968958eaf836de9a7 100644 (file)
@@ -36,6 +36,9 @@
 struct wm5102_priv {
        struct arizona_priv core;
        struct arizona_fll fll[2];
+
+       unsigned int spk_ena:2;
+       unsigned int spk_ena_pending:1;
 };
 
 static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
@@ -787,6 +790,47 @@ ARIZONA_MIXER_CONTROLS("AIF3TX1", ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("AIF3TX2", ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE),
 };
 
+static int wm5102_spk_ev(struct snd_soc_dapm_widget *w,
+                        struct snd_kcontrol *kcontrol,
+                        int event)
+{
+       struct snd_soc_codec *codec = w->codec;
+       struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+       struct wm5102_priv *wm5102 = snd_soc_codec_get_drvdata(codec);
+
+       if (arizona->rev < 1)
+               return 0;
+
+       switch (event) {
+       case SND_SOC_DAPM_PRE_PMU:
+               if (!wm5102->spk_ena) {
+                       snd_soc_write(codec, 0x4f5, 0x25a);
+                       wm5102->spk_ena_pending = true;
+               }
+               break;
+       case SND_SOC_DAPM_POST_PMU:
+               if (wm5102->spk_ena_pending) {
+                       msleep(75);
+                       snd_soc_write(codec, 0x4f5, 0xda);
+                       wm5102->spk_ena_pending = false;
+                       wm5102->spk_ena++;
+               }
+               break;
+       case SND_SOC_DAPM_PRE_PMD:
+               wm5102->spk_ena--;
+               if (!wm5102->spk_ena)
+                       snd_soc_write(codec, 0x4f5, 0x25a);
+               break;
+       case SND_SOC_DAPM_POST_PMD:
+               if (!wm5102->spk_ena)
+                       snd_soc_write(codec, 0x4f5, 0x0da);
+               break;
+       }
+
+       return 0;
+}
+
+
 ARIZONA_MIXER_ENUMS(EQ1, ARIZONA_EQ1MIX_INPUT_1_SOURCE);
 ARIZONA_MIXER_ENUMS(EQ2, ARIZONA_EQ2MIX_INPUT_1_SOURCE);
 ARIZONA_MIXER_ENUMS(EQ3, ARIZONA_EQ3MIX_INPUT_1_SOURCE);
@@ -852,8 +896,7 @@ static const unsigned int wm5102_aec_loopback_values[] = {
 
 static const struct soc_enum wm5102_aec_loopback =
        SOC_VALUE_ENUM_SINGLE(ARIZONA_DAC_AEC_CONTROL_1,
-                             ARIZONA_AEC_LOOPBACK_SRC_SHIFT,
-                             ARIZONA_AEC_LOOPBACK_SRC_MASK,
+                             ARIZONA_AEC_LOOPBACK_SRC_SHIFT, 0xf,
                              ARRAY_SIZE(wm5102_aec_loopback_texts),
                              wm5102_aec_loopback_texts,
                              wm5102_aec_loopback_values);
@@ -1034,10 +1077,10 @@ SND_SOC_DAPM_PGA_E("OUT3L", ARIZONA_OUTPUT_ENABLES_1,
                   ARIZONA_OUT3L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
                   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
 SND_SOC_DAPM_PGA_E("OUT4L", ARIZONA_OUTPUT_ENABLES_1,
-                  ARIZONA_OUT4L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+                  ARIZONA_OUT4L_ENA_SHIFT, 0, NULL, 0, wm5102_spk_ev,
                   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
 SND_SOC_DAPM_PGA_E("OUT4R", ARIZONA_OUTPUT_ENABLES_1,
-                  ARIZONA_OUT4R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+                  ARIZONA_OUT4R_ENA_SHIFT, 0, NULL, 0, wm5102_spk_ev,
                   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
 SND_SOC_DAPM_PGA_E("OUT5L", ARIZONA_OUTPUT_ENABLES_1,
                   ARIZONA_OUT5L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
@@ -1109,6 +1152,8 @@ SND_SOC_DAPM_OUTPUT("SPKOUTRN"),
 SND_SOC_DAPM_OUTPUT("SPKOUTRP"),
 SND_SOC_DAPM_OUTPUT("SPKDAT1L"),
 SND_SOC_DAPM_OUTPUT("SPKDAT1R"),
+
+SND_SOC_DAPM_OUTPUT("MICSUPP"),
 };
 
 #define ARIZONA_MIXER_INPUT_ROUTES(name)       \
@@ -1321,6 +1366,8 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {
        { "AEC Loopback", "SPKDAT1R", "OUT5R" },
        { "SPKDAT1L", NULL, "OUT5L" },
        { "SPKDAT1R", NULL, "OUT5R" },
+
+       { "MICSUPP", NULL, "SYSCLK" },
 };
 
 static int wm5102_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
index ae80c8c285360cffd1ef82af06626ceccf2769d2..0320a32670d30c921836a0932f41d73b66b01c0b 100644 (file)
@@ -344,8 +344,7 @@ static const unsigned int wm5110_aec_loopback_values[] = {
 
 static const struct soc_enum wm5110_aec_loopback =
        SOC_VALUE_ENUM_SINGLE(ARIZONA_DAC_AEC_CONTROL_1,
-                             ARIZONA_AEC_LOOPBACK_SRC_SHIFT,
-                             ARIZONA_AEC_LOOPBACK_SRC_MASK,
+                             ARIZONA_AEC_LOOPBACK_SRC_SHIFT, 0xf,
                              ARRAY_SIZE(wm5110_aec_loopback_texts),
                              wm5110_aec_loopback_texts,
                              wm5110_aec_loopback_values);
@@ -625,6 +624,8 @@ SND_SOC_DAPM_OUTPUT("SPKDAT1L"),
 SND_SOC_DAPM_OUTPUT("SPKDAT1R"),
 SND_SOC_DAPM_OUTPUT("SPKDAT2L"),
 SND_SOC_DAPM_OUTPUT("SPKDAT2R"),
+
+SND_SOC_DAPM_OUTPUT("MICSUPP"),
 };
 
 #define ARIZONA_MIXER_INPUT_ROUTES(name)       \
@@ -833,6 +834,8 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
 
        { "SPKDAT2L", NULL, "OUT6L" },
        { "SPKDAT2R", NULL, "OUT6R" },
+
+       { "MICSUPP", NULL, "SYSCLK" },
 };
 
 static int wm5110_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
index ffc89fab96fbf4b6ccc81973f69df863132da1ec..b6b65483758518f8b8cab7672af882ef923d2768 100644 (file)
@@ -169,6 +169,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
        const struct wm_adsp_region *mem;
        const char *region_name;
        char *file, *text;
+       void *buf;
        unsigned int reg;
        int regions = 0;
        int ret, offset, type, sizes;
@@ -322,8 +323,18 @@ static int wm_adsp_load(struct wm_adsp *dsp)
                }
 
                if (reg) {
-                       ret = regmap_raw_write(regmap, reg, region->data,
+                       buf = kmemdup(region->data, le32_to_cpu(region->len),
+                                     GFP_KERNEL | GFP_DMA);
+                       if (!buf) {
+                               adsp_err(dsp, "Out of memory\n");
+                               return -ENOMEM;
+                       }
+
+                       ret = regmap_raw_write(regmap, reg, buf,
                                               le32_to_cpu(region->len));
+
+                       kfree(buf);
+
                        if (ret != 0) {
                                adsp_err(dsp,
                                        "%s.%d: Failed to write %d bytes at %d in %s: %d\n",
@@ -359,6 +370,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
        const char *region_name;
        int ret, pos, blocks, type, offset, reg;
        char *file;
+       void *buf;
 
        file = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (file == NULL)
@@ -384,7 +396,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
        hdr = (void*)&firmware->data[0];
        if (memcmp(hdr->magic, "WMDR", 4) != 0) {
                adsp_err(dsp, "%s: invalid magic\n", file);
-               return -EINVAL;
+               goto out_fw;
        }
 
        adsp_dbg(dsp, "%s: v%d.%d.%d\n", file,
@@ -426,6 +438,13 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
                }
 
                if (reg) {
+                       buf = kmemdup(blk->data, le32_to_cpu(blk->len),
+                                     GFP_KERNEL | GFP_DMA);
+                       if (!buf) {
+                               adsp_err(dsp, "Out of memory\n");
+                               return -ENOMEM;
+                       }
+
                        ret = regmap_raw_write(regmap, reg, buf,
                                               le32_to_cpu(blk->len));
                        if (ret != 0) {
@@ -433,6 +452,8 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
                                        "%s.%d: Failed to write to %x in %s\n",
                                        file, blocks, reg, region_name);
                        }
+
+                       kfree(buf);
                }
 
                pos += le32_to_cpu(blk->len) + sizeof(*blk);
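
The two hunks above copy each firmware block into a freshly allocated GFP_KERNEL | GFP_DMA buffer before passing it to regmap_raw_write(), since the memory backing a request_firmware() blob is not guaranteed to be DMA-safe for the underlying bus controller. A minimal sketch of that bounce-buffer pattern (the helper name is hypothetical, not part of the driver):

        #include <linux/regmap.h>
        #include <linux/slab.h>
        #include <linux/string.h>

        /* Copy possibly non-DMA-safe data into a kernel buffer, write it
         * through regmap, then release the temporary buffer. */
        static int write_via_bounce_buffer(struct regmap *map, unsigned int reg,
                                           const void *data, size_t len)
        {
                void *buf;
                int ret;

                buf = kmemdup(data, len, GFP_KERNEL | GFP_DMA);
                if (!buf)
                        return -ENOMEM;

                ret = regmap_raw_write(map, reg, buf, len);
                kfree(buf);
                return ret;
        }
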
index bf363d8d044aa2f1c38d44f0c66a20f1131a65de..500f8ce55d78f0891bcf71dabf1c98cae3e8379e 100644 (file)
@@ -154,26 +154,7 @@ static struct snd_soc_platform_driver imx_soc_platform_mx2 = {
        .pcm_free       = imx_pcm_free,
 };
 
-static int imx_soc_platform_probe(struct platform_device *pdev)
+int imx_pcm_dma_init(struct platform_device *pdev)
 {
        return snd_soc_register_platform(&pdev->dev, &imx_soc_platform_mx2);
 }
-
-static int imx_soc_platform_remove(struct platform_device *pdev)
-{
-       snd_soc_unregister_platform(&pdev->dev);
-       return 0;
-}
-
-static struct platform_driver imx_pcm_driver = {
-       .driver = {
-                       .name = "imx-pcm-audio",
-                       .owner = THIS_MODULE,
-       },
-       .probe = imx_soc_platform_probe,
-       .remove = imx_soc_platform_remove,
-};
-
-module_platform_driver(imx_pcm_driver);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx-pcm-audio");
index 5ec362ae4d012b654cdadbfeace42036ed76764e..920f945cb2f4e17d721f7e5f542ca0b454ba9b74 100644 (file)
@@ -281,7 +281,7 @@ static struct snd_soc_platform_driver imx_soc_platform_fiq = {
        .pcm_free       = imx_pcm_fiq_free,
 };
 
-static int imx_soc_platform_probe(struct platform_device *pdev)
+int imx_pcm_fiq_init(struct platform_device *pdev)
 {
        struct imx_ssi *ssi = platform_get_drvdata(pdev);
        int ret;
@@ -314,23 +314,3 @@ failed_register:
 
        return ret;
 }
-
-static int imx_soc_platform_remove(struct platform_device *pdev)
-{
-       snd_soc_unregister_platform(&pdev->dev);
-       return 0;
-}
-
-static struct platform_driver imx_pcm_driver = {
-       .driver = {
-                       .name = "imx-fiq-pcm-audio",
-                       .owner = THIS_MODULE,
-       },
-
-       .probe = imx_soc_platform_probe,
-       .remove = imx_soc_platform_remove,
-};
-
-module_platform_driver(imx_pcm_driver);
-
-MODULE_LICENSE("GPL");
index d5cd9eff3b48e92ca77349b930fe84056fcd7017..0d0625bfcb65a990e0ff119fa080a1b61f535857 100644 (file)
@@ -104,6 +104,38 @@ void imx_pcm_free(struct snd_pcm *pcm)
 }
 EXPORT_SYMBOL_GPL(imx_pcm_free);
 
+static int imx_pcm_probe(struct platform_device *pdev)
+{
+       if (strcmp(pdev->id_entry->name, "imx-fiq-pcm-audio") == 0)
+               return imx_pcm_fiq_init(pdev);
+
+       return imx_pcm_dma_init(pdev);
+}
+
+static int imx_pcm_remove(struct platform_device *pdev)
+{
+       snd_soc_unregister_platform(&pdev->dev);
+       return 0;
+}
+
+static struct platform_device_id imx_pcm_devtype[] = {
+       { .name = "imx-pcm-audio", },
+       { .name = "imx-fiq-pcm-audio", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, imx_pcm_devtype);
+
+static struct platform_driver imx_pcm_driver = {
+       .driver = {
+                       .name = "imx-pcm",
+                       .owner = THIS_MODULE,
+       },
+       .id_table = imx_pcm_devtype,
+       .probe = imx_pcm_probe,
+       .remove = imx_pcm_remove,
+};
+module_platform_driver(imx_pcm_driver);
+
 MODULE_DESCRIPTION("Freescale i.MX PCM driver");
 MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
 MODULE_LICENSE("GPL");
index 83c0ed7d55c9a4710b75846bb0bd5569d257ed0f..5ae13a13a353834f72bec2c933b3c4b19dfc3012 100644 (file)
@@ -30,4 +30,22 @@ int snd_imx_pcm_mmap(struct snd_pcm_substream *substream,
 int imx_pcm_new(struct snd_soc_pcm_runtime *rtd);
 void imx_pcm_free(struct snd_pcm *pcm);
 
+#ifdef CONFIG_SND_SOC_IMX_PCM_DMA
+int imx_pcm_dma_init(struct platform_device *pdev);
+#else
+static inline int imx_pcm_dma_init(struct platform_device *pdev)
+{
+       return -ENODEV;
+}
+#endif
+
+#ifdef CONFIG_SND_SOC_IMX_PCM_FIQ
+int imx_pcm_fiq_init(struct platform_device *pdev);
+#else
+static inline int imx_pcm_fiq_init(struct platform_device *pdev)
+{
+       return -ENODEV;
+}
+#endif
+
 #endif /* _IMX_PCM_H */
index 91d592ff67b7914cdbeec98c24eb3b4c1cb6a364..2370063b58245946f71adfa05f361097d3e53374 100644 (file)
@@ -1255,6 +1255,8 @@ static int soc_post_component_init(struct snd_soc_card *card,
        INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_CAPTURE].fe_clients);
        ret = device_add(rtd->dev);
        if (ret < 0) {
+               /* calling put_device() here to free the rtd->dev */
+               put_device(rtd->dev);
                dev_err(card->dev,
                        "ASoC: failed to register runtime device: %d\n", ret);
                return ret;
@@ -1554,7 +1556,7 @@ static void soc_remove_aux_dev(struct snd_soc_card *card, int num)
        /* unregister the rtd device */
        if (rtd->dev_registered) {
                device_remove_file(rtd->dev, &dev_attr_codec_reg);
-               device_del(rtd->dev);
+               device_unregister(rtd->dev);
                rtd->dev_registered = 0;
        }
 
@@ -2917,7 +2919,7 @@ int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol,
        platform_max = mc->platform_max;
 
        uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
-       uinfo->count = 1;
+       uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1;
        uinfo->value.integer.min = 0;
        uinfo->value.integer.max = platform_max - min;
 
@@ -2941,12 +2943,14 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
                (struct soc_mixer_control *)kcontrol->private_value;
        struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
        unsigned int reg = mc->reg;
+       unsigned int rreg = mc->rreg;
        unsigned int shift = mc->shift;
        int min = mc->min;
        int max = mc->max;
        unsigned int mask = (1 << fls(max)) - 1;
        unsigned int invert = mc->invert;
        unsigned int val, val_mask;
+       int ret;
 
        val = ((ucontrol->value.integer.value[0] + min) & mask);
        if (invert)
@@ -2954,7 +2958,21 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
        val_mask = mask << shift;
        val = val << shift;
 
-       return snd_soc_update_bits_locked(codec, reg, val_mask, val);
+       ret = snd_soc_update_bits_locked(codec, reg, val_mask, val);
+       if (ret != 0)
+               return ret;
+
+       if (snd_soc_volsw_is_stereo(mc)) {
+               val = ((ucontrol->value.integer.value[1] + min) & mask);
+               if (invert)
+                       val = max - val;
+               val_mask = mask << shift;
+               val = val << shift;
+
+               ret = snd_soc_update_bits_locked(codec, rreg, val_mask, val);
+       }
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(snd_soc_put_volsw_range);
 
@@ -2974,6 +2992,7 @@ int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol,
                (struct soc_mixer_control *)kcontrol->private_value;
        struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
        unsigned int reg = mc->reg;
+       unsigned int rreg = mc->rreg;
        unsigned int shift = mc->shift;
        int min = mc->min;
        int max = mc->max;
@@ -2988,6 +3007,16 @@ int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol,
        ucontrol->value.integer.value[0] =
                ucontrol->value.integer.value[0] - min;
 
+       if (snd_soc_volsw_is_stereo(mc)) {
+               ucontrol->value.integer.value[1] =
+                       (snd_soc_read(codec, rreg) >> shift) & mask;
+               if (invert)
+                       ucontrol->value.integer.value[1] =
+                               max - ucontrol->value.integer.value[1];
+               ucontrol->value.integer.value[1] =
+                       ucontrol->value.integer.value[1] - min;
+       }
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(snd_soc_get_volsw_range);
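
The soc-core.c hunks above extend the *_volsw_range() info/get/put handlers to stereo controls: when snd_soc_volsw_is_stereo() reports a second channel, the right-channel value is read from and written to mc->rreg. A hedged sketch of a codec control that would exercise the new path (register addresses, range and TLV scale are made-up values, not taken from any real codec):

        #include <sound/soc.h>
        #include <sound/tlv.h>

        static const DECLARE_TLV_DB_SCALE(out_tlv, -5700, 100, 0);

        /* Hypothetical left/right volume registers sharing one range. */
        static struct soc_mixer_control out_vol_mc = {
                .reg = 0x20, .rreg = 0x21,
                .shift = 0, .rshift = 0,
                .min = 1, .max = 57, .platform_max = 57,
        };

        static const struct snd_kcontrol_new out_vol_control = {
                .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
                .name = "Speaker Volume",
                .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |
                          SNDRV_CTL_ELEM_ACCESS_READWRITE,
                .tlv.p = out_tlv,
                .info = snd_soc_info_volsw_range,
                .get = snd_soc_get_volsw_range,
                .put = snd_soc_put_volsw_range,
                .private_value = (unsigned long)&out_vol_mc,
        };
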
index 1e36bc81e5af1fb015ffef62f628781910984bbb..258acadb9e7d238a97b7af6508ffc21bd1a4488e 100644 (file)
@@ -1023,7 +1023,7 @@ int dapm_regulator_event(struct snd_soc_dapm_widget *w,
 
        if (SND_SOC_DAPM_EVENT_ON(event)) {
                if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) {
-                       ret = regulator_allow_bypass(w->regulator, true);
+                       ret = regulator_allow_bypass(w->regulator, false);
                        if (ret != 0)
                                dev_warn(w->dapm->dev,
                                         "ASoC: Failed to bypass %s: %d\n",
@@ -1033,7 +1033,7 @@ int dapm_regulator_event(struct snd_soc_dapm_widget *w,
                return regulator_enable(w->regulator);
        } else {
                if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) {
-                       ret = regulator_allow_bypass(w->regulator, false);
+                       ret = regulator_allow_bypass(w->regulator, true);
                        if (ret != 0)
                                dev_warn(w->dapm->dev,
                                         "ASoC: Failed to unbypass %s: %d\n",
@@ -3039,6 +3039,14 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
                                w->name, ret);
                        return NULL;
                }
+
+               if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) {
+                       ret = regulator_allow_bypass(w->regulator, true);
+                       if (ret != 0)
+                               dev_warn(w->dapm->dev,
+                                        "ASoC: Failed to unbypass %s: %d\n",
+                                        w->name, ret);
+               }
                break;
        case snd_soc_dapm_clock_supply:
 #ifdef CONFIG_CLKDEV_LOOKUP
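
The soc-dapm.c changes above invert the bypass handling for regulator supply widgets carrying SND_SOC_DAPM_REGULATOR_BYPASS: the regulator is now allowed to sit in bypass while the widget is idle (including immediately after the widget is created) and is taken out of bypass only while the widget is powered. A short sketch of how a codec driver opts in, using the existing SND_SOC_DAPM_REGULATOR_SUPPLY helper (the supply name is illustrative):

        #include <sound/soc-dapm.h>

        /* A supply that may be bypassed whenever its widget is not powered. */
        static const struct snd_soc_dapm_widget example_widgets[] = {
        SND_SOC_DAPM_REGULATOR_SUPPLY("MICVDD", 0,
                                      SND_SOC_DAPM_REGULATOR_BYPASS),
        };
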
index d7711fce119b7b949bc521759b2f13e177dd657d..cf191e6aebbe1d1bfeb9dd511d0fe9b5e5bd3bbd 100644 (file)
@@ -1243,6 +1243,7 @@ static int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
                if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
                    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
                    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
+                   (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) &&
                    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
                        continue;
 
index ed4d89c8b52a52e6425ce5c992cc5d324b339d35..e90daf8cdaa8726f8311092636461a9fee10b187 100644 (file)
@@ -1331,16 +1331,23 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void
                }
                channels = (hdr->bLength - 7) / csize - 1;
                bmaControls = hdr->bmaControls;
+               if (hdr->bLength < 7 + csize) {
+                       snd_printk(KERN_ERR "usbaudio: unit %u: "
+                                  "invalid UAC_FEATURE_UNIT descriptor\n",
+                                  unitid);
+                       return -EINVAL;
+               }
        } else {
                struct uac2_feature_unit_descriptor *ftr = _ftr;
                csize = 4;
                channels = (hdr->bLength - 6) / 4 - 1;
                bmaControls = ftr->bmaControls;
-       }
-
-       if (hdr->bLength < 7 || !csize || hdr->bLength < 7 + csize) {
-               snd_printk(KERN_ERR "usbaudio: unit %u: invalid UAC_FEATURE_UNIT descriptor\n", unitid);
-               return -EINVAL;
+               if (hdr->bLength < 6 + csize) {
+                       snd_printk(KERN_ERR "usbaudio: unit %u: "
+                                  "invalid UAC_FEATURE_UNIT descriptor\n",
+                                  unitid);
+                       return -EINVAL;
+               }
        }
 
        /* parse the source unit */
index e71fe55cebefa8285963643e7a592a0b71fbfc73..0e2ed3d05c451c32a53032b735bc95c7868df143 100644 (file)
@@ -179,6 +179,15 @@ static struct usbmix_name_map audigy2nx_map[] = {
        { 0 } /* terminator */
 };
 
+static struct usbmix_selector_map c400_selectors[] = {
+       {
+               .id = 0x80,
+               .count = 2,
+               .names = (const char*[]) {"Internal", "SPDIF"}
+       },
+       { 0 } /* terminator */
+};
+
 static struct usbmix_selector_map audigy2nx_selectors[] = {
        {
                .id = 14, /* Capture Source */
@@ -366,6 +375,10 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
                .id = USB_ID(0x06f8, 0xc000),
                .map = hercules_usb51_map,
        },
+       {
+               .id = USB_ID(0x0763, 0x2030),
+               .selector_map = c400_selectors,
+       },
        {
                .id = USB_ID(0x08bb, 0x2702),
                .map = linex_map,
index 0422b1360af3a896da708a79365e3370be771fd8..15520de1df5647c95a4ffe1a13d8ede4485dee09 100644 (file)
@@ -1206,7 +1206,7 @@ static int snd_c400_create_mixer(struct usb_mixer_interface *mixer)
  * are valid they presents mono controls as L and R channels of
  * stereo. So we provide a good mixer here.
  */
-struct std_mono_table ebox44_table[] = {
+static struct std_mono_table ebox44_table[] = {
        {
                .unitid = 4,
                .control = 1,
index c6593101c049b88ca6e5661f8765b3f3df0b4d33..d82e378d37cbb7202aaa487b6003b6b171b79eab 100644 (file)
@@ -511,6 +511,16 @@ static int configure_sync_endpoint(struct snd_usb_substream *subs)
        struct snd_usb_substream *sync_subs =
                &subs->stream->substream[subs->direction ^ 1];
 
+       if (subs->sync_endpoint->type != SND_USB_ENDPOINT_TYPE_DATA ||
+           !subs->stream)
+               return snd_usb_endpoint_set_params(subs->sync_endpoint,
+                                                  subs->pcm_format,
+                                                  subs->channels,
+                                                  subs->period_bytes,
+                                                  subs->cur_rate,
+                                                  subs->cur_audiofmt,
+                                                  NULL);
+
        /* Try to find the best matching audioformat. */
        list_for_each_entry(fp, &sync_subs->fmt_list, list) {
                int score = match_endpoint_audioformats(fp, subs->cur_audiofmt,
index 78e845ec65dafabc06332de0a3e7ea3d31e8b667..64d25a7a4d599538398e5a14d37d11a154ec65fd 100644 (file)
@@ -2289,7 +2289,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
                                        .rate_table = (unsigned int[]) {
                                                        44100, 48000, 88200, 96000
                                        },
-                                       .clock = 0x81,
+                                       .clock = 0x80,
                                }
                        },
                        /* Capture */
@@ -2315,7 +2315,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
                                        .rate_table = (unsigned int[]) {
                                                44100, 48000, 88200, 96000
                                        },
-                                       .clock = 0x81,
+                                       .clock = 0x80,
                                }
                        },
                        /* MIDI */
index acc12f004c23f05f12b21aa311f2f49dc77652d5..2c971858d6b70a146e57266bfa0afe7a1bfe80cb 100644 (file)
@@ -387,11 +387,13 @@ static int snd_usb_fasttrackpro_boot_quirk(struct usb_device *dev)
                 * rules
                 */
                err = usb_driver_set_configuration(dev, 2);
-               if (err < 0) {
+               if (err < 0)
                        snd_printdd("error usb_driver_set_configuration: %d\n",
                                    err);
-                       return -ENODEV;
-               }
+               /* Always return an error, so that we stop creating a device
+                  that will just be destroyed and recreated with a new
+                  configuration */
+               return -ENODEV;
        } else
                snd_printk(KERN_INFO "usb-audio: Fast Track Pro config OK\n");
 
@@ -859,6 +861,17 @@ void snd_usb_endpoint_start_quirk(struct snd_usb_endpoint *ep)
        if ((le16_to_cpu(ep->chip->dev->descriptor.idVendor) == 0x23ba) &&
            ep->type == SND_USB_ENDPOINT_TYPE_SYNC)
                ep->skip_packets = 4;
+
+       /*
+        * M-Audio Fast Track C400 - when packets are not skipped, real world
+        * latency varies by approx. +/- 50 frames (at 96KHz) each time the
+        * stream is (re)started. When skipping packets 16 at endpoint start
+        * up, the real world latency is stable within +/- 1 frame (also
+        * across power cycles).
+        */
+       if (ep->chip->usb_id == USB_ID(0x0763, 0x2030) &&
+           ep->type == SND_USB_ENDPOINT_TYPE_DATA)
+               ep->skip_packets = 16;
 }
 
 void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
index 1f9a529fe544f768c4322e884e4c34d02d482b5e..fa36565b209d80e8e07c14a6e48dd87a5d5d14de 100644 (file)
@@ -3,6 +3,7 @@ include scripts/Makefile.include
 help:
        @echo 'Possible targets:'
        @echo ''
+       @echo '  cgroup     - cgroup tools'
        @echo '  cpupower   - a tool for all things x86 CPU power'
        @echo '  firewire   - the userspace part of nosy, an IEEE-1394 traffic sniffer'
        @echo '  lguest     - a minimal 32-bit x86 hypervisor'
@@ -15,7 +16,7 @@ help:
        @echo '  x86_energy_perf_policy - Intel energy policy tool'
        @echo ''
        @echo 'You can do:'
-       @echo ' $$ make -C tools/<tool>_install'
+       @echo ' $$ make -C tools/ <tool>_install'
        @echo ''
        @echo '  from the kernel command line to build and install one of'
        @echo '  the tools above'
@@ -33,7 +34,7 @@ help:
 cpupower: FORCE
        $(call descend,power/$@)
 
-firewire lguest perf usb virtio vm: FORCE
+cgroup firewire lguest perf usb virtio vm: FORCE
        $(call descend,$@)
 
 selftests: FORCE
@@ -45,7 +46,7 @@ turbostat x86_energy_perf_policy: FORCE
 cpupower_install:
        $(call descend,power/$(@:_install=),install)
 
-firewire_install lguest_install perf_install usb_install virtio_install vm_install:
+cgroup_install firewire_install lguest_install perf_install usb_install virtio_install vm_install:
        $(call descend,$(@:_install=),install)
 
 selftests_install:
@@ -54,14 +55,14 @@ selftests_install:
 turbostat_install x86_energy_perf_policy_install:
        $(call descend,power/x86/$(@:_install=),install)
 
-install: cpupower_install firewire_install lguest_install perf_install \
-               selftests_install turbostat_install usb_install virtio_install \
-               vm_install x86_energy_perf_policy_install
+install: cgroup_install cpupower_install firewire_install lguest_install \
+               perf_install selftests_install turbostat_install usb_install \
+               virtio_install vm_install x86_energy_perf_policy_install
 
 cpupower_clean:
        $(call descend,power/cpupower,clean)
 
-firewire_clean lguest_clean perf_clean usb_clean virtio_clean vm_clean:
+cgroup_clean firewire_clean lguest_clean perf_clean usb_clean virtio_clean vm_clean:
        $(call descend,$(@:_clean=),clean)
 
 selftests_clean:
@@ -70,8 +71,8 @@ selftests_clean:
 turbostat_clean x86_energy_perf_policy_clean:
        $(call descend,power/x86/$(@:_clean=),clean)
 
-clean: cpupower_clean firewire_clean lguest_clean perf_clean selftests_clean \
-               turbostat_clean usb_clean virtio_clean vm_clean \
-               x86_energy_perf_policy_clean
+clean: cgroup_clean cpupower_clean firewire_clean lguest_clean perf_clean \
+               selftests_clean turbostat_clean usb_clean virtio_clean \
+               vm_clean x86_energy_perf_policy_clean
 
 .PHONY: FORCE
diff --git a/tools/cgroup/.gitignore b/tools/cgroup/.gitignore
new file mode 100644 (file)
index 0000000..633cd9b
--- /dev/null
@@ -0,0 +1 @@
+cgroup_event_listener
diff --git a/tools/cgroup/Makefile b/tools/cgroup/Makefile
new file mode 100644 (file)
index 0000000..b428619
--- /dev/null
@@ -0,0 +1,11 @@
+# Makefile for cgroup tools
+
+CC = $(CROSS_COMPILE)gcc
+CFLAGS = -Wall -Wextra
+
+all: cgroup_event_listener
+%: %.c
+       $(CC) $(CFLAGS) -o $@ $^
+
+clean:
+       $(RM) cgroup_event_listener
similarity index 54%
rename from Documentation/cgroups/cgroup_event_listener.c
rename to tools/cgroup/cgroup_event_listener.c
index 3e082f96dc125333a2b32076710a23b78bc17c5f..4eb5507205c981a3f59fc055700eb8f369cb5d79 100644 (file)
@@ -5,6 +5,7 @@
  */
 
 #include <assert.h>
+#include <err.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <libgen.h>
@@ -15,7 +16,7 @@
 
 #include <sys/eventfd.h>
 
-#define USAGE_STR "Usage: cgroup_event_listener <path-to-control-file> <args>\n"
+#define USAGE_STR "Usage: cgroup_event_listener <path-to-control-file> <args>"
 
 int main(int argc, char **argv)
 {
@@ -26,49 +27,33 @@ int main(int argc, char **argv)
        char line[LINE_MAX];
        int ret;
 
-       if (argc != 3) {
-               fputs(USAGE_STR, stderr);
-               return 1;
-       }
+       if (argc != 3)
+               errx(1, "%s", USAGE_STR);
 
        cfd = open(argv[1], O_RDONLY);
-       if (cfd == -1) {
-               fprintf(stderr, "Cannot open %s: %s\n", argv[1],
-                               strerror(errno));
-               goto out;
-       }
+       if (cfd == -1)
+               err(1, "Cannot open %s", argv[1]);
 
        ret = snprintf(event_control_path, PATH_MAX, "%s/cgroup.event_control",
                        dirname(argv[1]));
-       if (ret >= PATH_MAX) {
-               fputs("Path to cgroup.event_control is too long\n", stderr);
-               goto out;
-       }
+       if (ret >= PATH_MAX)
+               errx(1, "Path to cgroup.event_control is too long");
 
        event_control = open(event_control_path, O_WRONLY);
-       if (event_control == -1) {
-               fprintf(stderr, "Cannot open %s: %s\n", event_control_path,
-                               strerror(errno));
-               goto out;
-       }
+       if (event_control == -1)
+               err(1, "Cannot open %s", event_control_path);
 
        efd = eventfd(0, 0);
-       if (efd == -1) {
-               perror("eventfd() failed");
-               goto out;
-       }
+       if (efd == -1)
+               err(1, "eventfd() failed");
 
        ret = snprintf(line, LINE_MAX, "%d %d %s", efd, cfd, argv[2]);
-       if (ret >= LINE_MAX) {
-               fputs("Arguments string is too long\n", stderr);
-               goto out;
-       }
+       if (ret >= LINE_MAX)
+               errx(1, "Arguments string is too long");
 
        ret = write(event_control, line, strlen(line) + 1);
-       if (ret == -1) {
-               perror("Cannot write to cgroup.event_control");
-               goto out;
-       }
+       if (ret == -1)
+               err(1, "Cannot write to cgroup.event_control");
 
        while (1) {
                uint64_t result;
@@ -77,34 +62,21 @@ int main(int argc, char **argv)
                if (ret == -1) {
                        if (errno == EINTR)
                                continue;
-                       perror("Cannot read from eventfd");
-                       break;
+                       err(1, "Cannot read from eventfd");
                }
                assert(ret == sizeof(result));
 
                ret = access(event_control_path, W_OK);
                if ((ret == -1) && (errno == ENOENT)) {
-                               puts("The cgroup seems to have removed.");
-                               ret = 0;
-                               break;
-               }
-
-               if (ret == -1) {
-                       perror("cgroup.event_control "
-                                       "is not accessible any more");
+                       puts("The cgroup seems to have removed.");
                        break;
                }
 
+               if (ret == -1)
+                       err(1, "cgroup.event_control is not accessible any more");
+
                printf("%s %s: crossed\n", argv[1], argv[2]);
        }
 
-out:
-       if (efd >= 0)
-               close(efd);
-       if (event_control >= 0)
-               close(event_control);
-       if (cfd >= 0)
-               close(cfd);
-
-       return (ret != 0);
+       return 0;
 }
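
The rewrite above replaces the hand-rolled fprintf()/goto cleanup in cgroup_event_listener with err(3) and errx(3), which print the program name and message and then exit; err() additionally appends the text for the current errno, errx() does not. A self-contained sketch of the idiom (the file argument and messages are placeholders):

        #include <err.h>
        #include <fcntl.h>

        int main(int argc, char **argv)
        {
                int fd;

                if (argc != 2)
                        errx(1, "Usage: %s <file>", argv[0]);  /* no errno text */

                fd = open(argv[1], O_RDONLY);
                if (fd == -1)
                        err(1, "Cannot open %s", argv[1]);  /* appends strerror(errno) */

                return 0;
        }
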
index 5a824e355d04d61a694853d285078effa2e2985e..82b0606dcb8ab04ed08920abf9c2c193e4ee42bf 100644 (file)
@@ -13,8 +13,7 @@
  * GNU Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * License along with this program; if not,  see <http://www.gnu.org/licenses>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
@@ -1224,6 +1223,34 @@ static int field_is_long(struct format_field *field)
        return 0;
 }
 
+static unsigned int type_size(const char *name)
+{
+       /* This covers all FIELD_IS_STRING types. */
+       static struct {
+               const char *type;
+               unsigned int size;
+       } table[] = {
+               { "u8",   1 },
+               { "u16",  2 },
+               { "u32",  4 },
+               { "u64",  8 },
+               { "s8",   1 },
+               { "s16",  2 },
+               { "s32",  4 },
+               { "s64",  8 },
+               { "char", 1 },
+               { },
+       };
+       int i;
+
+       for (i = 0; table[i].type; i++) {
+               if (!strcmp(table[i].type, name))
+                       return table[i].size;
+       }
+
+       return 0;
+}
+
 static int event_read_fields(struct event_format *event, struct format_field **fields)
 {
        struct format_field *field = NULL;
@@ -1233,6 +1260,8 @@ static int event_read_fields(struct event_format *event, struct format_field **f
        int count = 0;
 
        do {
+               unsigned int size_dynamic = 0;
+
                type = read_token(&token);
                if (type == EVENT_NEWLINE) {
                        free_token(token);
@@ -1391,6 +1420,7 @@ static int event_read_fields(struct event_format *event, struct format_field **f
                                field->type = new_type;
                                strcat(field->type, " ");
                                strcat(field->type, field->name);
+                               size_dynamic = type_size(field->name);
                                free_token(field->name);
                                strcat(field->type, brackets);
                                field->name = token;
@@ -1463,7 +1493,8 @@ static int event_read_fields(struct event_format *event, struct format_field **f
                        if (read_expect_type(EVENT_ITEM, &token))
                                goto fail;
 
-                       /* add signed type */
+                       if (strtoul(token, NULL, 0))
+                               field->flags |= FIELD_IS_SIGNED;
 
                        free_token(token);
                        if (read_expected(EVENT_OP, ";") < 0)
@@ -1478,10 +1509,14 @@ static int event_read_fields(struct event_format *event, struct format_field **f
                if (field->flags & FIELD_IS_ARRAY) {
                        if (field->arraylen)
                                field->elementsize = field->size / field->arraylen;
+                       else if (field->flags & FIELD_IS_DYNAMIC)
+                               field->elementsize = size_dynamic;
                        else if (field->flags & FIELD_IS_STRING)
                                field->elementsize = 1;
-                       else
-                               field->elementsize = event->pevent->long_size;
+                       else if (field->flags & FIELD_IS_LONG)
+                               field->elementsize = event->pevent ?
+                                                    event->pevent->long_size :
+                                                    sizeof(long);
                } else
                        field->elementsize = field->size;
 
@@ -1785,6 +1820,8 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
                   strcmp(token, "/") == 0 ||
                   strcmp(token, "<") == 0 ||
                   strcmp(token, ">") == 0 ||
+                  strcmp(token, "<=") == 0 ||
+                  strcmp(token, ">=") == 0 ||
                   strcmp(token, "==") == 0 ||
                   strcmp(token, "!=") == 0) {
 
@@ -2481,7 +2518,7 @@ process_dynamic_array(struct event_format *event, struct print_arg *arg, char **
 
        free_token(token);
        arg = alloc_arg();
-       if (!field) {
+       if (!arg) {
                do_warning("%s: not enough memory!", __func__);
                *tok = NULL;
                return EVENT_ERROR;
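
To make the new elementsize and signedness handling above concrete: a tracepoint format description for a dynamic string field typically looks roughly like the line below (offset and size values are illustrative).

        field:__data_loc char[] filename;       offset:8;       size:4; signed:0;

With the hunks above, the parser records type_size("char") = 1 as the element size of such a dynamic array, and the "signed:" value is now honoured by setting FIELD_IS_SIGNED whenever it is non-zero.
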
index 24a4bbabc5d550c5c3a09113a8277f37ad1182b4..7be7e89533e4fed9979dae191cbb2aa808c45121 100644 (file)
@@ -13,8 +13,7 @@
  * GNU Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * License along with this program; if not,  see <http://www.gnu.org/licenses>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
index bc075006966eb69c9dc8f0fcc7a6d636da413cff..e76c9acb92cd5ab693ee70bc7851fffd237a4a4e 100644 (file)
@@ -13,8 +13,7 @@
  * GNU Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * License along with this program; if not,  see <http://www.gnu.org/licenses>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
index 5ea4326ad11f5d16901d49ff68ddc36648671bf4..2500e75583fcc26e6380731d407dc6d985ba6929 100644 (file)
@@ -13,8 +13,7 @@
  * GNU Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * License along with this program; if not,  see <http://www.gnu.org/licenses>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
index f023a133abb625bcecb171ae499cf43126edecaf..bba701cf10e6e28bf2efde0f2089c37131d88d01 100644 (file)
@@ -1,3 +1,22 @@
+/*
+ * Copyright (C) 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not,  see <http://www.gnu.org/licenses>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
index b1ccc923e8a50d47fdaf1cb766ab61197b8f37ef..a57db805136a2c205c6284d65045c2cbd5ee471d 100644 (file)
@@ -13,8 +13,7 @@
  * GNU Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * License along with this program; if not,  see <http://www.gnu.org/licenses>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
index ef6d22e879eb59dbf370096aaecda6d2344a42b0..eb30044a922a3fa2669371c859eadc231083635f 100644 (file)
@@ -222,10 +222,14 @@ install-pdf: pdf
 #install-html: html
 #      '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir)
 
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),tags)
 $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
        $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) $(OUTPUT)PERF-VERSION-FILE
 
 -include $(OUTPUT)PERF-VERSION-FILE
+endif
+endif
 
 #
 # Determine "include::" file references in asciidoc files.
index c8ffd9fd5c6afdd53d1722cd2cbd41737cee0992..5ad07ef417f0e28dd1ab0a5fb4d26940c7f707d0 100644 (file)
@@ -61,11 +61,13 @@ OPTIONS
 
 --stdio:: Use the stdio interface.
 
---tui:: Use the TUI interface Use of --tui requires a tty, if one is not
+--tui:: Use the TUI interface. Use of --tui requires a tty, if one is not
        present, as when piping to other commands, the stdio interface is
        used. This interfaces starts by centering on the line with more
        samples, TAB/UNTAB cycles through the lines with more samples.
 
+--gtk:: Use the GTK interface.
+
 -C::
 --cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can
        be provided as a comma-separated list with no space: 0,1. Ranges of
@@ -88,6 +90,9 @@ OPTIONS
 --objdump=<path>::
         Path to objdump binary.
 
+--skip-missing::
+       Skip symbols that cannot be annotated.
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-report[1]
index c1057701a7dc7d61e49f9b827d747e686a06bfd1..e9a8349a7172f7533fe970062c5936b4fd7df6fd 100644 (file)
@@ -24,6 +24,13 @@ OPTIONS
 -r::
 --remove=::
         Remove specified file from the cache.
+-M::
+--missing=:: 
+       List missing build ids in the cache for the specified file.
+-u::
+--update::
+       Update the specified file in the cache. It can be used to update a
+       kallsyms kernel dso to vmlinux in order to support annotation.
 -v::
 --verbose::
        Be more verbose.
index 194f37d635dfa3e543f21a37807f6be61e9e91bc..5b3123d5721f94286a62979a3aabad8c6c7a0e7f 100644 (file)
@@ -22,10 +22,6 @@ specified perf.data files.
 
 OPTIONS
 -------
--M::
---displacement::
-        Show position displacement relative to baseline.
-
 -D::
 --dump-raw-trace::
         Dump raw trace in ASCII.
index 15217345c2fab7d13a451ff08473d5faf29d7171..1ceb3700ffbb4f6807d5a77fa4071fedb0aa60f6 100644 (file)
@@ -28,6 +28,10 @@ OPTIONS
 --verbose=::
        Show all fields.
 
+-g::
+--group::
+       Show event group information.
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-list[1],
index f4d91bebd59d4adc70272a3a11e25f9c42b827e1..02284a0067f0a416c2db23203ecfcc76ae7bd808 100644 (file)
@@ -57,11 +57,44 @@ OPTIONS
 
 -s::
 --sort=::
-       Sort by key(s): pid, comm, dso, symbol, parent, srcline.
+       Sort histogram entries by the given key(s) - multiple keys can be
+       specified as a comma-separated list.  The following sort keys are
+       available: pid, comm, dso, symbol, parent, cpu, srcline.
+
+       Each key has the following meaning:
+
+       - comm: command (name) of the task which can be read via /proc/<pid>/comm
+       - pid: command and tid of the task
+       - dso: name of library or module executed at the time of sample
+       - symbol: name of function executed at the time of sample
+       - parent: name of function matched to the parent regex filter. Unmatched
+       entries are displayed as "[other]".
+       - cpu: cpu number the task ran at the time of sample
+       - srcline: filename and line number executed at the time of sample.  The
+       DWARF debugging info must be provided.
+
+       By default, comm, dso and symbol keys are used.
+       (i.e. --sort comm,dso,symbol)
+
+       If the --branch-stack option is used, the following sort keys are also
+       available:
+       dso_from, dso_to, symbol_from, symbol_to, mispredict.
+
+       - dso_from: name of library or module branched from
+       - dso_to: name of library or module branched to
+       - symbol_from: name of function branched from
+       - symbol_to: name of function branched to
+       - mispredict: "N" for predicted branch, "Y" for mispredicted branch
+
+       The default sort keys then change to comm, dso_from, symbol_from,
+       dso_to and symbol_to; see '--branch-stack'.
 
 -p::
 --parent=<regex>::
-        regex filter to identify parent, see: '--sort parent'
+        A regex filter to identify the parent. The parent is a caller of this
+       function and is searched for through the callchain, so it requires
+       callchain information to be recorded. The pattern is in the extended
+       regex format and defaults to "\^sys_|^do_page_fault", see '--sort parent'.
 
 -x::
 --exclude-other::
@@ -74,7 +107,6 @@ OPTIONS
 
 -t::
 --field-separator=::
-
        Use a special separator character and don't pad with spaces, replacing
        all occurrences of this separator in symbol names (and other output)
        with a '.' character, that thus it's the only non valid separator.
@@ -171,6 +203,9 @@ OPTIONS
 --objdump=<path>::
         Path to objdump binary.
 
+--group::
+       Show event group information together.
+
 SEE ALSO
 --------
 linkperf:perf-stat[1], linkperf:perf-annotate[1]
index a4027f221a535457c672f8e6d99ccb4fe1db0925..9f1f054b8432c7aaed29e0f905f58ab25db2d2a9 100644 (file)
@@ -336,7 +336,6 @@ scripts listed by the 'perf script -l' command e.g.:
 ----
 root@tropicana:~# perf script -l
 List of available trace scripts:
-  workqueue-stats                      workqueue stats (ins/exe/create/destroy)
   wakeup-latency                       system-wide min/max/avg wakeup latency
   rw-by-file <comm>                    r/w activity for a program, by file
   rw-by-pid                            system-wide r/w activity
@@ -402,7 +401,6 @@ should show a new entry for your script:
 ----
 root@tropicana:~# perf script -l
 List of available trace scripts:
-  workqueue-stats                      workqueue stats (ins/exe/create/destroy)
   wakeup-latency                       system-wide min/max/avg wakeup latency
   rw-by-file <comm>                    r/w activity for a program, by file
   rw-by-pid                            system-wide r/w activity
index cf0c3107e06e4737fd4c2fda1f3f81a2a4a08e95..faf4f4feebccf239ef02d198900c8bc0be54afd2 100644 (file)
@@ -114,6 +114,17 @@ with it.  --append may be used here.  Examples:
 
 perf stat --repeat 10 --null --sync --pre 'make -s O=defconfig-build/clean' -- make -s -j64 O=defconfig-build/ bzImage
 
+-I msecs::
+--interval-print msecs::
+       Print count deltas every N milliseconds (minimum: 100ms)
+       example: perf stat -I 1000 -e cycles -a sleep 5
+
+--aggr-socket::
+Aggregate counts per processor socket for system-wide mode measurements.  This
+is a useful mode to detect imbalance between sockets.  To enable this mode,
+use --aggr-socket in addition to -a (system-wide).  The output includes the
+socket number and the number of online processors on that socket. This is
+useful to gauge the amount of aggregation.
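+example (illustrative): perf stat --aggr-socket -a -e cycles sleep 5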
 
 EXAMPLES
 --------
index b24ac40fcd58cad9391ddee7495370e3eb2f814a..d1d3e5121f89ca0389229d0e4b63fd7f126145db 100644 (file)
@@ -23,6 +23,10 @@ from 'perf test list'.
 
 OPTIONS
 -------
+-s::
+--skip::
+       Tests to skip (comma separated numeric list).
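+       example (illustrative): perf test -s 1,3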
+
 -v::
 --verbose::
        Be more verbose.
index 5b80d84d6b4a3e7e430499d0381c11a8856f316d..a414bc95fd528f89678c9221960a7f37170787bf 100644 (file)
@@ -60,7 +60,7 @@ Default is to monitor all CPUS.
 
 -i::
 --inherit::
-       Child tasks inherit counters, only makes sens with -p option.
+       Child tasks do not inherit counters.
 
 -k <path>::
 --vmlinux=<path>::
index 80db3f4bcf7a7d07a8233b535f43a4862fd40b21..39d41068484f963efc242cb3fc75649875f874f3 100644 (file)
@@ -11,11 +11,21 @@ lib/rbtree.c
 include/linux/swab.h
 arch/*/include/asm/unistd*.h
 arch/*/include/asm/perf_regs.h
+arch/*/include/uapi/asm/unistd*.h
+arch/*/include/uapi/asm/perf_regs.h
 arch/*/lib/memcpy*.S
 arch/*/lib/memset*.S
 include/linux/poison.h
 include/linux/magic.h
 include/linux/hw_breakpoint.h
+include/linux/rbtree_augmented.h
+include/uapi/linux/perf_event.h
+include/uapi/linux/const.h
+include/uapi/linux/swab.h
+include/uapi/linux/hw_breakpoint.h
 arch/x86/include/asm/svm.h
 arch/x86/include/asm/vmx.h
 arch/x86/include/asm/kvm_host.h
+arch/x86/include/uapi/asm/svm.h
+arch/x86/include/uapi/asm/vmx.h
+arch/x86/include/uapi/asm/kvm.h
index 891bc77bdb2cbba9910813c6df8866b1351a7734..a2108ca1cc17ad02e331aa02adf1870cc56a07ba 100644 (file)
@@ -47,10 +47,11 @@ include config/utilities.mak
 # backtrace post unwind.
 #
 # Define NO_BACKTRACE if you do not want stack backtrace debug feature
+#
+# Define NO_LIBNUMA if you do not want numa perf benchmark
 
 $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
        @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
--include $(OUTPUT)PERF-VERSION-FILE
 
 uname_M := $(shell uname -m 2>/dev/null || echo not)
 
@@ -58,7 +59,7 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
                                  -e s/arm.*/arm/ -e s/sa110/arm/ \
                                  -e s/s390x/s390/ -e s/parisc64/parisc/ \
                                  -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
-                                 -e s/sh[234].*/sh/ )
+                                 -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ )
 NO_PERF_REGS := 1
 
 CC = $(CROSS_COMPILE)gcc
@@ -148,13 +149,25 @@ RM = rm -f
 MKDIR = mkdir
 FIND = find
 INSTALL = install
+FLEX = flex
+BISON= bison
 
 # sparse is architecture-neutral, which means that we need to tell it
 # explicitly what architecture to check for. Fix this up for yours..
 SPARSE_FLAGS = -D__BIG_ENDIAN__ -D__powerpc__
 
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),tags)
 -include config/feature-tests.mak
 
+ifeq ($(call get-executable,$(FLEX)),)
+       dummy := $(error Error: $(FLEX) is missing on this system, please install it)
+endif
+
+ifeq ($(call get-executable,$(BISON)),)
+       dummy := $(error Error: $(BISON) is missing on this system, please install it)
+endif
+
 ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -fstack-protector-all,-fstack-protector-all),y)
        CFLAGS := $(CFLAGS) -fstack-protector-all
 endif
@@ -206,6 +219,8 @@ ifeq ($(call try-cc,$(SOURCE_BIONIC),$(CFLAGS),bionic),y)
        EXTLIBS := $(filter-out -lpthread,$(EXTLIBS))
        BASIC_CFLAGS += -I.
 endif
+endif # MAKECMDGOALS != tags
+endif # MAKECMDGOALS != clean
 
 # Guard against environment variables
 BUILTIN_OBJS =
@@ -230,11 +245,19 @@ endif
 LIBTRACEEVENT = $(TE_PATH)libtraceevent.a
 TE_LIB := -L$(TE_PATH) -ltraceevent
 
+export LIBTRACEEVENT
+
+# python extension build directories
+PYTHON_EXTBUILD     := $(OUTPUT)python_ext_build/
+PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/
+PYTHON_EXTBUILD_TMP := $(PYTHON_EXTBUILD)tmp/
+export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP
+
+python-clean := rm -rf $(PYTHON_EXTBUILD) $(OUTPUT)python/perf.so
+
 PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources)
 PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py
 
-export LIBTRACEEVENT
-
 $(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS)
        $(QUIET_GEN)CFLAGS='$(BASIC_CFLAGS)' $(PYTHON_WORD) util/setup.py \
          --quiet build_ext; \
@@ -269,20 +292,17 @@ endif
 
 export PERL_PATH
 
-FLEX = flex
-BISON= bison
-
 $(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c
        $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c
 
 $(OUTPUT)util/parse-events-bison.c: util/parse-events.y
-       $(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c
+       $(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c -p parse_events_
 
 $(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c
        $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c
 
 $(OUTPUT)util/pmu-bison.c: util/pmu.y
-       $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c
+       $(QUIET_BISON)$(BISON) -v util/pmu.y -d -o $(OUTPUT)util/pmu-bison.c -p perf_pmu_
 
 $(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c
 $(OUTPUT)util/pmu.o: $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c
@@ -378,8 +398,11 @@ LIB_H += util/rblist.h
 LIB_H += util/intlist.h
 LIB_H += util/perf_regs.h
 LIB_H += util/unwind.h
-LIB_H += ui/helpline.h
 LIB_H += util/vdso.h
+LIB_H += ui/helpline.h
+LIB_H += ui/progress.h
+LIB_H += ui/util.h
+LIB_H += ui/ui.h
 
 LIB_OBJS += $(OUTPUT)util/abspath.o
 LIB_OBJS += $(OUTPUT)util/alias.o
@@ -453,6 +476,7 @@ LIB_OBJS += $(OUTPUT)util/stat.o
 LIB_OBJS += $(OUTPUT)ui/setup.o
 LIB_OBJS += $(OUTPUT)ui/helpline.o
 LIB_OBJS += $(OUTPUT)ui/progress.o
+LIB_OBJS += $(OUTPUT)ui/util.o
 LIB_OBJS += $(OUTPUT)ui/hist.o
 LIB_OBJS += $(OUTPUT)ui/stdio/hist.o
 
@@ -471,7 +495,8 @@ LIB_OBJS += $(OUTPUT)tests/rdpmc.o
 LIB_OBJS += $(OUTPUT)tests/evsel-roundtrip-name.o
 LIB_OBJS += $(OUTPUT)tests/evsel-tp-sched.o
 LIB_OBJS += $(OUTPUT)tests/pmu.o
-LIB_OBJS += $(OUTPUT)tests/util.o
+LIB_OBJS += $(OUTPUT)tests/hists_link.o
+LIB_OBJS += $(OUTPUT)tests/python-use.o
 
 BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
 BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
@@ -510,14 +535,13 @@ PERFLIBS = $(LIB_FILE) $(LIBTRACEEVENT)
 #
 # Platform specific tweaks
 #
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),tags)
 
 # We choose to avoid "if .. else if .. else .. endif endif"
 # because maintaining the nesting to match is a pain.  If
 # we had "elif" things would have been much nicer...
 
--include config.mak.autogen
--include config.mak
-
 ifdef NO_LIBELF
        NO_DWARF := 1
        NO_DEMANGLE := 1
@@ -557,6 +581,11 @@ else
 endif # SOURCE_LIBELF
 endif # NO_LIBELF
 
+# There's only x86 (both 32 and 64) support for CFI unwind so far
+ifneq ($(ARCH),x86)
+       NO_LIBUNWIND := 1
+endif
+
 ifndef NO_LIBUNWIND
 # for linking with debug library, run like:
 # make DEBUG=1 LIBUNWIND_DIR=/opt/libunwind/
@@ -646,7 +675,6 @@ ifndef NO_NEWT
                LIB_OBJS += $(OUTPUT)ui/browsers/hists.o
                LIB_OBJS += $(OUTPUT)ui/browsers/map.o
                LIB_OBJS += $(OUTPUT)ui/browsers/scripts.o
-               LIB_OBJS += $(OUTPUT)ui/util.o
                LIB_OBJS += $(OUTPUT)ui/tui/setup.o
                LIB_OBJS += $(OUTPUT)ui/tui/util.o
                LIB_OBJS += $(OUTPUT)ui/tui/helpline.o
@@ -655,9 +683,6 @@ ifndef NO_NEWT
                LIB_H += ui/browsers/map.h
                LIB_H += ui/keysyms.h
                LIB_H += ui/libslang.h
-               LIB_H += ui/progress.h
-               LIB_H += ui/util.h
-               LIB_H += ui/ui.h
        endif
 endif
 
@@ -673,14 +698,12 @@ ifndef NO_GTK2
                BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0 2>/dev/null)
                EXTLIBS += $(shell pkg-config --libs gtk+-2.0 2>/dev/null)
                LIB_OBJS += $(OUTPUT)ui/gtk/browser.o
+               LIB_OBJS += $(OUTPUT)ui/gtk/hists.o
                LIB_OBJS += $(OUTPUT)ui/gtk/setup.o
                LIB_OBJS += $(OUTPUT)ui/gtk/util.o
                LIB_OBJS += $(OUTPUT)ui/gtk/helpline.o
                LIB_OBJS += $(OUTPUT)ui/gtk/progress.o
-               # Make sure that it'd be included only once.
-               ifeq ($(findstring -DNEWT_SUPPORT,$(BASIC_CFLAGS)),)
-                       LIB_OBJS += $(OUTPUT)ui/util.o
-               endif
+               LIB_OBJS += $(OUTPUT)ui/gtk/annotate.o
        endif
 endif
 
@@ -707,7 +730,7 @@ disable-python = $(eval $(disable-python_code))
 define disable-python_code
   BASIC_CFLAGS += -DNO_LIBPYTHON
   $(if $(1),$(warning No $(1) was found))
-  $(warning Python support won't be built)
+  $(warning Python support will not be built)
 endef
 
 override PYTHON := \
@@ -715,19 +738,10 @@ override PYTHON := \
 
 ifndef PYTHON
   $(call disable-python,python interpreter)
-  python-clean :=
 else
 
   PYTHON_WORD := $(call shell-wordify,$(PYTHON))
 
-  # python extension build directories
-  PYTHON_EXTBUILD     := $(OUTPUT)python_ext_build/
-  PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/
-  PYTHON_EXTBUILD_TMP := $(PYTHON_EXTBUILD)tmp/
-  export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP
-
-  python-clean := rm -rf $(PYTHON_EXTBUILD) $(OUTPUT)python/perf.so
-
   ifdef NO_LIBPYTHON
     $(call disable-python)
   else
@@ -839,10 +853,24 @@ ifndef NO_BACKTRACE
        endif
 endif
 
+ifndef NO_LIBNUMA
+       FLAGS_LIBNUMA = $(ALL_CFLAGS) $(ALL_LDFLAGS) -lnuma
+       ifneq ($(call try-cc,$(SOURCE_LIBNUMA),$(FLAGS_LIBNUMA),libnuma),y)
+               msg := $(warning No numa.h found, disabling 'perf bench numa mem' benchmark, please install numa-libs-devel or libnuma-dev);
+       else
+               BASIC_CFLAGS += -DLIBNUMA_SUPPORT
+               BUILTIN_OBJS += $(OUTPUT)bench/numa.o
+               EXTLIBS += -lnuma
+       endif
+endif
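+# (An illustrative opt-out of the above: build with "make NO_LIBNUMA=1".)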
+
 ifdef ASCIIDOC8
        export ASCIIDOC8
 endif
 
+endif # MAKECMDGOALS != tags
+endif # MAKECMDGOALS != clean
+
 # Shell quote (do not use $(call) to accommodate ancient setups);
 
 ETC_PERFCONFIG_SQ = $(subst ','\'',$(ETC_PERFCONFIG))
@@ -884,7 +912,7 @@ strip: $(PROGRAMS) $(OUTPUT)perf
        $(STRIP) $(STRIP_OPTS) $(PROGRAMS) $(OUTPUT)perf
 
 $(OUTPUT)perf.o: perf.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
-       $(QUIET_CC)$(CC) -DPERF_VERSION='"$(PERF_VERSION)"' \
+       $(QUIET_CC)$(CC) -include $(OUTPUT)PERF-VERSION-FILE \
                '-DPERF_HTML_PATH="$(htmldir_SQ)"' \
                $(ALL_CFLAGS) -c $(filter %.c,$^) -o $@
 
@@ -948,7 +976,13 @@ $(OUTPUT)util/exec_cmd.o: util/exec_cmd.c $(OUTPUT)PERF-CFLAGS
 
 $(OUTPUT)tests/attr.o: tests/attr.c $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \
-               '-DBINDIR="$(bindir_SQ)"' \
+               '-DBINDIR="$(bindir_SQ)"' -DPYTHON='"$(PYTHON_WORD)"' \
+               $<
+
+$(OUTPUT)tests/python-use.o: tests/python-use.c $(OUTPUT)PERF-CFLAGS
+       $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) \
+               -DPYTHONPATH='"$(OUTPUT)python"' \
+               -DPYTHON='"$(PYTHON_WORD)"' \
                $<
 
 $(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS
@@ -1099,7 +1133,7 @@ perfexec_instdir = $(prefix)/$(perfexecdir)
 endif
 perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir))
 
-install: all try-install-man
+install-bin: all
        $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'
        $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'
        $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
@@ -1120,6 +1154,8 @@ install: all try-install-man
        $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
        $(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
 
+install: install-bin try-install-man
+
 install-python_ext:
        $(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)'
 
index 3e975cb6232eb97b2d14a9d4f069653727a6d649..aacef07ebf31525227b372e6c40e9a3039c5d580 100644 (file)
@@ -155,6 +155,7 @@ static int perf_session_env__lookup_binutils_path(struct perf_session_env *env,
                if (lookup_path(buf))
                        goto out;
                free(buf);
+               buf = NULL;
        }
 
        if (!strcmp(arch, "arm"))
index 8f89998eeaf4eeb3fd45388f6aacd34438e2f1f8..a5223e6a7b432a0b04faf5ba5dfc130cd9ed150e 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef BENCH_H
 #define BENCH_H
 
+extern int bench_numa(int argc, const char **argv, const char *prefix);
 extern int bench_sched_messaging(int argc, const char **argv, const char *prefix);
 extern int bench_sched_pipe(int argc, const char **argv, const char *prefix);
 extern int bench_mem_memcpy(int argc, const char **argv,
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
new file mode 100644 (file)
index 0000000..30d1c32
--- /dev/null
@@ -0,0 +1,1731 @@
+/*
+ * numa.c
+ *
+ * numa: Simulate NUMA-sensitive workloads and measure their NUMA performance
+ */
+
+#include "../perf.h"
+#include "../builtin.h"
+#include "../util/util.h"
+#include "../util/parse-options.h"
+
+#include "bench.h"
+
+#include <errno.h>
+#include <sched.h>
+#include <stdio.h>
+#include <assert.h>
+#include <malloc.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#include <sys/wait.h>
+#include <sys/prctl.h>
+#include <sys/types.h>
+
+#include <numa.h>
+#include <numaif.h>
+
+/*
+ * Regular printout to the terminal, suppressed if -q is specified:
+ */
+#define tprintf(x...) do { if (g && g->p.show_details >= 0) printf(x); } while (0)
+
+/*
+ * Debug printf:
+ */
+#define dprintf(x...) do { if (g && g->p.show_details >= 1) printf(x); } while (0)
+
+struct thread_data {
+       int                     curr_cpu;
+       cpu_set_t               bind_cpumask;
+       int                     bind_node;
+       u8                      *process_data;
+       int                     process_nr;
+       int                     thread_nr;
+       int                     task_nr;
+       unsigned int            loops_done;
+       u64                     val;
+       u64                     runtime_ns;
+       pthread_mutex_t         *process_lock;
+};
+
+/* Parameters set by options: */
+
+struct params {
+       /* Startup synchronization: */
+       bool                    serialize_startup;
+
+       /* Task hierarchy: */
+       int                     nr_proc;
+       int                     nr_threads;
+
+       /* Working set sizes: */
+       const char              *mb_global_str;
+       const char              *mb_proc_str;
+       const char              *mb_proc_locked_str;
+       const char              *mb_thread_str;
+
+       double                  mb_global;
+       double                  mb_proc;
+       double                  mb_proc_locked;
+       double                  mb_thread;
+
+       /* Access patterns to the working set: */
+       bool                    data_reads;
+       bool                    data_writes;
+       bool                    data_backwards;
+       bool                    data_zero_memset;
+       bool                    data_rand_walk;
+       u32                     nr_loops;
+       u32                     nr_secs;
+       u32                     sleep_usecs;
+
+       /* Working set initialization: */
+       bool                    init_zero;
+       bool                    init_random;
+       bool                    init_cpu0;
+
+       /* Misc options: */
+       int                     show_details;
+       int                     run_all;
+       int                     thp;
+
+       long                    bytes_global;
+       long                    bytes_process;
+       long                    bytes_process_locked;
+       long                    bytes_thread;
+
+       int                     nr_tasks;
+       bool                    show_quiet;
+
+       bool                    show_convergence;
+       bool                    measure_convergence;
+
+       int                     perturb_secs;
+       int                     nr_cpus;
+       int                     nr_nodes;
+
+       /* Affinity options -C and -N: */
+       char                    *cpu_list_str;
+       char                    *node_list_str;
+};
+
+
+/* Global, read-writable area, accessible to all processes and threads: */
+
+struct global_info {
+       u8                      *data;
+
+       pthread_mutex_t         startup_mutex;
+       int                     nr_tasks_started;
+
+       pthread_mutex_t         startup_done_mutex;
+
+       pthread_mutex_t         start_work_mutex;
+       int                     nr_tasks_working;
+
+       pthread_mutex_t         stop_work_mutex;
+       u64                     bytes_done;
+
+       struct thread_data      *threads;
+
+       /* Convergence latency measurement: */
+       bool                    all_converged;
+       bool                    stop_work;
+
+       int                     print_once;
+
+       struct params           p;
+};
+
+static struct global_info      *g = NULL;
+
+static int parse_cpus_opt(const struct option *opt, const char *arg, int unset);
+static int parse_nodes_opt(const struct option *opt, const char *arg, int unset);
+
+struct params p0;
+
+static const struct option options[] = {
+       OPT_INTEGER('p', "nr_proc"      , &p0.nr_proc,          "number of processes"),
+       OPT_INTEGER('t', "nr_threads"   , &p0.nr_threads,       "number of threads per process"),
+
+       OPT_STRING('G', "mb_global"     , &p0.mb_global_str,    "MB", "global  memory (MBs)"),
+       OPT_STRING('P', "mb_proc"       , &p0.mb_proc_str,      "MB", "process memory (MBs)"),
+       OPT_STRING('L', "mb_proc_locked", &p0.mb_proc_locked_str,"MB", "process serialized/locked memory access (MBs), <= process_memory"),
+       OPT_STRING('T', "mb_thread"     , &p0.mb_thread_str,    "MB", "thread  memory (MBs)"),
+
+       OPT_UINTEGER('l', "nr_loops"    , &p0.nr_loops,         "max number of loops to run"),
+       OPT_UINTEGER('s', "nr_secs"     , &p0.nr_secs,          "max number of seconds to run"),
+       OPT_UINTEGER('u', "usleep"      , &p0.sleep_usecs,      "usecs to sleep per loop iteration"),
+
+       OPT_BOOLEAN('R', "data_reads"   , &p0.data_reads,       "access the data via reads (can be mixed with -W)"),
+       OPT_BOOLEAN('W', "data_writes"  , &p0.data_writes,      "access the data via writes (can be mixed with -R)"),
+       OPT_BOOLEAN('B', "data_backwards", &p0.data_backwards,  "access the data backwards as well"),
+       OPT_BOOLEAN('Z', "data_zero_memset", &p0.data_zero_memset,"access the data via glibc bzero only"),
+       OPT_BOOLEAN('r', "data_rand_walk", &p0.data_rand_walk,  "access the data with random (32bit LFSR) walk"),
+
+
+       OPT_BOOLEAN('z', "init_zero"    , &p0.init_zero,        "bzero the initial allocations"),
+       OPT_BOOLEAN('I', "init_random"  , &p0.init_random,      "randomize the contents of the initial allocations"),
+       OPT_BOOLEAN('0', "init_cpu0"    , &p0.init_cpu0,        "do the initial allocations on CPU#0"),
+       OPT_INTEGER('x', "perturb_secs", &p0.perturb_secs,      "perturb thread 0/0 every X secs, to test convergence stability"),
+
+       OPT_INCR   ('d', "show_details" , &p0.show_details,     "Show details"),
+       OPT_INCR   ('a', "all"          , &p0.run_all,          "Run all tests in the suite"),
+       OPT_INTEGER('H', "thp"          , &p0.thp,              "MADV_NOHUGEPAGE < 0 < MADV_HUGEPAGE"),
+       OPT_BOOLEAN('c', "show_convergence", &p0.show_convergence, "show convergence details"),
+       OPT_BOOLEAN('m', "measure_convergence", &p0.measure_convergence, "measure convergence latency"),
+       OPT_BOOLEAN('q', "quiet"        , &p0.show_quiet,       "quiet mode"),
+       OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"),
+
+       /* Special option string parsing callbacks: */
+        OPT_CALLBACK('C', "cpus", NULL, "cpu[,cpu2,...cpuN]",
+                       "bind the first N tasks to these specific cpus (the rest is unbound)",
+                       parse_cpus_opt),
+        OPT_CALLBACK('M', "memnodes", NULL, "node[,node2,...nodeN]",
+                       "bind the first N tasks to these specific memory nodes (the rest is unbound)",
+                       parse_nodes_opt),
+       OPT_END()
+};
+
+static const char * const bench_numa_usage[] = {
+       "perf bench numa <options>",
+       NULL
+};
+
+static const char * const numa_usage[] = {
+       "perf bench numa mem [<options>]",
+       NULL
+};
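+
+/*
+ * Illustrative example (using the options defined above): two processes of
+ * four threads each, 1024 MB of process memory, running for 10 seconds:
+ *
+ *   perf bench numa mem -p 2 -t 4 -P 1024 -s 10
+ */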
+
+static cpu_set_t bind_to_cpu(int target_cpu)
+{
+       cpu_set_t orig_mask, mask;
+       int ret;
+
+       ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
+       BUG_ON(ret);
+
+       CPU_ZERO(&mask);
+
+       if (target_cpu == -1) {
+               int cpu;
+
+               for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
+                       CPU_SET(cpu, &mask);
+       } else {
+               BUG_ON(target_cpu < 0 || target_cpu >= g->p.nr_cpus);
+               CPU_SET(target_cpu, &mask);
+       }
+
+       ret = sched_setaffinity(0, sizeof(mask), &mask);
+       BUG_ON(ret);
+
+       return orig_mask;
+}
+
+static cpu_set_t bind_to_node(int target_node)
+{
+       int cpus_per_node = g->p.nr_cpus/g->p.nr_nodes;
+       cpu_set_t orig_mask, mask;
+       int cpu;
+       int ret;
+
+       BUG_ON(cpus_per_node*g->p.nr_nodes != g->p.nr_cpus);
+       BUG_ON(!cpus_per_node);
+
+       ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
+       BUG_ON(ret);
+
+       CPU_ZERO(&mask);
+
+       if (target_node == -1) {
+               for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
+                       CPU_SET(cpu, &mask);
+       } else {
+               int cpu_start = (target_node + 0) * cpus_per_node;
+               int cpu_stop  = (target_node + 1) * cpus_per_node;
+
+               BUG_ON(cpu_stop > g->p.nr_cpus);
+
+               for (cpu = cpu_start; cpu < cpu_stop; cpu++)
+                       CPU_SET(cpu, &mask);
+       }
+
+       ret = sched_setaffinity(0, sizeof(mask), &mask);
+       BUG_ON(ret);
+
+       return orig_mask;
+}
+
+static void bind_to_cpumask(cpu_set_t mask)
+{
+       int ret;
+
+       ret = sched_setaffinity(0, sizeof(mask), &mask);
+       BUG_ON(ret);
+}
+
+static void mempol_restore(void)
+{
+       int ret;
+
+       ret = set_mempolicy(MPOL_DEFAULT, NULL, g->p.nr_nodes-1);
+
+       BUG_ON(ret);
+}
+
+static void bind_to_memnode(int node)
+{
+       unsigned long nodemask;
+       int ret;
+
+       if (node == -1)
+               return;
+
+       BUG_ON(g->p.nr_nodes > (int)sizeof(nodemask));
+       nodemask = 1L << node;
+
+       ret = set_mempolicy(MPOL_BIND, &nodemask, sizeof(nodemask)*8);
+       dprintf("binding to node %d, mask: %016lx => %d\n", node, nodemask, ret);
+
+       BUG_ON(ret);
+}
+
+#define HPSIZE (2*1024*1024)
+
+#define set_taskname(fmt...)                           \
+do {                                                   \
+       char name[20];                                  \
+                                                       \
+       snprintf(name, 20, fmt);                        \
+       prctl(PR_SET_NAME, name);                       \
+} while (0)
+
+static u8 *alloc_data(ssize_t bytes0, int map_flags,
+                     int init_zero, int init_cpu0, int thp, int init_random)
+{
+       cpu_set_t orig_mask;
+       ssize_t bytes;
+       u8 *buf;
+       int ret;
+
+       if (!bytes0)
+               return NULL;
+
+       /* Allocate and initialize all memory on CPU#0: */
+       if (init_cpu0) {
+               orig_mask = bind_to_node(0);
+               bind_to_memnode(0);
+       }
+
+       bytes = bytes0 + HPSIZE;
+
+       buf = (void *)mmap(0, bytes, PROT_READ|PROT_WRITE, MAP_ANON|map_flags, -1, 0);
+       BUG_ON(buf == (void *)-1);
+
+       if (map_flags == MAP_PRIVATE) {
+               if (thp > 0) {
+                       ret = madvise(buf, bytes, MADV_HUGEPAGE);
+                       if (ret && !g->print_once) {
+                               g->print_once = 1;
+                               printf("WARNING: Could not enable THP - do: 'echo madvise > /sys/kernel/mm/transparent_hugepage/enabled'\n");
+                       }
+               }
+               if (thp < 0) {
+                       ret = madvise(buf, bytes, MADV_NOHUGEPAGE);
+                       if (ret && !g->print_once) {
+                               g->print_once = 1;
+                               printf("WARNING: Could not disable THP: run a CONFIG_TRANSPARENT_HUGEPAGE kernel?\n");
+                       }
+               }
+       }
+
+       if (init_zero) {
+               bzero(buf, bytes);
+       } else {
+               /* Initialize random contents, different in each word: */
+               if (init_random) {
+                       u64 *wbuf = (void *)buf;
+                       long off = rand();
+                       long i;
+
+                       for (i = 0; i < bytes/8; i++)
+                               wbuf[i] = i + off;
+               }
+       }
+
+       /* Align to 2MB boundary: */
+       buf = (void *)(((unsigned long)buf + HPSIZE-1) & ~(HPSIZE-1));
+
+       /* Restore affinity: */
+       if (init_cpu0) {
+               bind_to_cpumask(orig_mask);
+               mempol_restore();
+       }
+
+       return buf;
+}
+
+static void free_data(void *data, ssize_t bytes)
+{
+       int ret;
+
+       if (!data)
+               return;
+
+       ret = munmap(data, bytes);
+       BUG_ON(ret);
+}
+
+/*
+ * Create a shared memory buffer that can be shared between processes, zeroed:
+ */
+static void * zalloc_shared_data(ssize_t bytes)
+{
+       return alloc_data(bytes, MAP_SHARED, 1, g->p.init_cpu0,  g->p.thp, g->p.init_random);
+}
+
+/*
+ * Create a shared memory buffer that can be shared between processes:
+ */
+static void * setup_shared_data(ssize_t bytes)
+{
+       return alloc_data(bytes, MAP_SHARED, 0, g->p.init_cpu0,  g->p.thp, g->p.init_random);
+}
+
+/*
+ * Allocate process-local memory - this will either be shared between
+ * threads of this process, or only be accessed by this thread:
+ */
+static void * setup_private_data(ssize_t bytes)
+{
+       return alloc_data(bytes, MAP_PRIVATE, 0, g->p.init_cpu0,  g->p.thp, g->p.init_random);
+}
+
+/*
+ * Return a process-shared (global) mutex:
+ */
+static void init_global_mutex(pthread_mutex_t *mutex)
+{
+       pthread_mutexattr_t attr;
+
+       pthread_mutexattr_init(&attr);
+       pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
+       pthread_mutex_init(mutex, &attr);
+}
+
+static int parse_cpu_list(const char *arg)
+{
+       p0.cpu_list_str = strdup(arg);
+
+       dprintf("got CPU list: {%s}\n", p0.cpu_list_str);
+
+       return 0;
+}
+
+static void parse_setup_cpu_list(void)
+{
+       struct thread_data *td;
+       char *str0, *str;
+       int t;
+
+       if (!g->p.cpu_list_str)
+               return;
+
+       dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);
+
+       str0 = str = strdup(g->p.cpu_list_str);
+       t = 0;
+
+       BUG_ON(!str);
+
+       tprintf("# binding tasks to CPUs:\n");
+       tprintf("#  ");
+
+       while (true) {
+               int bind_cpu, bind_cpu_0, bind_cpu_1;
+               char *tok, *tok_end, *tok_step, *tok_len, *tok_mul;
+               int bind_len;
+               int step;
+               int mul;
+
+               tok = strsep(&str, ",");
+               if (!tok)
+                       break;
+
+               tok_end = strstr(tok, "-");
+
+               dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end);
+               if (!tok_end) {
+                       /* Single CPU specified: */
+                       bind_cpu_0 = bind_cpu_1 = atol(tok);
+               } else {
+                       /* CPU range specified (for example: "5-11"): */
+                       bind_cpu_0 = atol(tok);
+                       bind_cpu_1 = atol(tok_end + 1);
+               }
+
+               step = 1;
+               tok_step = strstr(tok, "#");
+               if (tok_step) {
+                       step = atol(tok_step + 1);
+                       BUG_ON(step <= 0 || step >= g->p.nr_cpus);
+               }
+
+               /*
+                * Mask length.
+                * Eg: "--cpus 8_4-16#4" means: '--cpus 8_4,12_4,16_4',
+                * where the _4 means the next 4 CPUs are allowed.
+                */
+               bind_len = 1;
+               tok_len = strstr(tok, "_");
+               if (tok_len) {
+                       bind_len = atol(tok_len + 1);
+                       BUG_ON(bind_len <= 0 || bind_len > g->p.nr_cpus);
+               }
+
+               /* Multiplicator shortcut, "0x8" is a shortcut for: "0,0,0,0,0,0,0,0" */
+               mul = 1;
+               tok_mul = strstr(tok, "x");
+               if (tok_mul) {
+                       mul = atol(tok_mul + 1);
+                       BUG_ON(mul <= 0);
+               }
+
+               dprintf("CPUs: %d_%d-%d#%dx%d\n", bind_cpu_0, bind_len, bind_cpu_1, step, mul);
+
+               BUG_ON(bind_cpu_0 < 0 || bind_cpu_0 >= g->p.nr_cpus);
+               BUG_ON(bind_cpu_1 < 0 || bind_cpu_1 >= g->p.nr_cpus);
+               BUG_ON(bind_cpu_0 > bind_cpu_1);
+
+               for (bind_cpu = bind_cpu_0; bind_cpu <= bind_cpu_1; bind_cpu += step) {
+                       int i;
+
+                       for (i = 0; i < mul; i++) {
+                               int cpu;
+
+                               if (t >= g->p.nr_tasks) {
+                                       printf("\n# NOTE: ignoring bind CPUs starting at CPU#%d\n #", bind_cpu);
+                                       goto out;
+                               }
+                               td = g->threads + t;
+
+                               if (t)
+                                       tprintf(",");
+                               if (bind_len > 1) {
+                                       tprintf("%2d/%d", bind_cpu, bind_len);
+                               } else {
+                                       tprintf("%2d", bind_cpu);
+                               }
+
+                               CPU_ZERO(&td->bind_cpumask);
+                               for (cpu = bind_cpu; cpu < bind_cpu+bind_len; cpu++) {
+                                       BUG_ON(cpu < 0 || cpu >= g->p.nr_cpus);
+                                       CPU_SET(cpu, &td->bind_cpumask);
+                               }
+                               t++;
+                       }
+               }
+       }
+out:
+
+       tprintf("\n");
+
+       if (t < g->p.nr_tasks)
+               printf("# NOTE: %d tasks bound, %d tasks unbound\n", t, g->p.nr_tasks - t);
+
+       free(str0);
+}
+
+static int parse_cpus_opt(const struct option *opt __maybe_unused,
+                         const char *arg, int unset __maybe_unused)
+{
+       if (!arg)
+               return -1;
+
+       return parse_cpu_list(arg);
+}
+
+static int parse_node_list(const char *arg)
+{
+       p0.node_list_str = strdup(arg);
+
+       dprintf("got NODE list: {%s}\n", p0.node_list_str);
+
+       return 0;
+}
+
+static void parse_setup_node_list(void)
+{
+       struct thread_data *td;
+       char *str0, *str;
+       int t;
+
+       if (!g->p.node_list_str)
+               return;
+
+       dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);
+
+       str0 = str = strdup(g->p.node_list_str);
+       t = 0;
+
+       BUG_ON(!str);
+
+       tprintf("# binding tasks to NODEs:\n");
+       tprintf("# ");
+
+       while (true) {
+               int bind_node, bind_node_0, bind_node_1;
+               char *tok, *tok_end, *tok_step, *tok_mul;
+               int step;
+               int mul;
+
+               tok = strsep(&str, ",");
+               if (!tok)
+                       break;
+
+               tok_end = strstr(tok, "-");
+
+               dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end);
+               if (!tok_end) {
+                       /* Single NODE specified: */
+                       bind_node_0 = bind_node_1 = atol(tok);
+               } else {
+                       /* NODE range specified (for example: "5-11"): */
+                       bind_node_0 = atol(tok);
+                       bind_node_1 = atol(tok_end + 1);
+               }
+
+               step = 1;
+               tok_step = strstr(tok, "#");
+               if (tok_step) {
+                       step = atol(tok_step + 1);
+                       BUG_ON(step <= 0 || step >= g->p.nr_nodes);
+               }
+
+               /* Multiplicator shortcut, "0x8" is a shortcut for: "0,0,0,0,0,0,0,0" */
+               mul = 1;
+               tok_mul = strstr(tok, "x");
+               if (tok_mul) {
+                       mul = atol(tok_mul + 1);
+                       BUG_ON(mul <= 0);
+               }
+
+               dprintf("NODEs: %d-%d #%d\n", bind_node_0, bind_node_1, step);
+
+               BUG_ON(bind_node_0 < 0 || bind_node_0 >= g->p.nr_nodes);
+               BUG_ON(bind_node_1 < 0 || bind_node_1 >= g->p.nr_nodes);
+               BUG_ON(bind_node_0 > bind_node_1);
+
+               for (bind_node = bind_node_0; bind_node <= bind_node_1; bind_node += step) {
+                       int i;
+
+                       for (i = 0; i < mul; i++) {
+                               if (t >= g->p.nr_tasks) {
+                                       printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
+                                       goto out;
+                               }
+                               td = g->threads + t;
+
+                               if (!t)
+                                       tprintf(" %2d", bind_node);
+                               else
+                                       tprintf(",%2d", bind_node);
+
+                               td->bind_node = bind_node;
+                               t++;
+                       }
+               }
+       }
+out:
+
+       tprintf("\n");
+
+       if (t < g->p.nr_tasks)
+               printf("# NOTE: %d tasks mem-bound, %d tasks unbound\n", t, g->p.nr_tasks - t);
+
+       free(str0);
+}
+
+static int parse_nodes_opt(const struct option *opt __maybe_unused,
+                         const char *arg, int unset __maybe_unused)
+{
+       if (!arg)
+               return -1;
+
+       return parse_node_list(arg);
+}
+
+#define BIT(x) (1ul << x)
+
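+/* A 32-bit LFSR, used by do_work() below for the -r/--data_rand_walk access pattern: */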
+static inline uint32_t lfsr_32(uint32_t lfsr)
+{
+       const uint32_t taps = BIT(1) | BIT(5) | BIT(6) | BIT(31);
+       return (lfsr>>1) ^ ((0x0u - (lfsr & 0x1u)) & taps);
+}
+
+/*
+ * Make sure there's a real data dependency on RAM (when read
+ * accesses are enabled), so the compiler, the CPU and the
+ * kernel (KSM, zero page, etc.) cannot optimize away RAM
+ * accesses:
+ */
+static inline u64 access_data(u64 *data __attribute__((unused)), u64 val)
+{
+       if (g->p.data_reads)
+               val += *data;
+       if (g->p.data_writes)
+               *data = val + 1;
+       return val;
+}
+
+/*
+ * The worker process does two types of work, a forwards going
+ * loop and a backwards going loop.
+ *
+ * We do this so that on multiprocessor systems we do not create
+ * a 'train' of processing, with highly synchronized processes,
+ * skewing the whole benchmark.
+ */
+static u64 do_work(u8 *__data, long bytes, int nr, int nr_max, int loop, u64 val)
+{
+       long words = bytes/sizeof(u64);
+       u64 *data = (void *)__data;
+       long chunk_0, chunk_1;
+       u64 *d0, *d, *d1;
+       long off;
+       long i;
+
+       BUG_ON(!data && words);
+       BUG_ON(data && !words);
+
+       if (!data)
+               return val;
+
+       /* Very simple memset() work variant: */
+       if (g->p.data_zero_memset && !g->p.data_rand_walk) {
+               bzero(data, bytes);
+               return val;
+       }
+
+       /* Spread out by PID/TID nr and by loop nr: */
+       chunk_0 = words/nr_max;
+       chunk_1 = words/g->p.nr_loops;
+       off = nr*chunk_0 + loop*chunk_1;
+
+       while (off >= words)
+               off -= words;
+
+       if (g->p.data_rand_walk) {
+               u32 lfsr = nr + loop + val;
+               int j;
+
+               for (i = 0; i < words/1024; i++) {
+                       long start, end;
+
+                       lfsr = lfsr_32(lfsr);
+
+                       start = lfsr % words;
+                       end = min(start + 1024, words-1);
+
+                       if (g->p.data_zero_memset) {
+                               bzero(data + start, (end-start) * sizeof(u64));
+                       } else {
+                               for (j = start; j < end; j++)
+                                       val = access_data(data + j, val);
+                       }
+               }
+       } else if (!g->p.data_backwards || (nr + loop) & 1) {
+
+               d0 = data + off;
+               d  = data + off + 1;
+               d1 = data + words;
+
+               /* Process data forwards: */
+               for (;;) {
+                       if (unlikely(d >= d1))
+                               d = data;
+                       if (unlikely(d == d0))
+                               break;
+
+                       val = access_data(d, val);
+
+                       d++;
+               }
+       } else {
+               /* Process data backwards: */
+
+               d0 = data + off;
+               d  = data + off - 1;
+               d1 = data + words;
+
+               /* Walk backwards over the buffer: */
+               for (;;) {
+                       if (unlikely(d < data))
+                               d = data + words-1;
+                       if (unlikely(d == d0))
+                               break;
+
+                       val = access_data(d, val);
+
+                       d--;
+               }
+       }
+
+       return val;
+}
+
+static void update_curr_cpu(int task_nr, unsigned long bytes_worked)
+{
+       unsigned int cpu;
+
+       cpu = sched_getcpu();
+
+       g->threads[task_nr].curr_cpu = cpu;
+       prctl(0, bytes_worked);
+}
+
+#define MAX_NR_NODES   64
+
+/*
+ * Count the number of nodes a process's threads
+ * are spread out on.
+ *
+ * A count of 1 means that the process is compressed
+ * to a single node. A count of g->p.nr_nodes means it's
+ * spread out on the whole system.
+ */
+static int count_process_nodes(int process_nr)
+{
+       char node_present[MAX_NR_NODES] = { 0, };
+       int nodes;
+       int n, t;
+
+       for (t = 0; t < g->p.nr_threads; t++) {
+               struct thread_data *td;
+               int task_nr;
+               int node;
+
+               task_nr = process_nr*g->p.nr_threads + t;
+               td = g->threads + task_nr;
+
+               node = numa_node_of_cpu(td->curr_cpu);
+               node_present[node] = 1;
+       }
+
+       nodes = 0;
+
+       for (n = 0; n < MAX_NR_NODES; n++)
+               nodes += node_present[n];
+
+       return nodes;
+}
+
+/*
+ * Count the number of distinct process-threads a node contains.
+ *
+ * A count of 1 means that the node contains only a single
+ * process. If all nodes on the system contain at most one
+ * process then we are well-converged.
+ */
+static int count_node_processes(int node)
+{
+       int processes = 0;
+       int t, p;
+
+       for (p = 0; p < g->p.nr_proc; p++) {
+               for (t = 0; t < g->p.nr_threads; t++) {
+                       struct thread_data *td;
+                       int task_nr;
+                       int n;
+
+                       task_nr = p*g->p.nr_threads + t;
+                       td = g->threads + task_nr;
+
+                       n = numa_node_of_cpu(td->curr_cpu);
+                       if (n == node) {
+                               processes++;
+                               break;
+                       }
+               }
+       }
+
+       return processes;
+}
+
+static void calc_convergence_compression(int *strong)
+{
+       unsigned int nodes_min, nodes_max;
+       int p;
+
+       nodes_min = -1;
+       nodes_max =  0;
+
+       for (p = 0; p < g->p.nr_proc; p++) {
+               unsigned int nodes = count_process_nodes(p);
+
+               nodes_min = min(nodes, nodes_min);
+               nodes_max = max(nodes, nodes_max);
+       }
+
+       /* Strong convergence: all threads compress on a single node: */
+       if (nodes_min == 1 && nodes_max == 1) {
+               *strong = 1;
+       } else {
+               *strong = 0;
+               tprintf(" {%d-%d}", nodes_min, nodes_max);
+       }
+}
+
+static void calc_convergence(double runtime_ns_max, double *convergence)
+{
+       unsigned int loops_done_min, loops_done_max;
+       int process_groups;
+       int nodes[MAX_NR_NODES];
+       int distance;
+       int nr_min;
+       int nr_max;
+       int strong;
+       int sum;
+       int nr;
+       int node;
+       int cpu;
+       int t;
+
+       if (!g->p.show_convergence && !g->p.measure_convergence)
+               return;
+
+       for (node = 0; node < g->p.nr_nodes; node++)
+               nodes[node] = 0;
+
+       loops_done_min = -1;
+       loops_done_max = 0;
+
+       for (t = 0; t < g->p.nr_tasks; t++) {
+               struct thread_data *td = g->threads + t;
+               unsigned int loops_done;
+
+               cpu = td->curr_cpu;
+
+               /* Not all threads have written it yet: */
+               if (cpu < 0)
+                       continue;
+
+               node = numa_node_of_cpu(cpu);
+
+               nodes[node]++;
+
+               loops_done = td->loops_done;
+               loops_done_min = min(loops_done, loops_done_min);
+               loops_done_max = max(loops_done, loops_done_max);
+       }
+
+       nr_max = 0;
+       nr_min = g->p.nr_tasks;
+       sum = 0;
+
+       for (node = 0; node < g->p.nr_nodes; node++) {
+               nr = nodes[node];
+               nr_min = min(nr, nr_min);
+               nr_max = max(nr, nr_max);
+               sum += nr;
+       }
+       BUG_ON(nr_min > nr_max);
+
+       BUG_ON(sum > g->p.nr_tasks);
+
+       if (0 && (sum < g->p.nr_tasks))
+               return;
+
+       /*
+        * Count the number of distinct process groups present
+        * on nodes - when we are converged this will decrease
+        * to g->p.nr_proc:
+        */
+       process_groups = 0;
+
+       for (node = 0; node < g->p.nr_nodes; node++) {
+               int processes = count_node_processes(node);
+
+               nr = nodes[node];
+               tprintf(" %2d/%-2d", nr, processes);
+
+               process_groups += processes;
+       }
+
+       distance = nr_max - nr_min;
+
+       tprintf(" [%2d/%-2d]", distance, process_groups);
+
+       tprintf(" l:%3d-%-3d (%3d)",
+               loops_done_min, loops_done_max, loops_done_max-loops_done_min);
+
+       if (loops_done_min && loops_done_max) {
+               double skew = 1.0 - (double)loops_done_min/loops_done_max;
+
+               tprintf(" [%4.1f%%]", skew * 100.0);
+       }
+
+       calc_convergence_compression(&strong);
+
+       if (strong && process_groups == g->p.nr_proc) {
+               if (!*convergence) {
+                       *convergence = runtime_ns_max;
+                       tprintf(" (%6.1fs converged)\n", *convergence/1e9);
+                       if (g->p.measure_convergence) {
+                               g->all_converged = true;
+                               g->stop_work = true;
+                       }
+               }
+       } else {
+               if (*convergence) {
+                       tprintf(" (%6.1fs de-converged)", runtime_ns_max/1e9);
+                       *convergence = 0;
+               }
+               tprintf("\n");
+       }
+}
+
+static void show_summary(double runtime_ns_max, int l, double *convergence)
+{
+       tprintf("\r #  %5.1f%%  [%.1f mins]",
+               (double)(l+1)/g->p.nr_loops*100.0, runtime_ns_max/1e9 / 60.0);
+
+       calc_convergence(runtime_ns_max, convergence);
+
+       if (g->p.show_details >= 0)
+               fflush(stdout);
+}
+
+static void *worker_thread(void *__tdata)
+{
+       struct thread_data *td = __tdata;
+       struct timeval start0, start, stop, diff;
+       int process_nr = td->process_nr;
+       int thread_nr = td->thread_nr;
+       unsigned long last_perturbance;
+       int task_nr = td->task_nr;
+       int details = g->p.show_details;
+       int first_task, last_task;
+       double convergence = 0;
+       u64 val = td->val;
+       double runtime_ns_max;
+       u8 *global_data;
+       u8 *process_data;
+       u8 *thread_data;
+       u64 bytes_done;
+       long work_done;
+       u32 l;
+
+       bind_to_cpumask(td->bind_cpumask);
+       bind_to_memnode(td->bind_node);
+
+       set_taskname("thread %d/%d", process_nr, thread_nr);
+
+       global_data = g->data;
+       process_data = td->process_data;
+       thread_data = setup_private_data(g->p.bytes_thread);
+
+       bytes_done = 0;
+
+       last_task = 0;
+       if (process_nr == g->p.nr_proc-1 && thread_nr == g->p.nr_threads-1)
+               last_task = 1;
+
+       first_task = 0;
+       if (process_nr == 0 && thread_nr == 0)
+               first_task = 1;
+
+       if (details >= 2) {
+               printf("#  thread %2d / %2d global mem: %p, process mem: %p, thread mem: %p\n",
+                       process_nr, thread_nr, global_data, process_data, thread_data);
+       }
+
+       if (g->p.serialize_startup) {
+               pthread_mutex_lock(&g->startup_mutex);
+               g->nr_tasks_started++;
+               pthread_mutex_unlock(&g->startup_mutex);
+
+               /* Here we will wait for the main process to start us all at once: */
+               pthread_mutex_lock(&g->start_work_mutex);
+               g->nr_tasks_working++;
+
+               /* The last one wakes the main process: */
+               if (g->nr_tasks_working == g->p.nr_tasks)
+                       pthread_mutex_unlock(&g->startup_done_mutex);
+
+               pthread_mutex_unlock(&g->start_work_mutex);
+       }
+
+       gettimeofday(&start0, NULL);
+
+       start = stop = start0;
+       last_perturbance = start.tv_sec;
+
+       for (l = 0; l < g->p.nr_loops; l++) {
+               start = stop;
+
+               if (g->stop_work)
+                       break;
+
+               val += do_work(global_data,  g->p.bytes_global,  process_nr, g->p.nr_proc,      l, val);
+               val += do_work(process_data, g->p.bytes_process, thread_nr,  g->p.nr_threads,   l, val);
+               val += do_work(thread_data,  g->p.bytes_thread,  0,          1,         l, val);
+
+               if (g->p.sleep_usecs) {
+                       pthread_mutex_lock(td->process_lock);
+                       usleep(g->p.sleep_usecs);
+                       pthread_mutex_unlock(td->process_lock);
+               }
+               /*
+                * Amount of work to be done under a process-global lock:
+                */
+               if (g->p.bytes_process_locked) {
+                       pthread_mutex_lock(td->process_lock);
+                       val += do_work(process_data, g->p.bytes_process_locked, thread_nr,  g->p.nr_threads,    l, val);
+                       pthread_mutex_unlock(td->process_lock);
+               }
+
+               work_done = g->p.bytes_global + g->p.bytes_process +
+                           g->p.bytes_process_locked + g->p.bytes_thread;
+
+               update_curr_cpu(task_nr, work_done);
+               bytes_done += work_done;
+
+               if (details < 0 && !g->p.perturb_secs && !g->p.measure_convergence && !g->p.nr_secs)
+                       continue;
+
+               td->loops_done = l;
+
+               gettimeofday(&stop, NULL);
+
+               /* Check whether our max runtime timed out: */
+               if (g->p.nr_secs) {
+                       timersub(&stop, &start0, &diff);
+                       if (diff.tv_sec >= g->p.nr_secs) {
+                               g->stop_work = true;
+                               break;
+                       }
+               }
+
+               /* Update the summary at most once per second: */
+               if (start.tv_sec == stop.tv_sec)
+                       continue;
+
+               /*
+                * Perturb the first task's equilibrium every g->p.perturb_secs seconds,
+                * by migrating to CPU#0:
+                */
+               if (first_task && g->p.perturb_secs && (int)(stop.tv_sec - last_perturbance) >= g->p.perturb_secs) {
+                       cpu_set_t orig_mask;
+                       int target_cpu;
+                       int this_cpu;
+
+                       last_perturbance = stop.tv_sec;
+
+                       /*
+                        * Depending on where we are running, move into
+                        * the other half of the system, to create some
+                        * real disturbance:
+                        */
+                       this_cpu = g->threads[task_nr].curr_cpu;
+                       if (this_cpu < g->p.nr_cpus/2)
+                               target_cpu = g->p.nr_cpus-1;
+                       else
+                               target_cpu = 0;
+
+                       orig_mask = bind_to_cpu(target_cpu);
+
+                       /* Here we are running on the target CPU already */
+                       if (details >= 1)
+                               printf(" (injecting perturbation, moved to CPU#%d)\n", target_cpu);
+
+                       bind_to_cpumask(orig_mask);
+               }
+
+               if (details >= 3) {
+                       timersub(&stop, &start, &diff);
+                       runtime_ns_max = diff.tv_sec * 1000000000;
+                       runtime_ns_max += diff.tv_usec * 1000;
+
+                       if (details >= 0) {
+                               printf(" #%2d / %2d: %14.2lf nsecs/op [val: %016lx]\n",
+                                       process_nr, thread_nr, runtime_ns_max / bytes_done, val);
+                       }
+                       fflush(stdout);
+               }
+               if (!last_task)
+                       continue;
+
+               timersub(&stop, &start0, &diff);
+               runtime_ns_max = diff.tv_sec * 1000000000ULL;
+               runtime_ns_max += diff.tv_usec * 1000ULL;
+
+               show_summary(runtime_ns_max, l, &convergence);
+       }
+
+       gettimeofday(&stop, NULL);
+       timersub(&stop, &start0, &diff);
+       td->runtime_ns = diff.tv_sec * 1000000000ULL;
+       td->runtime_ns += diff.tv_usec * 1000ULL;
+
+       free_data(thread_data, g->p.bytes_thread);
+
+       pthread_mutex_lock(&g->stop_work_mutex);
+       g->bytes_done += bytes_done;
+       pthread_mutex_unlock(&g->stop_work_mutex);
+
+       return NULL;
+}
+
+/*
+ * A worker process starts a couple of threads:
+ */
+static void worker_process(int process_nr)
+{
+       pthread_mutex_t process_lock;
+       struct thread_data *td;
+       pthread_t *pthreads;
+       u8 *process_data;
+       int task_nr;
+       int ret;
+       int t;
+
+       pthread_mutex_init(&process_lock, NULL);
+       set_taskname("process %d", process_nr);
+
+       /*
+        * Pick up the memory policy and the CPU binding of our first thread,
+        * so that we initialize memory accordingly:
+        */
+       task_nr = process_nr*g->p.nr_threads;
+       td = g->threads + task_nr;
+
+       bind_to_memnode(td->bind_node);
+       bind_to_cpumask(td->bind_cpumask);
+
+       pthreads = zalloc(g->p.nr_threads * sizeof(pthread_t));
+       process_data = setup_private_data(g->p.bytes_process);
+
+       if (g->p.show_details >= 3) {
+               printf(" # process %2d global mem: %p, process mem: %p\n",
+                       process_nr, g->data, process_data);
+       }
+
+       for (t = 0; t < g->p.nr_threads; t++) {
+               task_nr = process_nr*g->p.nr_threads + t;
+               td = g->threads + task_nr;
+
+               td->process_data = process_data;
+               td->process_nr   = process_nr;
+               td->thread_nr    = t;
+               td->task_nr      = task_nr;
+               td->val          = rand();
+               td->curr_cpu     = -1;
+               td->process_lock = &process_lock;
+
+               ret = pthread_create(pthreads + t, NULL, worker_thread, td);
+               BUG_ON(ret);
+       }
+
+       for (t = 0; t < g->p.nr_threads; t++) {
+                ret = pthread_join(pthreads[t], NULL);
+               BUG_ON(ret);
+       }
+
+       free_data(process_data, g->p.bytes_process);
+       free(pthreads);
+}
+
+static void print_summary(void)
+{
+       if (g->p.show_details < 0)
+               return;
+
+       printf("\n ###\n");
+       printf(" # %d %s will execute (on %d nodes, %d CPUs):\n",
+               g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", g->p.nr_nodes, g->p.nr_cpus);
+       printf(" #      %5dx %5ldMB global  shared mem operations\n",
+                       g->p.nr_loops, g->p.bytes_global/1024/1024);
+       printf(" #      %5dx %5ldMB process shared mem operations\n",
+                       g->p.nr_loops, g->p.bytes_process/1024/1024);
+       printf(" #      %5dx %5ldMB thread  local  mem operations\n",
+                       g->p.nr_loops, g->p.bytes_thread/1024/1024);
+
+       printf(" ###\n");
+
+       printf("\n ###\n"); fflush(stdout);
+}
+
+static void init_thread_data(void)
+{
+       ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
+       int t;
+
+       g->threads = zalloc_shared_data(size);
+
+       for (t = 0; t < g->p.nr_tasks; t++) {
+               struct thread_data *td = g->threads + t;
+               int cpu;
+
+               /* Allow all nodes by default: */
+               td->bind_node = -1;
+
+               /* Allow all CPUs by default: */
+               CPU_ZERO(&td->bind_cpumask);
+               for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
+                       CPU_SET(cpu, &td->bind_cpumask);
+       }
+}
+
+static void deinit_thread_data(void)
+{
+       ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
+
+       free_data(g->threads, size);
+}
+
+static int init(void)
+{
+       g = (void *)alloc_data(sizeof(*g), MAP_SHARED, 1, 0, 0 /* THP */, 0);
+
+       /* Copy over options: */
+       g->p = p0;
+
+       g->p.nr_cpus = numa_num_configured_cpus();
+
+       g->p.nr_nodes = numa_max_node() + 1;
+
+       /* char array in count_process_nodes(): */
+       BUG_ON(g->p.nr_nodes > MAX_NR_NODES || g->p.nr_nodes < 0);
+
+       if (g->p.show_quiet && !g->p.show_details)
+               g->p.show_details = -1;
+
+       /* Some memory should be specified: */
+       if (!g->p.mb_global_str && !g->p.mb_proc_str && !g->p.mb_thread_str)
+               return -1;
+
+       if (g->p.mb_global_str) {
+               g->p.mb_global = atof(g->p.mb_global_str);
+               BUG_ON(g->p.mb_global < 0);
+       }
+
+       if (g->p.mb_proc_str) {
+               g->p.mb_proc = atof(g->p.mb_proc_str);
+               BUG_ON(g->p.mb_proc < 0);
+       }
+
+       if (g->p.mb_proc_locked_str) {
+               g->p.mb_proc_locked = atof(g->p.mb_proc_locked_str);
+               BUG_ON(g->p.mb_proc_locked < 0);
+               BUG_ON(g->p.mb_proc_locked > g->p.mb_proc);
+       }
+
+       if (g->p.mb_thread_str) {
+               g->p.mb_thread = atof(g->p.mb_thread_str);
+               BUG_ON(g->p.mb_thread < 0);
+       }
+
+       BUG_ON(g->p.nr_threads <= 0);
+       BUG_ON(g->p.nr_proc <= 0);
+
+       g->p.nr_tasks = g->p.nr_proc*g->p.nr_threads;
+
+       g->p.bytes_global               = g->p.mb_global        *1024L*1024L;
+       g->p.bytes_process              = g->p.mb_proc          *1024L*1024L;
+       g->p.bytes_process_locked       = g->p.mb_proc_locked   *1024L*1024L;
+       g->p.bytes_thread               = g->p.mb_thread        *1024L*1024L;
+
+       g->data = setup_shared_data(g->p.bytes_global);
+
+       /* Startup serialization: */
+       init_global_mutex(&g->start_work_mutex);
+       init_global_mutex(&g->startup_mutex);
+       init_global_mutex(&g->startup_done_mutex);
+       init_global_mutex(&g->stop_work_mutex);
+
+       init_thread_data();
+
+       tprintf("#\n");
+       parse_setup_cpu_list();
+       parse_setup_node_list();
+       tprintf("#\n");
+
+       print_summary();
+
+       return 0;
+}
+
+static void deinit(void)
+{
+       free_data(g->data, g->p.bytes_global);
+       g->data = NULL;
+
+       deinit_thread_data();
+
+       free_data(g, sizeof(*g));
+       g = NULL;
+}
+
+/*
+ * Print a short or long result, depending on the verbosity setting:
+ */
+static void print_res(const char *name, double val,
+                     const char *txt_unit, const char *txt_short, const char *txt_long)
+{
+       if (!name)
+               name = "main,";
+
+       if (g->p.show_quiet)
+               printf(" %-30s %15.3f, %-15s %s\n", name, val, txt_unit, txt_short);
+       else
+               printf(" %14.3f %s\n", val, txt_long);
+}
+
+static int __bench_numa(const char *name)
+{
+       struct timeval start, stop, diff;
+       u64 runtime_ns_min, runtime_ns_sum;
+       pid_t *pids, pid, wpid;
+       double delta_runtime;
+       double runtime_avg;
+       double runtime_sec_max;
+       double runtime_sec_min;
+       int wait_stat;
+       double bytes;
+       int i, t;
+
+       if (init())
+               return -1;
+
+       pids = zalloc(g->p.nr_proc * sizeof(*pids));
+       pid = -1;
+
+       /* All threads try to acquire it, this way we can wait for them to start up: */
+       pthread_mutex_lock(&g->start_work_mutex);
+
+       if (g->p.serialize_startup) {
+               tprintf(" #\n");
+               tprintf(" # Startup synchronization: ..."); fflush(stdout);
+       }
+
+       gettimeofday(&start, NULL);
+
+       for (i = 0; i < g->p.nr_proc; i++) {
+               pid = fork();
+               dprintf(" # process %2d: PID %d\n", i, pid);
+
+               BUG_ON(pid < 0);
+               if (!pid) {
+                       /* Child process: */
+                       worker_process(i);
+
+                       exit(0);
+               }
+               pids[i] = pid;
+
+       }
+       /* Wait for all the threads to start up: */
+       while (g->nr_tasks_started != g->p.nr_tasks)
+               usleep(1000);
+
+       BUG_ON(g->nr_tasks_started != g->p.nr_tasks);
+
+       if (g->p.serialize_startup) {
+               double startup_sec;
+
+               pthread_mutex_lock(&g->startup_done_mutex);
+
+               /* This will start all threads: */
+               pthread_mutex_unlock(&g->start_work_mutex);
+
+               /* This mutex is locked - the last started thread will wake us: */
+               pthread_mutex_lock(&g->startup_done_mutex);
+
+               gettimeofday(&stop, NULL);
+
+               timersub(&stop, &start, &diff);
+
+               startup_sec = diff.tv_sec * 1000000000.0;
+               startup_sec += diff.tv_usec * 1000.0;
+               startup_sec /= 1e9;
+
+               tprintf(" threads initialized in %.6f seconds.\n", startup_sec);
+               tprintf(" #\n");
+
+               start = stop;
+               pthread_mutex_unlock(&g->startup_done_mutex);
+       } else {
+               gettimeofday(&start, NULL);
+       }
+
+       /* Parent process: */
+
+
+       for (i = 0; i < g->p.nr_proc; i++) {
+               wpid = waitpid(pids[i], &wait_stat, 0);
+               BUG_ON(wpid < 0);
+               BUG_ON(!WIFEXITED(wait_stat));
+
+       }
+
+       runtime_ns_sum = 0;
+       runtime_ns_min = -1LL;
+
+       for (t = 0; t < g->p.nr_tasks; t++) {
+               u64 thread_runtime_ns = g->threads[t].runtime_ns;
+
+               runtime_ns_sum += thread_runtime_ns;
+               runtime_ns_min = min(thread_runtime_ns, runtime_ns_min);
+       }
+
+       gettimeofday(&stop, NULL);
+       timersub(&stop, &start, &diff);
+
+       BUG_ON(bench_format != BENCH_FORMAT_DEFAULT);
+
+       tprintf("\n ###\n");
+       tprintf("\n");
+
+       runtime_sec_max = diff.tv_sec * 1000000000.0;
+       runtime_sec_max += diff.tv_usec * 1000.0;
+       runtime_sec_max /= 1e9;
+
+       runtime_sec_min = runtime_ns_min/1e9;
+
+       bytes = g->bytes_done;
+       runtime_avg = (double)runtime_ns_sum / g->p.nr_tasks / 1e9;
+
+       if (g->p.measure_convergence) {
+               print_res(name, runtime_sec_max,
+                       "secs,", "NUMA-convergence-latency", "secs latency to NUMA-converge");
+       }
+
+       print_res(name, runtime_sec_max,
+               "secs,", "runtime-max/thread",  "secs slowest (max) thread-runtime");
+
+       print_res(name, runtime_sec_min,
+               "secs,", "runtime-min/thread",  "secs fastest (min) thread-runtime");
+
+       print_res(name, runtime_avg,
+               "secs,", "runtime-avg/thread",  "secs average thread-runtime");
+
+       delta_runtime = (runtime_sec_max - runtime_sec_min)/2.0;
+       print_res(name, delta_runtime / runtime_sec_max * 100.0,
+               "%,", "spread-runtime/thread",  "% difference between max/avg runtime");
+
+       print_res(name, bytes / g->p.nr_tasks / 1e9,
+               "GB,", "data/thread",           "GB data processed, per thread");
+
+       print_res(name, bytes / 1e9,
+               "GB,", "data-total",            "GB data processed, total");
+
+       print_res(name, runtime_sec_max * 1e9 / (bytes / g->p.nr_tasks),
+               "nsecs,", "runtime/byte/thread","nsecs/byte/thread runtime");
+
+       print_res(name, bytes / g->p.nr_tasks / 1e9 / runtime_sec_max,
+               "GB/sec,", "thread-speed",      "GB/sec/thread speed");
+
+       print_res(name, bytes / runtime_sec_max / 1e9,
+               "GB/sec,", "total-speed",       "GB/sec total speed");
+
+       free(pids);
+
+       deinit();
+
+       return 0;
+}
+
+#define MAX_ARGS 50
+
+static int command_size(const char **argv)
+{
+       int size = 0;
+
+       while (*argv) {
+               size++;
+               argv++;
+       }
+
+       BUG_ON(size >= MAX_ARGS);
+
+       return size;
+}
+
+static void init_params(struct params *p, const char *name, int argc, const char **argv)
+{
+       int i;
+
+       printf("\n # Running %s \"perf bench numa", name);
+
+       for (i = 0; i < argc; i++)
+               printf(" %s", argv[i]);
+
+       printf("\"\n");
+
+       memset(p, 0, sizeof(*p));
+
+       /* Initialize nonzero defaults: */
+
+       p->serialize_startup            = 1;
+       p->data_reads                   = true;
+       p->data_writes                  = true;
+       p->data_backwards               = true;
+       p->data_rand_walk               = true;
+       p->nr_loops                     = -1;
+       p->init_random                  = true;
+}
+
+static int run_bench_numa(const char *name, const char **argv)
+{
+       int argc = command_size(argv);
+
+       init_params(&p0, name, argc, argv);
+       argc = parse_options(argc, argv, options, bench_numa_usage, 0);
+       if (argc)
+               goto err;
+
+       if (__bench_numa(name))
+               goto err;
+
+       return 0;
+
+err:
+       usage_with_options(numa_usage, options);
+       return -1;
+}
+
+#define OPT_BW_RAM             "-s",  "20", "-zZq",    "--thp", " 1", "--no-data_rand_walk"
+#define OPT_BW_RAM_NOTHP       OPT_BW_RAM,             "--thp", "-1"
+
+#define OPT_CONV               "-s", "100", "-zZ0qcm", "--thp", " 1"
+#define OPT_CONV_NOTHP         OPT_CONV,               "--thp", "-1"
+
+#define OPT_BW                 "-s",  "20", "-zZ0q",   "--thp", " 1"
+#define OPT_BW_NOTHP           OPT_BW,                 "--thp", "-1"
+
+/*
+ * The built-in test-suite executed by "perf bench numa -a".
+ *
+ * (A minimum of 4 nodes and 16 GB of RAM is recommended.)
+ */
+static const char *tests[][MAX_ARGS] = {
+   /* Basic single-stream NUMA bandwidth measurements: */
+   { "RAM-bw-local,",    "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
+                         "-C" ,   "0", "-M",   "0", OPT_BW_RAM },
+   { "RAM-bw-local-NOTHP,",
+                         "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
+                         "-C" ,   "0", "-M",   "0", OPT_BW_RAM_NOTHP },
+   { "RAM-bw-remote,",   "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
+                         "-C" ,   "0", "-M",   "1", OPT_BW_RAM },
+
+   /* 2-stream NUMA bandwidth measurements: */
+   { "RAM-bw-local-2x,",  "mem",  "-p",  "2",  "-t",  "1", "-P", "1024",
+                          "-C", "0,2", "-M", "0x2", OPT_BW_RAM },
+   { "RAM-bw-remote-2x,", "mem",  "-p",  "2",  "-t",  "1", "-P", "1024",
+                          "-C", "0,2", "-M", "1x2", OPT_BW_RAM },
+
+   /* Cross-stream NUMA bandwidth measurement: */
+   { "RAM-bw-cross,",     "mem",  "-p",  "2",  "-t",  "1", "-P", "1024",
+                          "-C", "0,8", "-M", "1,0", OPT_BW_RAM },
+
+   /* Convergence latency measurements: */
+   { " 1x3-convergence,", "mem",  "-p",  "1", "-t",  "3", "-P",  "512", OPT_CONV },
+   { " 1x4-convergence,", "mem",  "-p",  "1", "-t",  "4", "-P",  "512", OPT_CONV },
+   { " 1x6-convergence,", "mem",  "-p",  "1", "-t",  "6", "-P", "1020", OPT_CONV },
+   { " 2x3-convergence,", "mem",  "-p",  "3", "-t",  "3", "-P", "1020", OPT_CONV },
+   { " 3x3-convergence,", "mem",  "-p",  "3", "-t",  "3", "-P", "1020", OPT_CONV },
+   { " 4x4-convergence,", "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_CONV },
+   { " 4x4-convergence-NOTHP,",
+                         "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_CONV_NOTHP },
+   { " 4x6-convergence,", "mem",  "-p",  "4", "-t",  "6", "-P", "1020", OPT_CONV },
+   { " 4x8-convergence,", "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_CONV },
+   { " 8x4-convergence,", "mem",  "-p",  "8", "-t",  "4", "-P",  "512", OPT_CONV },
+   { " 8x4-convergence-NOTHP,",
+                         "mem",  "-p",  "8", "-t",  "4", "-P",  "512", OPT_CONV_NOTHP },
+   { " 3x1-convergence,", "mem",  "-p",  "3", "-t",  "1", "-P",  "512", OPT_CONV },
+   { " 4x1-convergence,", "mem",  "-p",  "4", "-t",  "1", "-P",  "512", OPT_CONV },
+   { " 8x1-convergence,", "mem",  "-p",  "8", "-t",  "1", "-P",  "512", OPT_CONV },
+   { "16x1-convergence,", "mem",  "-p", "16", "-t",  "1", "-P",  "256", OPT_CONV },
+   { "32x1-convergence,", "mem",  "-p", "32", "-t",  "1", "-P",  "128", OPT_CONV },
+
+   /* Various NUMA process/thread layout bandwidth measurements: */
+   { " 2x1-bw-process,",  "mem",  "-p",  "2", "-t",  "1", "-P", "1024", OPT_BW },
+   { " 3x1-bw-process,",  "mem",  "-p",  "3", "-t",  "1", "-P", "1024", OPT_BW },
+   { " 4x1-bw-process,",  "mem",  "-p",  "4", "-t",  "1", "-P", "1024", OPT_BW },
+   { " 8x1-bw-process,",  "mem",  "-p",  "8", "-t",  "1", "-P", " 512", OPT_BW },
+   { " 8x1-bw-process-NOTHP,",
+                         "mem",  "-p",  "8", "-t",  "1", "-P", " 512", OPT_BW_NOTHP },
+   { "16x1-bw-process,",  "mem",  "-p", "16", "-t",  "1", "-P",  "256", OPT_BW },
+
+   { " 4x1-bw-thread,",          "mem",  "-p",  "1", "-t",  "4", "-T",  "256", OPT_BW },
+   { " 8x1-bw-thread,",          "mem",  "-p",  "1", "-t",  "8", "-T",  "256", OPT_BW },
+   { "16x1-bw-thread,",   "mem",  "-p",  "1", "-t", "16", "-T",  "128", OPT_BW },
+   { "32x1-bw-thread,",   "mem",  "-p",  "1", "-t", "32", "-T",   "64", OPT_BW },
+
+   { " 2x3-bw-thread,",          "mem",  "-p",  "2", "-t",  "3", "-P",  "512", OPT_BW },
+   { " 4x4-bw-thread,",          "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_BW },
+   { " 4x6-bw-thread,",          "mem",  "-p",  "4", "-t",  "6", "-P",  "512", OPT_BW },
+   { " 4x8-bw-thread,",          "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_BW },
+   { " 4x8-bw-thread-NOTHP,",
+                         "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_BW_NOTHP },
+   { " 3x3-bw-thread,",          "mem",  "-p",  "3", "-t",  "3", "-P",  "512", OPT_BW },
+   { " 5x5-bw-thread,",          "mem",  "-p",  "5", "-t",  "5", "-P",  "512", OPT_BW },
+
+   { "2x16-bw-thread,",   "mem",  "-p",  "2", "-t", "16", "-P",  "512", OPT_BW },
+   { "1x32-bw-thread,",   "mem",  "-p",  "1", "-t", "32", "-P", "2048", OPT_BW },
+
+   { "numa02-bw,",       "mem",  "-p",  "1", "-t", "32", "-T",   "32", OPT_BW },
+   { "numa02-bw-NOTHP,",  "mem",  "-p",  "1", "-t", "32", "-T",   "32", OPT_BW_NOTHP },
+   { "numa01-bw-thread,", "mem",  "-p",  "2", "-t", "16", "-T",  "192", OPT_BW },
+   { "numa01-bw-thread-NOTHP,",
+                         "mem",  "-p",  "2", "-t", "16", "-T",  "192", OPT_BW_NOTHP },
+};
+
+static int bench_all(void)
+{
+       int nr = ARRAY_SIZE(tests);
+       int ret;
+       int i;
+
+       ret = system("echo ' #'; echo ' # Running test on: '$(uname -a); echo ' #'");
+       BUG_ON(ret < 0);
+
+       for (i = 0; i < nr; i++) {
+               if (run_bench_numa(tests[i][0], tests[i] + 1))
+                       return -1;
+       }
+
+       printf("\n");
+
+       return 0;
+}
+
+int bench_numa(int argc, const char **argv, const char *prefix __maybe_unused)
+{
+       init_params(&p0, "main,", argc, argv);
+       argc = parse_options(argc, argv, options, bench_numa_usage, 0);
+       if (argc)
+               goto err;
+
+       if (p0.run_all)
+               return bench_all();
+
+       if (__bench_numa(NULL))
+               goto err;
+
+       return 0;
+
+err:
+       usage_with_options(numa_usage, options);
+       return -1;
+}
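
The benchmark above serializes startup with pthread mutexes that live in MAP_SHARED memory, so they keep working across fork(): the parent takes start_work_mutex before spawning the worker processes, each worker announces itself (nr_tasks_started) and then blocks trying to take that same mutex, and the parent releases it only once every expected task has checked in. A minimal sketch of that handshake, assuming process-shared mutexes in an anonymous shared mapping; the names below are illustrative stand-ins, not the perf bench code:

/*
 * Sketch of the fork() + shared-mutex startup handshake used above.
 * Illustrative only.  Build with: cc -pthread sketch.c
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

struct shared {
        pthread_mutex_t start_work;     /* held by the parent until all workers arrived */
        volatile int    nr_started;     /* zero-initialized by MAP_ANONYMOUS */
};

int main(void)
{
        const int nr_proc = 4;
        pthread_mutexattr_t attr;
        struct shared *shr;
        int i;

        /* One shared region visible to parent and children: */
        shr = mmap(NULL, sizeof(*shr), PROT_READ | PROT_WRITE,
                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (shr == MAP_FAILED)
                return 1;

        /* The mutex must be process-shared to synchronize across fork(): */
        pthread_mutexattr_init(&attr);
        pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
        pthread_mutex_init(&shr->start_work, &attr);

        pthread_mutex_lock(&shr->start_work);   /* block workers until we say go */

        for (i = 0; i < nr_proc; i++) {
                if (fork() == 0) {
                        __sync_add_and_fetch(&shr->nr_started, 1);
                        pthread_mutex_lock(&shr->start_work);   /* wait for the parent */
                        pthread_mutex_unlock(&shr->start_work);
                        /* ... real work would start here ... */
                        _exit(0);
                }
        }

        while (shr->nr_started != nr_proc)      /* all workers checked in? */
                usleep(1000);

        pthread_mutex_unlock(&shr->start_work); /* release everyone at once */

        for (i = 0; i < nr_proc; i++)
                wait(NULL);

        return 0;
}

The real benchmark layers a second mutex (startup_done_mutex) on top of this so the parent can also time how long startup took: the serialize_startup branch above locks it before releasing the workers and then blocks on it again, expecting the last thread to start to wake it.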
index dc870cf31b79beefa01c777680efb761c0ed3377..2e6961ea3184f510e4f51143168bc1ea8eb607ae 100644 (file)
 
 struct perf_annotate {
        struct perf_tool tool;
-       bool       force, use_tui, use_stdio;
+       bool       force, use_tui, use_stdio, use_gtk;
        bool       full_paths;
        bool       print_line;
+       bool       skip_missing;
        const char *sym_hist_filter;
        const char *cpu_list;
        DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
@@ -138,9 +139,22 @@ find_next:
                        continue;
                }
 
-               if (use_browser > 0) {
+               if (use_browser == 2) {
+                       int ret;
+
+                       ret = hist_entry__gtk_annotate(he, evidx, NULL);
+                       if (!ret || !ann->skip_missing)
+                               return;
+
+                       /* skip missing symbols */
+                       nd = rb_next(nd);
+               } else if (use_browser == 1) {
                        key = hist_entry__tui_annotate(he, evidx, NULL);
                        switch (key) {
+                       case -1:
+                               if (!ann->skip_missing)
+                                       return;
+                               /* fall through */
                        case K_RIGHT:
                                next = rb_next(nd);
                                break;
@@ -224,6 +238,10 @@ static int __cmd_annotate(struct perf_annotate *ann)
                ui__error("The %s file has no samples!\n", session->filename);
                goto out_delete;
        }
+
+       if (use_browser == 2)
+               perf_gtk__show_annotations();
+
 out_delete:
        /*
         * Speed up the exit process, for large files this can
@@ -270,6 +288,7 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
                    "be more verbose (show symbol address, etc)"),
        OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
                    "dump raw trace in ASCII"),
+       OPT_BOOLEAN(0, "gtk", &annotate.use_gtk, "Use the GTK interface"),
        OPT_BOOLEAN(0, "tui", &annotate.use_tui, "Use the TUI interface"),
        OPT_BOOLEAN(0, "stdio", &annotate.use_stdio, "Use the stdio interface"),
        OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
@@ -280,6 +299,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
                    "print matching source lines (may be slow)"),
        OPT_BOOLEAN('P', "full-paths", &annotate.full_paths,
                    "Don't shorten the displayed pathnames"),
+       OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing,
+                   "Skip symbols that cannot be annotated"),
        OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"),
        OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
                   "Look for files with symbols relative to this directory"),
@@ -300,6 +321,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
                use_browser = 0;
        else if (annotate.use_tui)
                use_browser = 1;
+       else if (annotate.use_gtk)
+               use_browser = 2;
 
        setup_browser(true);
 
@@ -309,7 +332,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
        if (symbol__init() < 0)
                return -1;
 
-       setup_sorting(annotate_usage, options);
+       if (setup_sorting() < 0)
+               usage_with_options(annotate_usage, options);
 
        if (argc) {
                /*
index cae9a5fd2ecfe97489a079146746727719eeb372..77298bf892b85c68d3c662cfe62e050518630a49 100644 (file)
@@ -35,6 +35,18 @@ struct bench_suite {
 /* sentinel: easy for help */
 #define suite_all { "all", "Test all benchmark suites", NULL }
 
+#ifdef LIBNUMA_SUPPORT
+static struct bench_suite numa_suites[] = {
+       { "mem",
+         "Benchmark for NUMA workloads",
+         bench_numa },
+       suite_all,
+       { NULL,
+         NULL,
+         NULL                  }
+};
+#endif
+
 static struct bench_suite sched_suites[] = {
        { "messaging",
          "Benchmark for scheduler and IPC mechanisms",
@@ -68,6 +80,11 @@ struct bench_subsys {
 };
 
 static struct bench_subsys subsystems[] = {
+#ifdef LIBNUMA_SUPPORT
+       { "numa",
+         "NUMA scheduling and MM behavior",
+         numa_suites },
+#endif
        { "sched",
          "scheduler and IPC mechanism",
          sched_suites },
@@ -159,6 +176,7 @@ static void all_suite(struct bench_subsys *subsys)    /* FROM HERE */
                printf("# Running %s/%s benchmark...\n",
                       subsys->name,
                       suites[i].name);
+               fflush(stdout);
 
                argv[1] = suites[i].name;
                suites[i].fn(1, argv, NULL);
@@ -225,6 +243,7 @@ int cmd_bench(int argc, const char **argv, const char *prefix __maybe_unused)
                                printf("# Running %s/%s benchmark...\n",
                                       subsystems[i].name,
                                       subsystems[i].suites[j].name);
+                       fflush(stdout);
                        status = subsystems[i].suites[j].fn(argc - 1,
                                                            argv + 1, prefix);
                        goto end;
index fae8b250b2ca711a8581fd70ab2142a1ddd5e9e5..c96c8fa3824353f1c5401cd2a6bacb9701a2762e 100644 (file)
@@ -14,6 +14,7 @@
 #include "util/parse-options.h"
 #include "util/strlist.h"
 #include "util/build-id.h"
+#include "util/session.h"
 #include "util/symbol.h"
 
 static int build_id_cache__add_file(const char *filename, const char *debugdir)
@@ -58,19 +59,89 @@ static int build_id_cache__remove_file(const char *filename,
        return err;
 }
 
+static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused)
+{
+       char filename[PATH_MAX];
+       u8 build_id[BUILD_ID_SIZE];
+
+       if (dso__build_id_filename(dso, filename, sizeof(filename)) &&
+           filename__read_build_id(filename, build_id,
+                                   sizeof(build_id)) != sizeof(build_id)) {
+               if (errno == ENOENT)
+                       return false;
+
+               pr_warning("Problems with %s file, consider removing it from the cache\n", 
+                          filename);
+       } else if (memcmp(dso->build_id, build_id, sizeof(dso->build_id))) {
+               pr_warning("Problems with %s file, consider removing it from the cache\n", 
+                          filename);
+       }
+
+       return true;
+}
+
+static int build_id_cache__fprintf_missing(const char *filename, bool force, FILE *fp)
+{
+       struct perf_session *session = perf_session__new(filename, O_RDONLY,
+                                                        force, false, NULL);
+       if (session == NULL)
+               return -1;
+
+       perf_session__fprintf_dsos_buildid(session, fp, dso__missing_buildid_cache, 0);
+       perf_session__delete(session);
+
+       return 0;
+}
+
+static int build_id_cache__update_file(const char *filename,
+                                      const char *debugdir)
+{
+       u8 build_id[BUILD_ID_SIZE];
+       char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+
+       int err;
+
+       if (filename__read_build_id(filename, &build_id, sizeof(build_id)) < 0) {
+               pr_debug("Couldn't read a build-id in %s\n", filename);
+               return -1;
+       }
+
+       build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
+       err = build_id_cache__remove_s(sbuild_id, debugdir);
+       if (!err) {
+               err = build_id_cache__add_s(sbuild_id, debugdir, filename,
+                                           false, false);
+       }
+       if (verbose)
+               pr_info("Updating %s %s: %s\n", sbuild_id, filename,
+                       err ? "FAIL" : "Ok");
+
+       return err;
+}
+
 int cmd_buildid_cache(int argc, const char **argv,
                      const char *prefix __maybe_unused)
 {
        struct strlist *list;
        struct str_node *pos;
+       int ret = 0;
+       bool force = false;
        char debugdir[PATH_MAX];
        char const *add_name_list_str = NULL,
-                  *remove_name_list_str = NULL;
+                  *remove_name_list_str = NULL,
+                  *missing_filename = NULL,
+                  *update_name_list_str = NULL;
+
        const struct option buildid_cache_options[] = {
        OPT_STRING('a', "add", &add_name_list_str,
                   "file list", "file(s) to add"),
        OPT_STRING('r', "remove", &remove_name_list_str, "file list",
                    "file(s) to remove"),
+       OPT_STRING('M', "missing", &missing_filename, "file",
+                  "to find missing build ids in the cache"),
+       OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
+       OPT_STRING('u', "update", &update_name_list_str, "file list",
+                   "file(s) to update"),
        OPT_INCR('v', "verbose", &verbose, "be more verbose"),
        OPT_END()
        };
@@ -125,5 +196,26 @@ int cmd_buildid_cache(int argc, const char **argv,
                }
        }
 
-       return 0;
+       if (missing_filename)
+               ret = build_id_cache__fprintf_missing(missing_filename, force, stdout);
+
+       if (update_name_list_str) {
+               list = strlist__new(true, update_name_list_str);
+               if (list) {
+                       strlist__for_each(pos, list)
+                               if (build_id_cache__update_file(pos->s, debugdir)) {
+                                       if (errno == ENOENT) {
+                                               pr_debug("%s wasn't in the cache\n",
+                                                        pos->s);
+                                               continue;
+                                       }
+                                       pr_warning("Couldn't update %s: %s\n",
+                                                  pos->s, strerror(errno));
+                               }
+
+                       strlist__delete(list);
+               }
+       }
+
+       return ret;
 }
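
The new --update path re-keys a cache entry under the build-id currently embedded in the file: it reads the raw id, removes the stale entry, then re-adds the file. The sbuild_id buffer of BUILD_ID_SIZE * 2 + 1 bytes exists because the 20-byte raw id is rendered as 40 hex characters plus a terminating NUL; in the patch this is done by build_id__sprintf(). A small stand-alone illustration of that rendering (the helper name here is made up, not the perf API):

#include <stdio.h>

#define BUILD_ID_SIZE 20

/* Render a raw build-id as a NUL-terminated lowercase hex string.
 * 'out' must hold BUILD_ID_SIZE * 2 + 1 bytes, matching sbuild_id above.
 * (Illustrative stand-in, not the perf helper itself.) */
static void build_id_to_hex(const unsigned char *id, char *out)
{
        int i;

        for (i = 0; i < BUILD_ID_SIZE; i++)
                sprintf(out + i * 2, "%02x", id[i]);
}

int main(void)
{
        unsigned char id[BUILD_ID_SIZE] = { 0xde, 0xad, 0xbe, 0xef };
        char sbuild_id[BUILD_ID_SIZE * 2 + 1];

        build_id_to_hex(id, sbuild_id);
        printf("%s\n", sbuild_id);
        return 0;
}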
index a82d99fec83e2b2f9b1e9ea1d941847baa5243fc..e74366a13218dbc5da8ef576b298d8bcc21c4ed6 100644 (file)
@@ -44,23 +44,26 @@ static int filename__fprintf_build_id(const char *name, FILE *fp)
        return fprintf(fp, "%s\n", sbuild_id);
 }
 
+static bool dso__skip_buildid(struct dso *dso, int with_hits)
+{
+       return with_hits && !dso->hit;
+}
+
 static int perf_session__list_build_ids(bool force, bool with_hits)
 {
        struct perf_session *session;
 
        symbol__elf_init();
-
-       session = perf_session__new(input_name, O_RDONLY, force, false,
-                                   &build_id__mark_dso_hit_ops);
-       if (session == NULL)
-               return -1;
-
        /*
         * See if this is an ELF file first:
         */
-       if (filename__fprintf_build_id(session->filename, stdout))
+       if (filename__fprintf_build_id(input_name, stdout))
                goto out;
 
+       session = perf_session__new(input_name, O_RDONLY, force, false,
+                                   &build_id__mark_dso_hit_ops);
+       if (session == NULL)
+               return -1;
        /*
         * in pipe-mode, the only way to get the buildids is to parse
         * the record stream. Buildids are stored as RECORD_HEADER_BUILD_ID
@@ -68,9 +71,9 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
        if (with_hits || session->fd_pipe)
                perf_session__process_events(session, &build_id__mark_dso_hit_ops);
 
-       perf_session__fprintf_dsos_buildid(session, stdout, with_hits);
-out:
+       perf_session__fprintf_dsos_buildid(session, stdout, dso__skip_buildid, with_hits);
        perf_session__delete(session);
+out:
        return 0;
 }
 
index 93b852f8a5d5936c902a720ed74cde4904bfa781..d207a97a2db1e96887418f354961d947bfecb7c2 100644 (file)
@@ -23,7 +23,6 @@ static char const *input_old = "perf.data.old",
                  *input_new = "perf.data";
 static char      diff__default_sort_order[] = "dso,symbol";
 static bool  force;
-static bool show_displacement;
 static bool show_period;
 static bool show_formula;
 static bool show_baseline_only;
@@ -146,58 +145,47 @@ static int setup_compute(const struct option *opt, const char *str,
        return -EINVAL;
 }
 
-static double get_period_percent(struct hist_entry *he, u64 period)
+double perf_diff__period_percent(struct hist_entry *he, u64 period)
 {
        u64 total = he->hists->stats.total_period;
        return (period * 100.0) / total;
 }
 
-double perf_diff__compute_delta(struct hist_entry *he)
+double perf_diff__compute_delta(struct hist_entry *he, struct hist_entry *pair)
 {
-       struct hist_entry *pair = hist_entry__next_pair(he);
-       double new_percent = get_period_percent(he, he->stat.period);
-       double old_percent = pair ? get_period_percent(pair, pair->stat.period) : 0.0;
+       double new_percent = perf_diff__period_percent(he, he->stat.period);
+       double old_percent = perf_diff__period_percent(pair, pair->stat.period);
 
        he->diff.period_ratio_delta = new_percent - old_percent;
        he->diff.computed = true;
        return he->diff.period_ratio_delta;
 }
 
-double perf_diff__compute_ratio(struct hist_entry *he)
+double perf_diff__compute_ratio(struct hist_entry *he, struct hist_entry *pair)
 {
-       struct hist_entry *pair = hist_entry__next_pair(he);
        double new_period = he->stat.period;
-       double old_period = pair ? pair->stat.period : 0;
+       double old_period = pair->stat.period;
 
        he->diff.computed = true;
-       he->diff.period_ratio = pair ? (new_period / old_period) : 0;
+       he->diff.period_ratio = new_period / old_period;
        return he->diff.period_ratio;
 }
 
-s64 perf_diff__compute_wdiff(struct hist_entry *he)
+s64 perf_diff__compute_wdiff(struct hist_entry *he, struct hist_entry *pair)
 {
-       struct hist_entry *pair = hist_entry__next_pair(he);
        u64 new_period = he->stat.period;
-       u64 old_period = pair ? pair->stat.period : 0;
+       u64 old_period = pair->stat.period;
 
        he->diff.computed = true;
-
-       if (!pair)
-               he->diff.wdiff = 0;
-       else
-               he->diff.wdiff = new_period * compute_wdiff_w2 -
-                                old_period * compute_wdiff_w1;
+       he->diff.wdiff = new_period * compute_wdiff_w2 -
+                        old_period * compute_wdiff_w1;
 
        return he->diff.wdiff;
 }
 
-static int formula_delta(struct hist_entry *he, char *buf, size_t size)
+static int formula_delta(struct hist_entry *he, struct hist_entry *pair,
+                        char *buf, size_t size)
 {
-       struct hist_entry *pair = hist_entry__next_pair(he);
-
-       if (!pair)
-               return -1;
-
        return scnprintf(buf, size,
                         "(%" PRIu64 " * 100 / %" PRIu64 ") - "
                         "(%" PRIu64 " * 100 / %" PRIu64 ")",
@@ -205,41 +193,36 @@ static int formula_delta(struct hist_entry *he, char *buf, size_t size)
                          pair->stat.period, pair->hists->stats.total_period);
 }
 
-static int formula_ratio(struct hist_entry *he, char *buf, size_t size)
+static int formula_ratio(struct hist_entry *he, struct hist_entry *pair,
+                        char *buf, size_t size)
 {
-       struct hist_entry *pair = hist_entry__next_pair(he);
        double new_period = he->stat.period;
-       double old_period = pair ? pair->stat.period : 0;
-
-       if (!pair)
-               return -1;
+       double old_period = pair->stat.period;
 
        return scnprintf(buf, size, "%.0F / %.0F", new_period, old_period);
 }
 
-static int formula_wdiff(struct hist_entry *he, char *buf, size_t size)
+static int formula_wdiff(struct hist_entry *he, struct hist_entry *pair,
+                        char *buf, size_t size)
 {
-       struct hist_entry *pair = hist_entry__next_pair(he);
        u64 new_period = he->stat.period;
-       u64 old_period = pair ? pair->stat.period : 0;
-
-       if (!pair)
-               return -1;
+       u64 old_period = pair->stat.period;
 
        return scnprintf(buf, size,
                  "(%" PRIu64 " * " "%" PRId64 ") - (%" PRIu64 " * " "%" PRId64 ")",
                  new_period, compute_wdiff_w2, old_period, compute_wdiff_w1);
 }
 
-int perf_diff__formula(char *buf, size_t size, struct hist_entry *he)
+int perf_diff__formula(struct hist_entry *he, struct hist_entry *pair,
+                      char *buf, size_t size)
 {
        switch (compute) {
        case COMPUTE_DELTA:
-               return formula_delta(he, buf, size);
+               return formula_delta(he, pair, buf, size);
        case COMPUTE_RATIO:
-               return formula_ratio(he, buf, size);
+               return formula_ratio(he, pair, buf, size);
        case COMPUTE_WEIGHTED_DIFF:
-               return formula_wdiff(he, buf, size);
+               return formula_wdiff(he, pair, buf, size);
        default:
                BUG_ON(1);
        }
@@ -292,48 +275,6 @@ static struct perf_tool tool = {
        .ordering_requires_timestamps = true,
 };
 
-static void insert_hist_entry_by_name(struct rb_root *root,
-                                     struct hist_entry *he)
-{
-       struct rb_node **p = &root->rb_node;
-       struct rb_node *parent = NULL;
-       struct hist_entry *iter;
-
-       while (*p != NULL) {
-               parent = *p;
-               iter = rb_entry(parent, struct hist_entry, rb_node);
-               if (hist_entry__cmp(he, iter) < 0)
-                       p = &(*p)->rb_left;
-               else
-                       p = &(*p)->rb_right;
-       }
-
-       rb_link_node(&he->rb_node, parent, p);
-       rb_insert_color(&he->rb_node, root);
-}
-
-static void hists__name_resort(struct hists *self, bool sort)
-{
-       unsigned long position = 1;
-       struct rb_root tmp = RB_ROOT;
-       struct rb_node *next = rb_first(&self->entries);
-
-       while (next != NULL) {
-               struct hist_entry *n = rb_entry(next, struct hist_entry, rb_node);
-
-               next = rb_next(&n->rb_node);
-               n->position = position++;
-
-               if (sort) {
-                       rb_erase(&n->rb_node, &self->entries);
-                       insert_hist_entry_by_name(&tmp, n);
-               }
-       }
-
-       if (sort)
-               self->entries = tmp;
-}
-
 static struct perf_evsel *evsel_match(struct perf_evsel *evsel,
                                      struct perf_evlist *evlist)
 {
@@ -346,34 +287,34 @@ static struct perf_evsel *evsel_match(struct perf_evsel *evsel,
        return NULL;
 }
 
-static void perf_evlist__resort_hists(struct perf_evlist *evlist, bool name)
+static void perf_evlist__collapse_resort(struct perf_evlist *evlist)
 {
        struct perf_evsel *evsel;
 
        list_for_each_entry(evsel, &evlist->entries, node) {
                struct hists *hists = &evsel->hists;
 
-               hists__output_resort(hists);
-
-               /*
-                * The hists__name_resort only sets possition
-                * if name is false.
-                */
-               if (name || ((!name) && show_displacement))
-                       hists__name_resort(hists, name);
+               hists__collapse_resort(hists);
        }
 }
 
 static void hists__baseline_only(struct hists *hists)
 {
-       struct rb_node *next = rb_first(&hists->entries);
+       struct rb_root *root;
+       struct rb_node *next;
 
+       if (sort__need_collapse)
+               root = &hists->entries_collapsed;
+       else
+               root = hists->entries_in;
+
+       next = rb_first(root);
        while (next != NULL) {
-               struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node);
+               struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node_in);
 
-               next = rb_next(&he->rb_node);
+               next = rb_next(&he->rb_node_in);
                if (!hist_entry__next_pair(he)) {
-                       rb_erase(&he->rb_node, &hists->entries);
+                       rb_erase(&he->rb_node_in, root);
                        hist_entry__free(he);
                }
        }
@@ -385,18 +326,21 @@ static void hists__precompute(struct hists *hists)
 
        while (next != NULL) {
                struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node);
+               struct hist_entry *pair = hist_entry__next_pair(he);
 
                next = rb_next(&he->rb_node);
+               if (!pair)
+                       continue;
 
                switch (compute) {
                case COMPUTE_DELTA:
-                       perf_diff__compute_delta(he);
+                       perf_diff__compute_delta(he, pair);
                        break;
                case COMPUTE_RATIO:
-                       perf_diff__compute_ratio(he);
+                       perf_diff__compute_ratio(he, pair);
                        break;
                case COMPUTE_WEIGHTED_DIFF:
-                       perf_diff__compute_wdiff(he);
+                       perf_diff__compute_wdiff(he, pair);
                        break;
                default:
                        BUG_ON(1);
@@ -470,19 +414,30 @@ static void insert_hist_entry_by_compute(struct rb_root *root,
 
 static void hists__compute_resort(struct hists *hists)
 {
-       struct rb_root tmp = RB_ROOT;
-       struct rb_node *next = rb_first(&hists->entries);
+       struct rb_root *root;
+       struct rb_node *next;
+
+       if (sort__need_collapse)
+               root = &hists->entries_collapsed;
+       else
+               root = hists->entries_in;
+
+       hists->entries = RB_ROOT;
+       next = rb_first(root);
+
+       hists->nr_entries = 0;
+       hists->stats.total_period = 0;
+       hists__reset_col_len(hists);
 
        while (next != NULL) {
-               struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node);
+               struct hist_entry *he;
 
-               next = rb_next(&he->rb_node);
+               he = rb_entry(next, struct hist_entry, rb_node_in);
+               next = rb_next(&he->rb_node_in);
 
-               rb_erase(&he->rb_node, &hists->entries);
-               insert_hist_entry_by_compute(&tmp, he, compute);
+               insert_hist_entry_by_compute(&hists->entries, he, compute);
+               hists__inc_nr_entries(hists, he);
        }
-
-       hists->entries = tmp;
 }
 
 static void hists__process(struct hists *old, struct hists *new)
@@ -497,6 +452,8 @@ static void hists__process(struct hists *old, struct hists *new)
        if (sort_compute) {
                hists__precompute(new);
                hists__compute_resort(new);
+       } else {
+               hists__output_resort(new);
        }
 
        hists__fprintf(new, true, 0, 0, stdout);
@@ -528,8 +485,8 @@ static int __cmd_diff(void)
        evlist_old = older->evlist;
        evlist_new = newer->evlist;
 
-       perf_evlist__resort_hists(evlist_old, true);
-       perf_evlist__resort_hists(evlist_new, false);
+       perf_evlist__collapse_resort(evlist_old);
+       perf_evlist__collapse_resort(evlist_new);
 
        list_for_each_entry(evsel, &evlist_new->entries, node) {
                struct perf_evsel *evsel_old;
@@ -562,8 +519,6 @@ static const char * const diff_usage[] = {
 static const struct option options[] = {
        OPT_INCR('v', "verbose", &verbose,
                    "be more verbose (show symbol address, etc)"),
-       OPT_BOOLEAN('M', "displacement", &show_displacement,
-                   "Show position displacement relative to baseline"),
        OPT_BOOLEAN('b', "baseline-only", &show_baseline_only,
                    "Show only items with match in baseline"),
        OPT_CALLBACK('c', "compute", &compute,
@@ -597,40 +552,32 @@ static const struct option options[] = {
 
 static void ui_init(void)
 {
-       perf_hpp__init();
-
-       /* No overhead column. */
-       perf_hpp__column_enable(PERF_HPP__OVERHEAD, false);
-
        /*
-        * Display baseline/delta/ratio/displacement/
+        * Display baseline/delta/ratio
         * formula/periods columns.
         */
-       perf_hpp__column_enable(PERF_HPP__BASELINE, true);
+       perf_hpp__column_enable(PERF_HPP__BASELINE);
 
        switch (compute) {
        case COMPUTE_DELTA:
-               perf_hpp__column_enable(PERF_HPP__DELTA, true);
+               perf_hpp__column_enable(PERF_HPP__DELTA);
                break;
        case COMPUTE_RATIO:
-               perf_hpp__column_enable(PERF_HPP__RATIO, true);
+               perf_hpp__column_enable(PERF_HPP__RATIO);
                break;
        case COMPUTE_WEIGHTED_DIFF:
-               perf_hpp__column_enable(PERF_HPP__WEIGHTED_DIFF, true);
+               perf_hpp__column_enable(PERF_HPP__WEIGHTED_DIFF);
                break;
        default:
                BUG_ON(1);
        };
 
-       if (show_displacement)
-               perf_hpp__column_enable(PERF_HPP__DISPL, true);
-
        if (show_formula)
-               perf_hpp__column_enable(PERF_HPP__FORMULA, true);
+               perf_hpp__column_enable(PERF_HPP__FORMULA);
 
        if (show_period) {
-               perf_hpp__column_enable(PERF_HPP__PERIOD, true);
-               perf_hpp__column_enable(PERF_HPP__PERIOD_BASELINE, true);
+               perf_hpp__column_enable(PERF_HPP__PERIOD);
+               perf_hpp__column_enable(PERF_HPP__PERIOD_BASELINE);
        }
 }
 
@@ -658,7 +605,9 @@ int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
 
        ui_init();
 
-       setup_sorting(diff_usage, options);
+       if (setup_sorting() < 0)
+               usage_with_options(diff_usage, options);
+
        setup_pager();
 
        sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", NULL);
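
With the paired entry now passed in explicitly, the three comparison modes reduce to plain arithmetic over the old and new sample periods: delta compares each entry's percentage share of its session's total period, ratio divides the raw periods, and the weighted diff applies the two user-supplied weights. Restated outside the hist_entry structures, as illustrative helpers rather than perf internals:

#include <stdint.h>

/* COMPUTE_DELTA: difference of percentage shares of each session's total. */
double compute_delta_pct(uint64_t new_period, uint64_t new_total,
                         uint64_t old_period, uint64_t old_total)
{
        return (new_period * 100.0 / new_total) -
               (old_period * 100.0 / old_total);
}

/* COMPUTE_RATIO: new period divided by old period. */
double compute_ratio_val(uint64_t new_period, uint64_t old_period)
{
        return (double)new_period / (double)old_period;
}

/* COMPUTE_WEIGHTED_DIFF: new * w2 - old * w1, with user-supplied weights. */
int64_t compute_wdiff_val(uint64_t new_period, uint64_t old_period,
                          int64_t w1, int64_t w2)
{
        return (int64_t)(new_period * w2) - (int64_t)(old_period * w1);
}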
index c20f1dcfb7e226f3348727c9957d3260554a032d..05bd9dfe875cb95aeb6d2008b3319c2e8ca8f5eb 100644 (file)
 #include "util/parse-options.h"
 #include "util/session.h"
 
-struct perf_attr_details {
-       bool freq;
-       bool verbose;
-};
-
-static int comma_printf(bool *first, const char *fmt, ...)
-{
-       va_list args;
-       int ret = 0;
-
-       if (!*first) {
-               ret += printf(",");
-       } else {
-               ret += printf(":");
-               *first = false;
-       }
-
-       va_start(args, fmt);
-       ret += vprintf(fmt, args);
-       va_end(args);
-       return ret;
-}
-
-static int __if_print(bool *first, const char *field, u64 value)
-{
-       if (value == 0)
-               return 0;
-
-       return comma_printf(first, " %s: %" PRIu64, field, value);
-}
-
-#define if_print(field) __if_print(&first, #field, pos->attr.field)
-
 static int __cmd_evlist(const char *file_name, struct perf_attr_details *details)
 {
        struct perf_session *session;
@@ -57,52 +24,8 @@ static int __cmd_evlist(const char *file_name, struct perf_attr_details *details
        if (session == NULL)
                return -ENOMEM;
 
-       list_for_each_entry(pos, &session->evlist->entries, node) {
-               bool first = true;
-
-               printf("%s", perf_evsel__name(pos));
-
-               if (details->verbose || details->freq) {
-                       comma_printf(&first, " sample_freq=%" PRIu64,
-                                    (u64)pos->attr.sample_freq);
-               }
-
-               if (details->verbose) {
-                       if_print(type);
-                       if_print(config);
-                       if_print(config1);
-                       if_print(config2);
-                       if_print(size);
-                       if_print(sample_type);
-                       if_print(read_format);
-                       if_print(disabled);
-                       if_print(inherit);
-                       if_print(pinned);
-                       if_print(exclusive);
-                       if_print(exclude_user);
-                       if_print(exclude_kernel);
-                       if_print(exclude_hv);
-                       if_print(exclude_idle);
-                       if_print(mmap);
-                       if_print(comm);
-                       if_print(freq);
-                       if_print(inherit_stat);
-                       if_print(enable_on_exec);
-                       if_print(task);
-                       if_print(watermark);
-                       if_print(precise_ip);
-                       if_print(mmap_data);
-                       if_print(sample_id_all);
-                       if_print(exclude_host);
-                       if_print(exclude_guest);
-                       if_print(__reserved_1);
-                       if_print(wakeup_events);
-                       if_print(bp_type);
-                       if_print(branch_sample_type);
-               }
-
-               putchar('\n');
-       }
+       list_for_each_entry(pos, &session->evlist->entries, node)
+               perf_evsel__fprintf(pos, details, stdout);
 
        perf_session__delete(session);
        return 0;
@@ -116,6 +39,8 @@ int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused)
        OPT_BOOLEAN('F', "freq", &details.freq, "Show the sample frequency"),
        OPT_BOOLEAN('v', "verbose", &details.verbose,
                    "Show all event attr details"),
+       OPT_BOOLEAN('g', "group", &details.event_group,
+                   "Show event group information"),
        OPT_END()
        };
        const char * const evlist_usage[] = {
@@ -127,5 +52,10 @@ int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused)
        if (argc)
                usage_with_options(evlist_usage, options);
 
+       if (details.event_group && (details.verbose || details.freq)) {
+               pr_err("--group option is not compatible with other options\n");
+               usage_with_options(evlist_usage, options);
+       }
+
        return __cmd_evlist(input_name, &details);
 }
index 0b4b796167beca45dc12736527fa7bcfe2560d9e..46878daca5cc723b3200763e023744e87c7dc2fd 100644 (file)
@@ -17,6 +17,7 @@
 #include "util/debug.h"
 
 #include <linux/rbtree.h>
+#include <linux/string.h>
 
 struct alloc_stat;
 typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);
@@ -340,7 +341,7 @@ static void __print_result(struct rb_root *root, struct perf_session *session,
                           int n_lines, int is_caller)
 {
        struct rb_node *next;
-       struct machine *machine;
+       struct machine *machine = &session->machines.host;
 
        printf("%.102s\n", graph_dotted_line);
        printf(" %-34s |",  is_caller ? "Callsite": "Alloc Ptr");
@@ -349,11 +350,6 @@ static void __print_result(struct rb_root *root, struct perf_session *session,
 
        next = rb_first(root);
 
-       machine = perf_session__find_host_machine(session);
-       if (!machine) {
-               pr_err("__print_result: couldn't find kernel information\n");
-               return;
-       }
        while (next && n_lines--) {
                struct alloc_stat *data = rb_entry(next, struct alloc_stat,
                                                   node);
@@ -614,8 +610,7 @@ static struct sort_dimension *avail_sorts[] = {
        &pingpong_sort_dimension,
 };
 
-#define NUM_AVAIL_SORTS        \
-       (int)(sizeof(avail_sorts) / sizeof(struct sort_dimension *))
+#define NUM_AVAIL_SORTS        ((int)ARRAY_SIZE(avail_sorts))
 
 static int sort_dimension__add(const char *tok, struct list_head *list)
 {
@@ -624,12 +619,11 @@ static int sort_dimension__add(const char *tok, struct list_head *list)
 
        for (i = 0; i < NUM_AVAIL_SORTS; i++) {
                if (!strcmp(avail_sorts[i]->name, tok)) {
-                       sort = malloc(sizeof(*sort));
+                       sort = memdup(avail_sorts[i], sizeof(*avail_sorts[i]));
                        if (!sort) {
-                               pr_err("%s: malloc failed\n", __func__);
+                               pr_err("%s: memdup failed\n", __func__);
                                return -1;
                        }
-                       memcpy(sort, avail_sorts[i], sizeof(*sort));
                        list_add_tail(&sort->list, list);
                        return 0;
                }
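
The kmem change swaps an open-coded malloc() + memcpy() pair for memdup(), pulled in via the added linux/string.h include, which allocates a buffer and copies the source into it in one step. The conventional shape of such a helper, sketched for reference (not copied from the tools tree):

#include <stdlib.h>
#include <string.h>

/* Duplicate 'len' bytes from 'src' into a freshly malloc()ed buffer.
 * Returns NULL on allocation failure.  (Sketch of the usual memdup()
 * semantics; illustrative only.) */
void *memdup(const void *src, size_t len)
{
        void *p = malloc(len);

        if (p)
                memcpy(p, src, len);
        return p;
}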
index ca3f80ebc100ce271007940cf32a4f16856f1628..37a769d7f9fead5a666e3124acf94a5570e505fd 100644 (file)
@@ -973,8 +973,7 @@ __cmd_buildid_list(const char *file_name, int argc, const char **argv)
 
 int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused)
 {
-       const char *file_name;
-
+       const char *file_name = NULL;
        const struct option kvm_options[] = {
                OPT_STRING('i', "input", &file_name, "file",
                           "Input file name"),
index f3151d3c70ce15c6b93e57a8c3ebe0f70425f0a8..774c90713a53fe8a27f0561a8476d5c23748edab 100644 (file)
@@ -224,130 +224,28 @@ static bool perf_evlist__equal(struct perf_evlist *evlist,
 
 static int perf_record__open(struct perf_record *rec)
 {
+       char msg[512];
        struct perf_evsel *pos;
        struct perf_evlist *evlist = rec->evlist;
        struct perf_session *session = rec->session;
        struct perf_record_opts *opts = &rec->opts;
        int rc = 0;
 
-       /*
-        * Set the evsel leader links before we configure attributes,
-        * since some might depend on this info.
-        */
-       if (opts->group)
-               perf_evlist__set_leader(evlist);
-
-       perf_evlist__config_attrs(evlist, opts);
+       perf_evlist__config(evlist, opts);
 
        list_for_each_entry(pos, &evlist->entries, node) {
-               struct perf_event_attr *attr = &pos->attr;
-               /*
-                * Check if parse_single_tracepoint_event has already asked for
-                * PERF_SAMPLE_TIME.
-                *
-                * XXX this is kludgy but short term fix for problems introduced by
-                * eac23d1c that broke 'perf script' by having different sample_types
-                * when using multiple tracepoint events when we use a perf binary
-                * that tries to use sample_id_all on an older kernel.
-                *
-                * We need to move counter creation to perf_session, support
-                * different sample_types, etc.
-                */
-               bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;
-
-fallback_missing_features:
-               if (opts->exclude_guest_missing)
-                       attr->exclude_guest = attr->exclude_host = 0;
-retry_sample_id:
-               attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
 try_again:
                if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
-                       int err = errno;
-
-                       if (err == EPERM || err == EACCES) {
-                               ui__error_paranoid();
-                               rc = -err;
-                               goto out;
-                       } else if (err ==  ENODEV && opts->target.cpu_list) {
-                               pr_err("No such device - did you specify"
-                                      " an out-of-range profile CPU?\n");
-                               rc = -err;
-                               goto out;
-                       } else if (err == EINVAL) {
-                               if (!opts->exclude_guest_missing &&
-                                   (attr->exclude_guest || attr->exclude_host)) {
-                                       pr_debug("Old kernel, cannot exclude "
-                                                "guest or host samples.\n");
-                                       opts->exclude_guest_missing = true;
-                                       goto fallback_missing_features;
-                               } else if (!opts->sample_id_all_missing) {
-                                       /*
-                                        * Old kernel, no attr->sample_id_type_all field
-                                        */
-                                       opts->sample_id_all_missing = true;
-                                       if (!opts->sample_time && !opts->raw_samples && !time_needed)
-                                               attr->sample_type &= ~PERF_SAMPLE_TIME;
-
-                                       goto retry_sample_id;
-                               }
-                       }
-
-                       /*
-                        * If it's cycles then fall back to hrtimer
-                        * based cpu-clock-tick sw counter, which
-                        * is always available even if no PMU support.
-                        *
-                        * PPC returns ENXIO until 2.6.37 (behavior changed
-                        * with commit b0a873e).
-                        */
-                       if ((err == ENOENT || err == ENXIO)
-                                       && attr->type == PERF_TYPE_HARDWARE
-                                       && attr->config == PERF_COUNT_HW_CPU_CYCLES) {
-
+                       if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
                                if (verbose)
-                                       ui__warning("The cycles event is not supported, "
-                                                   "trying to fall back to cpu-clock-ticks\n");
-                               attr->type = PERF_TYPE_SOFTWARE;
-                               attr->config = PERF_COUNT_SW_CPU_CLOCK;
-                               if (pos->name) {
-                                       free(pos->name);
-                                       pos->name = NULL;
-                               }
+                                       ui__warning("%s\n", msg);
                                goto try_again;
                        }
 
-                       if (err == ENOENT) {
-                               ui__error("The %s event is not supported.\n",
-                                         perf_evsel__name(pos));
-                               rc = -err;
-                               goto out;
-                       } else if ((err == EOPNOTSUPP) && (attr->precise_ip)) {
-                               ui__error("\'precise\' request may not be supported. "
-                                         "Try removing 'p' modifier\n");
-                               rc = -err;
-                               goto out;
-                       }
-
-                       printf("\n");
-                       error("sys_perf_event_open() syscall returned with %d "
-                             "(%s) for event %s. /bin/dmesg may provide "
-                             "additional information.\n",
-                             err, strerror(err), perf_evsel__name(pos));
-
-#if defined(__i386__) || defined(__x86_64__)
-                       if (attr->type == PERF_TYPE_HARDWARE &&
-                           err == EOPNOTSUPP) {
-                               pr_err("No hardware sampling interrupt available."
-                                      " No APIC? If so then you can boot the kernel"
-                                      " with the \"lapic\" boot parameter to"
-                                      " force-enable it.\n");
-                               rc = -err;
-                               goto out;
-                       }
-#endif
-
-                       pr_err("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
-                       rc = -err;
+                       rc = -errno;
+                       perf_evsel__open_strerror(pos, &opts->target,
+                                                 errno, msg, sizeof(msg));
+                       ui__error("%s\n", msg);
                        goto out;
                }
        }
@@ -430,10 +328,6 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
 {
        int err;
        struct perf_tool *tool = data;
-
-       if (machine__is_host(machine))
-               return;
-
        /*
         *As for guest kernel when processing subcommand record&report,
         *we arrange module mmap prior to guest kernel mmap and trigger
@@ -592,6 +486,9 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
                goto out_delete_session;
        }
 
+       if (!evsel_list->nr_groups)
+               perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
+
        /*
         * perf_session__delete(session) will be called at perf_record__exit()
         */
@@ -618,12 +515,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
 
        rec->post_processing_offset = lseek(output, 0, SEEK_CUR);
 
-       machine = perf_session__find_host_machine(session);
-       if (!machine) {
-               pr_err("Couldn't find native kernel information.\n");
-               err = -1;
-               goto out_delete_session;
-       }
+       machine = &session->machines.host;
 
        if (opts->pipe_output) {
                err = perf_event__synthesize_attrs(tool, session,
@@ -676,9 +568,10 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
                       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
                       "Check /proc/modules permission or run as root.\n");
 
-       if (perf_guest)
-               perf_session__process_machines(session, tool,
-                                              perf_event__synthesize_guest_os);
+       if (perf_guest) {
+               machines__process_guests(&session->machines,
+                                        perf_event__synthesize_guest_os, tool);
+       }
 
        if (!opts->target.system_wide)
                err = perf_event__synthesize_thread_map(tool, evsel_list->threads,
@@ -875,11 +768,10 @@ static int get_stack_size(char *str, unsigned long *_size)
 }
 #endif /* LIBUNWIND_SUPPORT */
 
-static int
-parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg,
-                   int unset)
+int record_parse_callchain_opt(const struct option *opt,
+                              const char *arg, int unset)
 {
-       struct perf_record *rec = (struct perf_record *)opt->value;
+       struct perf_record_opts *opts = opt->value;
        char *tok, *name, *saveptr = NULL;
        char *buf;
        int ret = -1;
@@ -905,7 +797,7 @@ parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg,
                /* Framepointer style */
                if (!strncmp(name, "fp", sizeof("fp"))) {
                        if (!strtok_r(NULL, ",", &saveptr)) {
-                               rec->opts.call_graph = CALLCHAIN_FP;
+                               opts->call_graph = CALLCHAIN_FP;
                                ret = 0;
                        } else
                                pr_err("callchain: No more arguments "
@@ -918,20 +810,20 @@ parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg,
                        const unsigned long default_stack_dump_size = 8192;
 
                        ret = 0;
-                       rec->opts.call_graph = CALLCHAIN_DWARF;
-                       rec->opts.stack_dump_size = default_stack_dump_size;
+                       opts->call_graph = CALLCHAIN_DWARF;
+                       opts->stack_dump_size = default_stack_dump_size;
 
                        tok = strtok_r(NULL, ",", &saveptr);
                        if (tok) {
                                unsigned long size = 0;
 
                                ret = get_stack_size(tok, &size);
-                               rec->opts.stack_dump_size = size;
+                               opts->stack_dump_size = size;
                        }
 
                        if (!ret)
                                pr_debug("callchain: stack dump size %d\n",
-                                        rec->opts.stack_dump_size);
+                                        opts->stack_dump_size);
 #endif /* LIBUNWIND_SUPPORT */
                } else {
                        pr_err("callchain: Unknown -g option "
@@ -944,7 +836,7 @@ parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg,
        free(buf);
 
        if (!ret)
-               pr_debug("callchain: type %d\n", rec->opts.call_graph);
+               pr_debug("callchain: type %d\n", opts->call_graph);
 
        return ret;
 }
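
The hunk above turns the private callchain parser into record_parse_callchain_opt() and makes it write straight into struct perf_record_opts, so perf top can reuse it further down. A minimal standalone sketch of the same strtok_r-based "mode[,dump_size]" parsing; the enum, struct and parse_callchain() names below are illustrative only, not perf's API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum chain_mode { CHAIN_FP, CHAIN_DWARF };

struct chain_opts {
    enum chain_mode mode;
    unsigned long   dump_size;          /* only meaningful for dwarf */
};

/* Accept "fp" or "dwarf[,dump_size]", the shape the -g option takes. */
static int parse_callchain(const char *arg, struct chain_opts *co)
{
    char *buf = strdup(arg), *saveptr = NULL;
    char *tok = strtok_r(buf, ",", &saveptr);
    int ret = -1;

    if (!tok)
        goto out;

    if (!strcmp(tok, "fp")) {
        co->mode = CHAIN_FP;
        ret = 0;
    } else if (!strcmp(tok, "dwarf")) {
        co->mode = CHAIN_DWARF;
        co->dump_size = 8192;           /* default stack dump size */
        tok = strtok_r(NULL, ",", &saveptr);
        if (tok)
            co->dump_size = strtoul(tok, NULL, 0);
        ret = 0;
    }
out:
    free(buf);
    return ret;
}

int main(void)
{
    struct chain_opts co;

    if (!parse_callchain("dwarf,16384", &co))
        printf("mode=%d dump_size=%lu\n", co.mode, co.dump_size);
    return 0;
}

On the command line this corresponds to something like "perf record -g dwarf,8192 -- <workload>" when libunwind support is built in, 8192 being the built-in default when no size is given.
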
@@ -982,9 +874,9 @@ static struct perf_record record = {
 #define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: "
 
 #ifdef LIBUNWIND_SUPPORT
-static const char callchain_help[] = CALLCHAIN_HELP "[fp] dwarf";
+const char record_callchain_help[] = CALLCHAIN_HELP "[fp] dwarf";
 #else
-static const char callchain_help[] = CALLCHAIN_HELP "[fp]";
+const char record_callchain_help[] = CALLCHAIN_HELP "[fp]";
 #endif
 
 /*
@@ -1028,9 +920,9 @@ const struct option record_options[] = {
                     "number of mmap data pages"),
        OPT_BOOLEAN(0, "group", &record.opts.group,
                    "put the counters into a counter group"),
-       OPT_CALLBACK_DEFAULT('g', "call-graph", &record, "mode[,dump_size]",
-                            callchain_help, &parse_callchain_opt,
-                            "fp"),
+       OPT_CALLBACK_DEFAULT('g', "call-graph", &record.opts,
+                            "mode[,dump_size]", record_callchain_help,
+                            &record_parse_callchain_opt, "fp"),
        OPT_INCR('v', "verbose", &verbose,
                    "be more verbose (show counter open errors, etc)"),
        OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
index fc251005dd3d9ce954f4e382517f305b7dc3070d..96b5a7fee4bbe45a0f2558fb243e706ad65b8567 100644 (file)
@@ -8,6 +8,7 @@
 #include "builtin.h"
 
 #include "util/util.h"
+#include "util/cache.h"
 
 #include "util/annotate.h"
 #include "util/color.h"
@@ -54,6 +55,16 @@ struct perf_report {
        DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 };
 
+static int perf_report_config(const char *var, const char *value, void *cb)
+{
+       if (!strcmp(var, "report.group")) {
+               symbol_conf.event_group = perf_config_bool(var, value);
+               return 0;
+       }
+
+       return perf_default_config(var, value, cb);
+}
+
 static int perf_report__add_branch_hist_entry(struct perf_tool *tool,
                                        struct addr_location *al,
                                        struct perf_sample *sample,
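
perf_report_config() hooks into perf_config(), so the new group view can also be enabled permanently from the configuration file rather than only via the --group option added further down. Assuming perf keeps the git-style section/key layout for ~/.perfconfig, the variable the callback looks for would be written as:

[report]
	group = true

perf_config_bool() parses the value as a boolean; any other variable falls through to perf_default_config() unchanged.
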
@@ -299,6 +310,21 @@ static size_t hists__fprintf_nr_sample_events(struct hists *self,
        char unit;
        unsigned long nr_samples = self->stats.nr_events[PERF_RECORD_SAMPLE];
        u64 nr_events = self->stats.total_period;
+       struct perf_evsel *evsel = hists_to_evsel(self);
+       char buf[512];
+       size_t size = sizeof(buf);
+
+       if (symbol_conf.event_group && evsel->nr_members > 1) {
+               struct perf_evsel *pos;
+
+               perf_evsel__group_desc(evsel, buf, size);
+               evname = buf;
+
+               for_each_group_member(pos, evsel) {
+                       nr_samples += pos->hists.stats.nr_events[PERF_RECORD_SAMPLE];
+                       nr_events += pos->hists.stats.total_period;
+               }
+       }
 
        nr_samples = convert_unit(nr_samples, &unit);
        ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit);
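
When groups are displayed together, the header totals printed here are the leader's own numbers plus those of every group member, which is what the for_each_group_member() loop above accumulates. A reduced, self-contained sketch of that accumulation over a hypothetical leader/member layout (not perf's evsel or hists structures):

#include <stdio.h>

struct counter {
    const char        *name;
    unsigned long      nr_samples;
    unsigned long long total_period;
};

/* Fold the members' stats into the leader's before printing. */
static void group_totals(const struct counter *leader,
                         const struct counter *members, int nr_members,
                         unsigned long *samples, unsigned long long *period)
{
    *samples = leader->nr_samples;
    *period  = leader->total_period;

    for (int i = 0; i < nr_members; i++) {
        *samples += members[i].nr_samples;
        *period  += members[i].total_period;
    }
}

int main(void)
{
    struct counter leader    = { "cycles", 1000, 2000000ULL };
    struct counter members[] = { { "instructions", 900, 1500000ULL } };
    unsigned long samples;
    unsigned long long period;

    group_totals(&leader, members, 1, &samples, &period);
    printf("# Samples: %lu, total period: %llu\n", samples, period);
    return 0;
}
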
@@ -319,6 +345,10 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
                struct hists *hists = &pos->hists;
                const char *evname = perf_evsel__name(pos);
 
+               if (symbol_conf.event_group &&
+                   !perf_evsel__is_group_leader(pos))
+                       continue;
+
                hists__fprintf_nr_sample_events(hists, evname, stdout);
                hists__fprintf(hists, true, 0, 0, stdout);
                fprintf(stdout, "\n\n");
@@ -372,7 +402,7 @@ static int __cmd_report(struct perf_report *rep)
        if (ret)
                goto out_delete;
 
-       kernel_map = session->host_machine.vmlinux_maps[MAP__FUNCTION];
+       kernel_map = session->machines.host.vmlinux_maps[MAP__FUNCTION];
        kernel_kmap = map__kmap(kernel_map);
        if (kernel_map == NULL ||
            (kernel_map->dso->hit &&
@@ -416,8 +446,16 @@ static int __cmd_report(struct perf_report *rep)
                        hists->symbol_filter_str = rep->symbol_filter_str;
 
                hists__collapse_resort(hists);
-               hists__output_resort(hists);
                nr_samples += hists->stats.nr_events[PERF_RECORD_SAMPLE];
+
+               /* Non-group events are considered as leaders */
+               if (symbol_conf.event_group &&
+                   !perf_evsel__is_group_leader(pos)) {
+                       struct hists *leader_hists = &pos->leader->hists;
+
+                       hists__match(leader_hists, hists);
+                       hists__link(leader_hists, hists);
+               }
        }
 
        if (nr_samples == 0) {
@@ -425,11 +463,22 @@ static int __cmd_report(struct perf_report *rep)
                goto out_delete;
        }
 
+       list_for_each_entry(pos, &session->evlist->entries, node)
+               hists__output_resort(&pos->hists);
+
        if (use_browser > 0) {
                if (use_browser == 1) {
-                       perf_evlist__tui_browse_hists(session->evlist, help,
-                                                     NULL,
-                                                     &session->header.env);
+                       ret = perf_evlist__tui_browse_hists(session->evlist,
+                                                       help,
+                                                       NULL,
+                                                       &session->header.env);
+                       /*
+                        * Usually "ret" is the last pressed key, and we only
+                        * care if the key notifies us to switch data file.
+                        */
+                       if (ret != K_SWITCH_INPUT_DATA)
+                               ret = 0;
+
                } else if (use_browser == 2) {
                        perf_evlist__gtk_browse_hists(session->evlist, help,
                                                      NULL);
@@ -595,8 +644,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
        OPT_BOOLEAN(0, "stdio", &report.use_stdio,
                    "Use the stdio interface"),
        OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
-                  "sort by key(s): pid, comm, dso, symbol, parent, dso_to,"
-                  " dso_from, symbol_to, symbol_from, mispredict"),
+                  "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline,"
+                  " dso_to, dso_from, symbol_to, symbol_from, mispredict"),
        OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
                    "Show sample percentage for different cpu modes"),
        OPT_STRING('p', "parent", &parent_pattern, "regex",
@@ -638,6 +687,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
                   "Specify disassembler style (e.g. -M intel for intel syntax)"),
        OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
                    "Show a column with the sum of periods"),
+       OPT_BOOLEAN(0, "group", &symbol_conf.event_group,
+                   "Show event group information together"),
        OPT_CALLBACK_NOOPT('b', "branch-stack", &sort__branch_mode, "",
                    "use branch records for histogram filling", parse_branch_mode),
        OPT_STRING(0, "objdump", &objdump_path, "path",
@@ -645,6 +696,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
        OPT_END()
        };
 
+       perf_config(perf_report_config, NULL);
+
        argc = parse_options(argc, argv, options, report_usage, 0);
 
        if (report.use_stdio)
@@ -663,6 +716,16 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
                else
                        input_name = "perf.data";
        }
+
+       if (strcmp(input_name, "-") != 0)
+               setup_browser(true);
+       else {
+               use_browser = 0;
+               perf_hpp__column_enable(PERF_HPP__OVERHEAD);
+               perf_hpp__init();
+       }
+
+repeat:
        session = perf_session__new(input_name, O_RDONLY,
                                    report.force, false, &report.tool);
        if (session == NULL)
@@ -688,14 +751,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
 
        }
 
-       if (strcmp(input_name, "-") != 0)
-               setup_browser(true);
-       else {
-               use_browser = 0;
-               perf_hpp__init();
-       }
-
-       setup_sorting(report_usage, options);
+       if (setup_sorting() < 0)
+               usage_with_options(report_usage, options);
 
        /*
         * Only in the newt browser are we doing integrated annotation,
@@ -763,6 +820,12 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
        }
 
        ret = __cmd_report(&report);
+       if (ret == K_SWITCH_INPUT_DATA) {
+               perf_session__delete(session);
+               goto repeat;
+       } else
+               ret = 0;
+
 error:
        perf_session__delete(session);
        return ret;
index cc28b85dabd5a6d6c5cd70d92c726f1fe7fb33f6..138229439a93f3c8527f3da1cd319779fe287fe7 100644 (file)
@@ -1475,9 +1475,9 @@ static int perf_sched__read_events(struct perf_sched *sched, bool destroy,
                        goto out_delete;
                }
 
-               sched->nr_events      = session->hists.stats.nr_events[0];
-               sched->nr_lost_events = session->hists.stats.total_lost;
-               sched->nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST];
+               sched->nr_events      = session->stats.nr_events[0];
+               sched->nr_lost_events = session->stats.total_lost;
+               sched->nr_lost_chunks = session->stats.nr_events[PERF_RECORD_LOST];
        }
 
        if (destroy)
index b363e7b292b26fa2ff41bd5a86977ec8a7f8e3c4..92d4658f56fb55a93415dbccb441c552724b0402 100644 (file)
@@ -692,7 +692,7 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
                            const char *arg, int unset __maybe_unused)
 {
        char *tok;
-       int i, imax = sizeof(all_output_options) / sizeof(struct output_option);
+       int i, imax = ARRAY_SIZE(all_output_options);
        int j;
        int rc = 0;
        char *str = strdup(arg);
@@ -909,18 +909,6 @@ static const char *ends_with(const char *str, const char *suffix)
        return NULL;
 }
 
-static char *ltrim(char *str)
-{
-       int len = strlen(str);
-
-       while (len && isspace(*str)) {
-               len--;
-               str++;
-       }
-
-       return str;
-}
-
 static int read_script_info(struct script_desc *desc, const char *filename)
 {
        char line[BUFSIZ], *p;
@@ -1487,7 +1475,8 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
                        return -1;
        }
 
-       perf_session__fprintf_info(session, stdout, show_full_info);
+       if (!script_name && !generate_script_lang)
+               perf_session__fprintf_info(session, stdout, show_full_info);
 
        if (!no_callchain)
                symbol_conf.use_callchain = true;
index c247faca7127c19e2ab7eae99be56310a35c7bb1..99848761f573883e320f2db06071c5c16984b123 100644 (file)
 #define CNTR_NOT_SUPPORTED     "<not supported>"
 #define CNTR_NOT_COUNTED       "<not counted>"
 
+static void print_stat(int argc, const char **argv);
+static void print_counter_aggr(struct perf_evsel *counter, char *prefix);
+static void print_counter(struct perf_evsel *counter, char *prefix);
+static void print_aggr_socket(char *prefix);
+
 static struct perf_evlist      *evsel_list;
 
 static struct perf_target      target = {
@@ -75,6 +80,7 @@ static int                    run_count                       =  1;
 static bool                    no_inherit                      = false;
 static bool                    scale                           =  true;
 static bool                    no_aggr                         = false;
+static bool                    aggr_socket                     = false;
 static pid_t                   child_pid                       = -1;
 static bool                    null_run                        =  false;
 static int                     detailed_run                    =  0;
@@ -87,6 +93,9 @@ static FILE                   *output                         = NULL;
 static const char              *pre_cmd                        = NULL;
 static const char              *post_cmd                       = NULL;
 static bool                    sync_run                        = false;
+static unsigned int            interval                        = 0;
+static struct timespec         ref_time;
+static struct cpu_map          *sock_map;
 
 static volatile int done = 0;
 
@@ -94,6 +103,28 @@ struct perf_stat {
        struct stats      res_stats[3];
 };
 
+static inline void diff_timespec(struct timespec *r, struct timespec *a,
+                                struct timespec *b)
+{
+       r->tv_sec = a->tv_sec - b->tv_sec;
+       if (a->tv_nsec < b->tv_nsec) {
+               r->tv_nsec = a->tv_nsec + 1000000000L - b->tv_nsec;
+               r->tv_sec--;
+       } else {
+               r->tv_nsec = a->tv_nsec - b->tv_nsec;
+       }
+}
+
+static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
+{
+       return (evsel->cpus && !target.cpu_list) ? evsel->cpus : evsel_list->cpus;
+}
+
+static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel)
+{
+       return perf_evsel__cpus(evsel)->nr;
+}
+
 static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
 {
        evsel->priv = zalloc(sizeof(struct perf_stat));
@@ -106,14 +137,27 @@ static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
        evsel->priv = NULL;
 }
 
-static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
+static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel)
 {
-       return (evsel->cpus && !target.cpu_list) ? evsel->cpus : evsel_list->cpus;
+       void *addr;
+       size_t sz;
+
+       sz = sizeof(*evsel->counts) +
+            (perf_evsel__nr_cpus(evsel) * sizeof(struct perf_counts_values));
+
+       addr = zalloc(sz);
+       if (!addr)
+               return -ENOMEM;
+
+       evsel->prev_raw_counts = addr;
+
+       return 0;
 }
 
-static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel)
+static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
 {
-       return perf_evsel__cpus(evsel)->nr;
+       free(evsel->prev_raw_counts);
+       evsel->prev_raw_counts = NULL;
 }
 
 static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
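
perf_evsel__alloc_prev_raw_counts() sizes its allocation as the counts header plus one perf_counts_values slot per CPU, i.e. the usual flexible-array idiom, mirroring how counter->counts->cpu[cpu] is indexed elsewhere in this file. A stripped-down illustration with hypothetical types (perf's real struct perf_counts is not spelled out in this hunk):

#include <stdlib.h>
#include <string.h>

struct counts_values {
    unsigned long long val, ena, run;
};

struct counts {
    int                  scaled;
    struct counts_values cpu[];         /* one slot per CPU, sized at allocation */
};

static struct counts *alloc_counts(int nr_cpus)
{
    size_t sz = sizeof(struct counts) +
                nr_cpus * sizeof(struct counts_values);
    struct counts *c = malloc(sz);

    if (c)
        memset(c, 0, sz);               /* zalloc() equivalent */
    return c;
}

int main(void)
{
    struct counts *c = alloc_counts(8);

    if (c)
        c->cpu[7].val = 42;             /* last valid slot for 8 CPUs */
    free(c);
    return 0;
}
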
@@ -132,8 +176,6 @@ static struct stats walltime_nsecs_stats;
 static int create_perf_stat_counter(struct perf_evsel *evsel)
 {
        struct perf_event_attr *attr = &evsel->attr;
-       bool exclude_guest_missing = false;
-       int ret;
 
        if (scale)
                attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
@@ -141,38 +183,16 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
 
        attr->inherit = !no_inherit;
 
-retry:
-       if (exclude_guest_missing)
-               evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
-
-       if (perf_target__has_cpu(&target)) {
-               ret = perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
-               if (ret)
-                       goto check_ret;
-               return 0;
-       }
+       if (perf_target__has_cpu(&target))
+               return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
 
        if (!perf_target__has_task(&target) &&
-           !perf_evsel__is_group_member(evsel)) {
+           perf_evsel__is_group_leader(evsel)) {
                attr->disabled = 1;
                attr->enable_on_exec = 1;
        }
 
-       ret = perf_evsel__open_per_thread(evsel, evsel_list->threads);
-       if (!ret)
-               return 0;
-       /* fall through */
-check_ret:
-       if (ret && errno == EINVAL) {
-               if (!exclude_guest_missing &&
-                   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
-                       pr_debug("Old kernel, cannot exclude "
-                                "guest or host samples.\n");
-                       exclude_guest_missing = true;
-                       goto retry;
-               }
-       }
-       return ret;
+       return perf_evsel__open_per_thread(evsel, evsel_list->threads);
 }
 
 /*
@@ -269,15 +289,79 @@ static int read_counter(struct perf_evsel *counter)
        return 0;
 }
 
+static void print_interval(void)
+{
+       static int num_print_interval;
+       struct perf_evsel *counter;
+       struct perf_stat *ps;
+       struct timespec ts, rs;
+       char prefix[64];
+
+       if (no_aggr) {
+               list_for_each_entry(counter, &evsel_list->entries, node) {
+                       ps = counter->priv;
+                       memset(ps->res_stats, 0, sizeof(ps->res_stats));
+                       read_counter(counter);
+               }
+       } else {
+               list_for_each_entry(counter, &evsel_list->entries, node) {
+                       ps = counter->priv;
+                       memset(ps->res_stats, 0, sizeof(ps->res_stats));
+                       read_counter_aggr(counter);
+               }
+       }
+       clock_gettime(CLOCK_MONOTONIC, &ts);
+       diff_timespec(&rs, &ts, &ref_time);
+       sprintf(prefix, "%6lu.%09lu%s", rs.tv_sec, rs.tv_nsec, csv_sep);
+
+       if (num_print_interval == 0 && !csv_output) {
+               if (aggr_socket)
+                       fprintf(output, "#           time socket cpus             counts events\n");
+               else if (no_aggr)
+                       fprintf(output, "#           time CPU                 counts events\n");
+               else
+                       fprintf(output, "#           time             counts events\n");
+       }
+
+       if (++num_print_interval == 25)
+               num_print_interval = 0;
+
+       if (aggr_socket)
+               print_aggr_socket(prefix);
+       else if (no_aggr) {
+               list_for_each_entry(counter, &evsel_list->entries, node)
+                       print_counter(counter, prefix);
+       } else {
+               list_for_each_entry(counter, &evsel_list->entries, node)
+                       print_counter_aggr(counter, prefix);
+       }
+}
+
 static int __run_perf_stat(int argc __maybe_unused, const char **argv)
 {
+       char msg[512];
        unsigned long long t0, t1;
        struct perf_evsel *counter;
+       struct timespec ts;
        int status = 0;
        int child_ready_pipe[2], go_pipe[2];
        const bool forks = (argc > 0);
        char buf;
 
+       if (interval) {
+               ts.tv_sec  = interval / 1000;
+               ts.tv_nsec = (interval % 1000) * 1000000;
+       } else {
+               ts.tv_sec  = 1;
+               ts.tv_nsec = 0;
+       }
+
+       if (aggr_socket
+           && cpu_map__build_socket_map(evsel_list->cpus, &sock_map)) {
+               perror("cannot build socket map");
+               return -1;
+       }
+
        if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
                perror("failed to create pipes");
                return -1;
@@ -348,20 +432,13 @@ static int __run_perf_stat(int argc __maybe_unused, const char **argv)
                                continue;
                        }
 
-                       if (errno == EPERM || errno == EACCES) {
-                               error("You may not have permission to collect %sstats.\n"
-                                     "\t Consider tweaking"
-                                     " /proc/sys/kernel/perf_event_paranoid or running as root.",
-                                     target.system_wide ? "system-wide " : "");
-                       } else {
-                               error("open_counter returned with %d (%s). "
-                                     "/bin/dmesg may provide additional information.\n",
-                                      errno, strerror(errno));
-                       }
+                       perf_evsel__open_strerror(counter, &target,
+                                                 errno, msg, sizeof(msg));
+                       ui__error("%s\n", msg);
+
                        if (child_pid != -1)
                                kill(child_pid, SIGTERM);
 
-                       pr_err("Not all events could be opened.\n");
                        return -1;
                }
                counter->supported = true;
@@ -377,14 +454,25 @@ static int __run_perf_stat(int argc __maybe_unused, const char **argv)
         * Enable counters and exec the command:
         */
        t0 = rdclock();
+       clock_gettime(CLOCK_MONOTONIC, &ref_time);
 
        if (forks) {
                close(go_pipe[1]);
+               if (interval) {
+                       while (!waitpid(child_pid, &status, WNOHANG)) {
+                               nanosleep(&ts, NULL);
+                               print_interval();
+                       }
+               }
                wait(&status);
                if (WIFSIGNALED(status))
                        psignal(WTERMSIG(status), argv[0]);
        } else {
-               while(!done) sleep(1);
+               while (!done) {
+                       nanosleep(&ts, NULL);
+                       if (interval)
+                               print_interval();
+               }
        }
 
        t1 = rdclock();
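
The new -I/--interval-print mode prints counts while the workload is still running: the parent polls the child with waitpid(..., WNOHANG), sleeps for the requested interval, and prefixes every block with the time elapsed since ref_time, computed exactly as diff_timespec() does above. A self-contained sketch of that loop with a generic child process standing in for the measured command (the counter reads themselves are omitted):

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/wait.h>

/* Same subtraction the patch adds as diff_timespec(). */
static void ts_sub(struct timespec *r, const struct timespec *a,
                   const struct timespec *b)
{
    r->tv_sec = a->tv_sec - b->tv_sec;
    if (a->tv_nsec < b->tv_nsec) {
        r->tv_nsec = a->tv_nsec + 1000000000L - b->tv_nsec;
        r->tv_sec--;
    } else {
        r->tv_nsec = a->tv_nsec - b->tv_nsec;
    }
}

int main(void)
{
    struct timespec ref, now, rs;
    struct timespec ts = { .tv_sec = 0, .tv_nsec = 500 * 1000000L }; /* 500 ms */
    pid_t child = fork();
    int status;

    if (child == 0) {                   /* stand-in for the measured workload */
        execlp("sleep", "sleep", "3", (char *)NULL);
        _exit(127);
    }

    clock_gettime(CLOCK_MONOTONIC, &ref);
    while (!waitpid(child, &status, WNOHANG)) {
        nanosleep(&ts, NULL);
        clock_gettime(CLOCK_MONOTONIC, &now);
        ts_sub(&rs, &now, &ref);
        printf("%6lu.%09lu  <counts would be read and printed here>\n",
               (unsigned long)rs.tv_sec, (unsigned long)rs.tv_nsec);
    }
    return 0;
}

In perf stat itself this is driven by e.g. "perf stat -I 1000 ...", with intervals below 100 ms rejected further down in cmd_stat().
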
@@ -454,13 +542,21 @@ static void print_noise(struct perf_evsel *evsel, double avg)
        print_noise_pct(stddev_stats(&ps->res_stats[0]), avg);
 }
 
-static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
+static void nsec_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
 {
        double msecs = avg / 1e6;
        char cpustr[16] = { '\0', };
        const char *fmt = csv_output ? "%s%.6f%s%s" : "%s%18.6f%s%-25s";
 
-       if (no_aggr)
+       if (aggr_socket)
+               sprintf(cpustr, "S%*d%s%*d%s",
+                       csv_output ? 0 : -5,
+                       cpu,
+                       csv_sep,
+                       csv_output ? 0 : 4,
+                       nr,
+                       csv_sep);
+       else if (no_aggr)
                sprintf(cpustr, "CPU%*d%s",
                        csv_output ? 0 : -4,
                        perf_evsel__cpus(evsel)->map[cpu], csv_sep);
@@ -470,7 +566,7 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
        if (evsel->cgrp)
                fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
 
-       if (csv_output)
+       if (csv_output || interval)
                return;
 
        if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
@@ -659,7 +755,7 @@ static void print_ll_cache_misses(int cpu,
        fprintf(output, " of all LL-cache hits   ");
 }
 
-static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
+static void abs_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
 {
        double total, ratio = 0.0;
        char cpustr[16] = { '\0', };
@@ -672,7 +768,15 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
        else
                fmt = "%s%18.0f%s%-25s";
 
-       if (no_aggr)
+       if (aggr_socket)
+               sprintf(cpustr, "S%*d%s%*d%s",
+                       csv_output ? 0 : -5,
+                       cpu,
+                       csv_sep,
+                       csv_output ? 0 : 4,
+                       nr,
+                       csv_sep);
+       else if (no_aggr)
                sprintf(cpustr, "CPU%*d%s",
                        csv_output ? 0 : -4,
                        perf_evsel__cpus(evsel)->map[cpu], csv_sep);
@@ -684,12 +788,11 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
        if (evsel->cgrp)
                fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
 
-       if (csv_output)
+       if (csv_output || interval)
                return;
 
        if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
                total = avg_stats(&runtime_cycles_stats[cpu]);
-
                if (total)
                        ratio = avg / total;
 
@@ -779,16 +882,83 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
        }
 }
 
+static void print_aggr_socket(char *prefix)
+{
+       struct perf_evsel *counter;
+       u64 ena, run, val;
+       int cpu, s, s2, sock, nr;
+
+       if (!sock_map)
+               return;
+
+       for (s = 0; s < sock_map->nr; s++) {
+               sock = cpu_map__socket(sock_map, s);
+               list_for_each_entry(counter, &evsel_list->entries, node) {
+                       val = ena = run = 0;
+                       nr = 0;
+                       for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
+                               s2 = cpu_map__get_socket(evsel_list->cpus, cpu);
+                               if (s2 != sock)
+                                       continue;
+                               val += counter->counts->cpu[cpu].val;
+                               ena += counter->counts->cpu[cpu].ena;
+                               run += counter->counts->cpu[cpu].run;
+                               nr++;
+                       }
+                       if (prefix)
+                               fprintf(output, "%s", prefix);
+
+                       if (run == 0 || ena == 0) {
+                               fprintf(output, "S%*d%s%*d%s%*s%s%*s",
+                                       csv_output ? 0 : -5,
+                                       s,
+                                       csv_sep,
+                                       csv_output ? 0 : 4,
+                                       nr,
+                                       csv_sep,
+                                       csv_output ? 0 : 18,
+                                       counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
+                                       csv_sep,
+                                       csv_output ? 0 : -24,
+                                       perf_evsel__name(counter));
+                               if (counter->cgrp)
+                                       fprintf(output, "%s%s",
+                                               csv_sep, counter->cgrp->name);
+
+                               fputc('\n', output);
+                               continue;
+                       }
+
+                       if (nsec_counter(counter))
+                               nsec_printout(sock, nr, counter, val);
+                       else
+                               abs_printout(sock, nr, counter, val);
+
+                       if (!csv_output) {
+                               print_noise(counter, 1.0);
+
+                               if (run != ena)
+                                       fprintf(output, "  (%.2f%%)",
+                                               100.0 * run / ena);
+                       }
+                       fputc('\n', output);
+               }
+       }
+}
+
 /*
  * Print out the results of a single counter:
  * aggregated counts in system-wide mode
  */
-static void print_counter_aggr(struct perf_evsel *counter)
+static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
 {
        struct perf_stat *ps = counter->priv;
        double avg = avg_stats(&ps->res_stats[0]);
        int scaled = counter->counts->scaled;
 
+       if (prefix)
+               fprintf(output, "%s", prefix);
+
        if (scaled == -1) {
                fprintf(output, "%*s%s%*s",
                        csv_output ? 0 : 18,
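
print_aggr_socket() folds the per-CPU counts into one line per processor socket: for each socket id it walks every CPU, skips those on other sockets, and sums val/ena/run. The grouping itself is simple; a reduced sketch with a hard-coded cpu-to-socket map (perf builds the real mapping with cpu_map__build_socket_map() earlier in this file):

#include <stdio.h>

#define NR_CPUS 8

/* Hypothetical topology: CPUs 0-3 on socket 0, CPUs 4-7 on socket 1. */
static const int cpu_socket[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

int main(void)
{
    unsigned long long val[NR_CPUS] = { 10, 11, 12, 13, 20, 21, 22, 23 };
    int sockets[] = { 0, 1 };
    int nr_sockets = sizeof(sockets) / sizeof(sockets[0]);

    for (int s = 0; s < nr_sockets; s++) {
        unsigned long long sum = 0;
        int nr = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            if (cpu_socket[cpu] != sockets[s])
                continue;
            sum += val[cpu];
            nr++;
        }
        printf("S%d %4d cpus %18llu\n", sockets[s], nr, sum);
    }
    return 0;
}

On the command line this is the new --aggr-socket mode, which cmd_stat() below restricts to system-wide (-a) runs and which implies no_aggr internally.
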
@@ -805,9 +975,9 @@ static void print_counter_aggr(struct perf_evsel *counter)
        }
 
        if (nsec_counter(counter))
-               nsec_printout(-1, counter, avg);
+               nsec_printout(-1, 0, counter, avg);
        else
-               abs_printout(-1, counter, avg);
+               abs_printout(-1, 0, counter, avg);
 
        print_noise(counter, avg);
 
@@ -831,7 +1001,7 @@ static void print_counter_aggr(struct perf_evsel *counter)
  * Print out the results of a single counter:
  * does not use aggregated counts in system-wide mode
  */
-static void print_counter(struct perf_evsel *counter)
+static void print_counter(struct perf_evsel *counter, char *prefix)
 {
        u64 ena, run, val;
        int cpu;
@@ -840,6 +1010,10 @@ static void print_counter(struct perf_evsel *counter)
                val = counter->counts->cpu[cpu].val;
                ena = counter->counts->cpu[cpu].ena;
                run = counter->counts->cpu[cpu].run;
+
+               if (prefix)
+                       fprintf(output, "%s", prefix);
+
                if (run == 0 || ena == 0) {
                        fprintf(output, "CPU%*d%s%*s%s%*s",
                                csv_output ? 0 : -4,
@@ -859,9 +1033,9 @@ static void print_counter(struct perf_evsel *counter)
                }
 
                if (nsec_counter(counter))
-                       nsec_printout(cpu, counter, val);
+                       nsec_printout(cpu, 0, counter, val);
                else
-                       abs_printout(cpu, counter, val);
+                       abs_printout(cpu, 0, counter, val);
 
                if (!csv_output) {
                        print_noise(counter, 1.0);
@@ -899,12 +1073,14 @@ static void print_stat(int argc, const char **argv)
                fprintf(output, ":\n\n");
        }
 
-       if (no_aggr) {
+       if (aggr_socket)
+               print_aggr_socket(NULL);
+       else if (no_aggr) {
                list_for_each_entry(counter, &evsel_list->entries, node)
-                       print_counter(counter);
+                       print_counter(counter, NULL);
        } else {
                list_for_each_entry(counter, &evsel_list->entries, node)
-                       print_counter_aggr(counter);
+                       print_counter_aggr(counter, NULL);
        }
 
        if (!csv_output) {
@@ -925,7 +1101,7 @@ static volatile int signr = -1;
 
 static void skip_signal(int signo)
 {
-       if(child_pid == -1)
+       if ((child_pid == -1) || interval)
                done = 1;
 
        signr = signo;
@@ -1145,6 +1321,9 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
                        "command to run prior to the measured command"),
        OPT_STRING(0, "post", &post_cmd, "command",
                        "command to run after to the measured command"),
+       OPT_UINTEGER('I', "interval-print", &interval,
+                   "print counts at regular interval in ms (>= 100)"),
+       OPT_BOOLEAN(0, "aggr-socket", &aggr_socket, "aggregate counts per processor socket"),
        OPT_END()
        };
        const char * const stat_usage[] = {
@@ -1231,6 +1410,14 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
                usage_with_options(stat_usage, options);
        }
 
+       if (aggr_socket) {
+               if (!perf_target__has_cpu(&target)) {
+                       fprintf(stderr, "--aggr-socket only available in system-wide mode (-a)\n");
+                       usage_with_options(stat_usage, options);
+               }
+               no_aggr = true;
+       }
+
        if (add_default_attributes())
                goto out;
 
@@ -1245,12 +1432,23 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
                usage_with_options(stat_usage, options);
                return -1;
        }
+       if (interval && interval < 100) {
+               pr_err("print interval must be >= 100ms\n");
+               usage_with_options(stat_usage, options);
+               return -1;
+       }
 
        list_for_each_entry(pos, &evsel_list->entries, node) {
                if (perf_evsel__alloc_stat_priv(pos) < 0 ||
                    perf_evsel__alloc_counts(pos, perf_evsel__nr_cpus(pos)) < 0)
                        goto out_free_fd;
        }
+       if (interval) {
+               list_for_each_entry(pos, &evsel_list->entries, node) {
+                       if (perf_evsel__alloc_prev_raw_counts(pos) < 0)
+                               goto out_free_fd;
+               }
+       }
 
        /*
         * We don't want to block the signals - that would cause
@@ -1260,6 +1458,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
         */
        atexit(sig_atexit);
        signal(SIGINT,  skip_signal);
+       signal(SIGCHLD, skip_signal);
        signal(SIGALRM, skip_signal);
        signal(SIGABRT, skip_signal);
 
@@ -1272,11 +1471,14 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
                status = run_perf_stat(argc, argv);
        }
 
-       if (status != -1)
+       if (status != -1 && !interval)
                print_stat(argc, argv);
 out_free_fd:
-       list_for_each_entry(pos, &evsel_list->entries, node)
+       list_for_each_entry(pos, &evsel_list->entries, node) {
                perf_evsel__free_stat_priv(pos);
+               perf_evsel__free_counts(pos);
+               perf_evsel__free_prev_raw_counts(pos);
+       }
        perf_evlist__delete_maps(evsel_list);
 out:
        perf_evlist__delete(evsel_list);
index c9ff3950cd4be55a487ff6cbda76b4f55c47ad5d..72f6eb7b4173c86f84fcf3b001f6eb9ae1bdd8b4 100644 (file)
 #include <linux/unistd.h>
 #include <linux/types.h>
 
-void get_term_dimensions(struct winsize *ws)
-{
-       char *s = getenv("LINES");
-
-       if (s != NULL) {
-               ws->ws_row = atoi(s);
-               s = getenv("COLUMNS");
-               if (s != NULL) {
-                       ws->ws_col = atoi(s);
-                       if (ws->ws_row && ws->ws_col)
-                               return;
-               }
-       }
-#ifdef TIOCGWINSZ
-       if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
-           ws->ws_row && ws->ws_col)
-               return;
-#endif
-       ws->ws_row = 25;
-       ws->ws_col = 80;
-}
+static volatile int done;
 
 static void perf_top__update_print_entries(struct perf_top *top)
 {
@@ -453,8 +433,10 @@ static int perf_top__key_mapped(struct perf_top *top, int c)
        return 0;
 }
 
-static void perf_top__handle_keypress(struct perf_top *top, int c)
+static bool perf_top__handle_keypress(struct perf_top *top, int c)
 {
+       bool ret = true;
+
        if (!perf_top__key_mapped(top, c)) {
                struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
                struct termios tc, save;
@@ -475,7 +457,7 @@ static void perf_top__handle_keypress(struct perf_top *top, int c)
 
                tcsetattr(0, TCSAFLUSH, &save);
                if (!perf_top__key_mapped(top, c))
-                       return;
+                       return ret;
        }
 
        switch (c) {
@@ -537,7 +519,8 @@ static void perf_top__handle_keypress(struct perf_top *top, int c)
                        printf("exiting.\n");
                        if (top->dump_symtab)
                                perf_session__fprintf_dsos(top->session, stderr);
-                       exit(0);
+                       ret = false;
+                       break;
                case 's':
                        perf_top__prompt_symbol(top, "Enter details symbol");
                        break;
@@ -560,6 +543,8 @@ static void perf_top__handle_keypress(struct perf_top *top, int c)
                default:
                        break;
        }
+
+       return ret;
 }
 
 static void perf_top__sort_new_samples(void *arg)
@@ -596,13 +581,12 @@ static void *display_thread_tui(void *arg)
         * via --uid.
         */
        list_for_each_entry(pos, &top->evlist->entries, node)
-               pos->hists.uid_filter_str = top->target.uid_str;
+               pos->hists.uid_filter_str = top->record_opts.target.uid_str;
 
        perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
                                      &top->session->header.env);
 
-       exit_browser(0);
-       exit(0);
+       done = 1;
        return NULL;
 }
 
@@ -626,7 +610,7 @@ repeat:
        /* trash return */
        getc(stdin);
 
-       while (1) {
+       while (!done) {
                perf_top__print_sym_table(top);
                /*
                 * Either timeout expired or we got an EINTR due to SIGWINCH,
@@ -640,15 +624,14 @@ repeat:
                                continue;
                        /* Fall thru */
                default:
-                       goto process_hotkey;
+                       c = getc(stdin);
+                       tcsetattr(0, TCSAFLUSH, &save);
+
+                       if (perf_top__handle_keypress(top, c))
+                               goto repeat;
+                       done = 1;
                }
        }
-process_hotkey:
-       c = getc(stdin);
-       tcsetattr(0, TCSAFLUSH, &save);
-
-       perf_top__handle_keypress(top, c);
-       goto repeat;
 
        return NULL;
 }
@@ -716,7 +699,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
                static struct intlist *seen;
 
                if (!seen)
-                       seen = intlist__new();
+                       seen = intlist__new(NULL);
 
                if (!intlist__has_entry(seen, event->ip.pid)) {
                        pr_err("Can't find guest [%d]'s kernel information\n",
@@ -727,8 +710,8 @@ static void perf_event__process_sample(struct perf_tool *tool,
        }
 
        if (!machine) {
-               pr_err("%u unprocessable samples recorded.",
-                      top->session->hists.stats.nr_unprocessable_samples++);
+               pr_err("%u unprocessable samples recorded.\r",
+                      top->session->stats.nr_unprocessable_samples++);
                return;
        }
 
@@ -847,13 +830,13 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
                        ++top->us_samples;
                        if (top->hide_user_symbols)
                                continue;
-                       machine = perf_session__find_host_machine(session);
+                       machine = &session->machines.host;
                        break;
                case PERF_RECORD_MISC_KERNEL:
                        ++top->kernel_samples;
                        if (top->hide_kernel_symbols)
                                continue;
-                       machine = perf_session__find_host_machine(session);
+                       machine = &session->machines.host;
                        break;
                case PERF_RECORD_MISC_GUEST_KERNEL:
                        ++top->guest_kernel_samples;
@@ -878,7 +861,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
                        hists__inc_nr_events(&evsel->hists, event->header.type);
                        machine__process_event(machine, event);
                } else
-                       ++session->hists.stats.nr_unknown_events;
+                       ++session->stats.nr_unknown_events;
        }
 }
 
@@ -890,123 +873,42 @@ static void perf_top__mmap_read(struct perf_top *top)
                perf_top__mmap_read_idx(top, i);
 }
 
-static void perf_top__start_counters(struct perf_top *top)
+static int perf_top__start_counters(struct perf_top *top)
 {
+       char msg[512];
        struct perf_evsel *counter;
        struct perf_evlist *evlist = top->evlist;
+       struct perf_record_opts *opts = &top->record_opts;
 
-       if (top->group)
-               perf_evlist__set_leader(evlist);
+       perf_evlist__config(evlist, opts);
 
        list_for_each_entry(counter, &evlist->entries, node) {
-               struct perf_event_attr *attr = &counter->attr;
-
-               attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
-
-               if (top->freq) {
-                       attr->sample_type |= PERF_SAMPLE_PERIOD;
-                       attr->freq        = 1;
-                       attr->sample_freq = top->freq;
-               }
-
-               if (evlist->nr_entries > 1) {
-                       attr->sample_type |= PERF_SAMPLE_ID;
-                       attr->read_format |= PERF_FORMAT_ID;
-               }
-
-               if (perf_target__has_cpu(&top->target))
-                       attr->sample_type |= PERF_SAMPLE_CPU;
-
-               if (symbol_conf.use_callchain)
-                       attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
-
-               attr->mmap = 1;
-               attr->comm = 1;
-               attr->inherit = top->inherit;
-fallback_missing_features:
-               if (top->exclude_guest_missing)
-                       attr->exclude_guest = attr->exclude_host = 0;
-retry_sample_id:
-               attr->sample_id_all = top->sample_id_all_missing ? 0 : 1;
 try_again:
                if (perf_evsel__open(counter, top->evlist->cpus,
                                     top->evlist->threads) < 0) {
-                       int err = errno;
-
-                       if (err == EPERM || err == EACCES) {
-                               ui__error_paranoid();
-                               goto out_err;
-                       } else if (err == EINVAL) {
-                               if (!top->exclude_guest_missing &&
-                                   (attr->exclude_guest || attr->exclude_host)) {
-                                       pr_debug("Old kernel, cannot exclude "
-                                                "guest or host samples.\n");
-                                       top->exclude_guest_missing = true;
-                                       goto fallback_missing_features;
-                               } else if (!top->sample_id_all_missing) {
-                                       /*
-                                        * Old kernel, no attr->sample_id_type_all field
-                                        */
-                                       top->sample_id_all_missing = true;
-                                       goto retry_sample_id;
-                               }
-                       }
-                       /*
-                        * If it's cycles then fall back to hrtimer
-                        * based cpu-clock-tick sw counter, which
-                        * is always available even if no PMU support:
-                        */
-                       if ((err == ENOENT || err == ENXIO) &&
-                           (attr->type == PERF_TYPE_HARDWARE) &&
-                           (attr->config == PERF_COUNT_HW_CPU_CYCLES)) {
-
+                       if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
                                if (verbose)
-                                       ui__warning("Cycles event not supported,\n"
-                                                   "trying to fall back to cpu-clock-ticks\n");
-
-                               attr->type = PERF_TYPE_SOFTWARE;
-                               attr->config = PERF_COUNT_SW_CPU_CLOCK;
-                               if (counter->name) {
-                                       free(counter->name);
-                                       counter->name = NULL;
-                               }
+                                       ui__warning("%s\n", msg);
                                goto try_again;
                        }
 
-                       if (err == ENOENT) {
-                               ui__error("The %s event is not supported.\n",
-                                         perf_evsel__name(counter));
-                               goto out_err;
-                       } else if (err == EMFILE) {
-                               ui__error("Too many events are opened.\n"
-                                           "Try again after reducing the number of events\n");
-                               goto out_err;
-                       } else if ((err == EOPNOTSUPP) && (attr->precise_ip)) {
-                               ui__error("\'precise\' request may not be supported. "
-                                         "Try removing 'p' modifier\n");
-                               goto out_err;
-                       }
-
-                       ui__error("The sys_perf_event_open() syscall "
-                                   "returned with %d (%s).  /bin/dmesg "
-                                   "may provide additional information.\n"
-                                   "No CONFIG_PERF_EVENTS=y kernel support "
-                                   "configured?\n", err, strerror(err));
+                       perf_evsel__open_strerror(counter, &opts->target,
+                                                 errno, msg, sizeof(msg));
+                       ui__error("%s\n", msg);
                        goto out_err;
                }
        }
 
-       if (perf_evlist__mmap(evlist, top->mmap_pages, false) < 0) {
+       if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
                ui__error("Failed to mmap with %d (%s)\n",
                            errno, strerror(errno));
                goto out_err;
        }
 
-       return;
+       return 0;
 
 out_err:
-       exit_browser(0);
-       exit(0);
+       return -1;
 }
 
 static int perf_top__setup_sample_type(struct perf_top *top)
@@ -1016,7 +918,7 @@ static int perf_top__setup_sample_type(struct perf_top *top)
                        ui__error("Selected -g but \"sym\" not present in --sort/-s.");
                        return -EINVAL;
                }
-       } else if (!top->dont_use_callchains && callchain_param.mode != CHAIN_NONE) {
+       } else if (callchain_param.mode != CHAIN_NONE) {
                if (callchain_register_param(&callchain_param) < 0) {
                        ui__error("Can't register callchain params.\n");
                        return -EINVAL;
@@ -1028,6 +930,7 @@ static int perf_top__setup_sample_type(struct perf_top *top)
 
 static int __cmd_top(struct perf_top *top)
 {
+       struct perf_record_opts *opts = &top->record_opts;
        pthread_t thread;
        int ret;
        /*
@@ -1042,26 +945,42 @@ static int __cmd_top(struct perf_top *top)
        if (ret)
                goto out_delete;
 
-       if (perf_target__has_task(&top->target))
+       if (perf_target__has_task(&opts->target))
                perf_event__synthesize_thread_map(&top->tool, top->evlist->threads,
                                                  perf_event__process,
-                                                 &top->session->host_machine);
+                                                 &top->session->machines.host);
        else
                perf_event__synthesize_threads(&top->tool, perf_event__process,
-                                              &top->session->host_machine);
-       perf_top__start_counters(top);
+                                              &top->session->machines.host);
+
+       ret = perf_top__start_counters(top);
+       if (ret)
+               goto out_delete;
+
        top->session->evlist = top->evlist;
        perf_session__set_id_hdr_size(top->session);
 
+       /*
+        * When perf is starting the traced process, all the events (apart from
+        * group members) have enable_on_exec=1 set, so don't spoil it by
+        * prematurely enabling them.
+        *
+        * XXX 'top' still doesn't start workloads like record, trace, but should,
+        * so leave the check here.
+        */
+        if (!perf_target__none(&opts->target))
+                perf_evlist__enable(top->evlist);
+
        /* Wait for a minimal set of events before starting the snapshot */
        poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
 
        perf_top__mmap_read(top);
 
+       ret = -1;
        if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
                                                            display_thread), top)) {
                ui__error("Could not create display thread.\n");
-               exit(-1);
+               goto out_delete;
        }
 
        if (top->realtime_prio) {
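
The comment in this hunk leans on a perf_event_attr property that is easy to miss: a counter opened with disabled = 1 and enable_on_exec = 1 stays dormant until the monitored task calls exec(), so nothing that runs before the workload starts is counted, and enabling it by hand (perf_evlist__enable() above) would spoil that. A minimal sketch of the pattern using the raw syscall; this is generic perf_event_open() usage, not the evlist code being patched:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
    return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
    struct perf_event_attr attr;
    unsigned long long count;
    pid_t child;
    int fd, status;

    child = fork();
    if (child == 0) {
        sleep(1);                       /* give the parent time to attach */
        execlp("true", "true", (char *)NULL);
        _exit(127);
    }

    memset(&attr, 0, sizeof(attr));
    attr.size   = sizeof(attr);
    attr.type   = PERF_TYPE_HARDWARE;
    attr.config = PERF_COUNT_HW_INSTRUCTIONS;
    attr.disabled       = 1;            /* do not count yet ...            */
    attr.enable_on_exec = 1;            /* ... start automatically at exec */

    fd = perf_event_open(&attr, child, -1, -1, 0);
    if (fd < 0) {
        perror("perf_event_open");
        return 1;
    }

    waitpid(child, &status, 0);
    if (read(fd, &count, sizeof(count)) == sizeof(count))
        printf("instructions counted after exec: %llu\n", count);
    close(fd);
    return 0;
}
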
@@ -1070,11 +989,11 @@ static int __cmd_top(struct perf_top *top)
                param.sched_priority = top->realtime_prio;
                if (sched_setscheduler(0, SCHED_FIFO, &param)) {
                        ui__error("Could not set realtime priority.\n");
-                       exit(-1);
+                       goto out_delete;
                }
        }
 
-       while (1) {
+       while (!done) {
                u64 hits = top->samples;
 
                perf_top__mmap_read(top);
@@ -1083,126 +1002,67 @@ static int __cmd_top(struct perf_top *top)
                        ret = poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
        }
 
+       ret = 0;
 out_delete:
        perf_session__delete(top->session);
        top->session = NULL;
 
-       return 0;
+       return ret;
 }
 
 static int
 parse_callchain_opt(const struct option *opt, const char *arg, int unset)
 {
-       struct perf_top *top = (struct perf_top *)opt->value;
-       char *tok, *tok2;
-       char *endptr;
-
        /*
         * --no-call-graph
         */
-       if (unset) {
-               top->dont_use_callchains = true;
+       if (unset)
                return 0;
-       }
 
        symbol_conf.use_callchain = true;
 
-       if (!arg)
-               return 0;
-
-       tok = strtok((char *)arg, ",");
-       if (!tok)
-               return -1;
-
-       /* get the output mode */
-       if (!strncmp(tok, "graph", strlen(arg)))
-               callchain_param.mode = CHAIN_GRAPH_ABS;
-
-       else if (!strncmp(tok, "flat", strlen(arg)))
-               callchain_param.mode = CHAIN_FLAT;
-
-       else if (!strncmp(tok, "fractal", strlen(arg)))
-               callchain_param.mode = CHAIN_GRAPH_REL;
-
-       else if (!strncmp(tok, "none", strlen(arg))) {
-               callchain_param.mode = CHAIN_NONE;
-               symbol_conf.use_callchain = false;
-
-               return 0;
-       } else
-               return -1;
-
-       /* get the min percentage */
-       tok = strtok(NULL, ",");
-       if (!tok)
-               goto setup;
-
-       callchain_param.min_percent = strtod(tok, &endptr);
-       if (tok == endptr)
-               return -1;
-
-       /* get the print limit */
-       tok2 = strtok(NULL, ",");
-       if (!tok2)
-               goto setup;
-
-       if (tok2[0] != 'c') {
-               callchain_param.print_limit = strtod(tok2, &endptr);
-               tok2 = strtok(NULL, ",");
-               if (!tok2)
-                       goto setup;
-       }
-
-       /* get the call chain order */
-       if (!strcmp(tok2, "caller"))
-               callchain_param.order = ORDER_CALLER;
-       else if (!strcmp(tok2, "callee"))
-               callchain_param.order = ORDER_CALLEE;
-       else
-               return -1;
-setup:
-       if (callchain_register_param(&callchain_param) < 0) {
-               fprintf(stderr, "Can't register callchain params\n");
-               return -1;
-       }
-       return 0;
+       return record_parse_callchain_opt(opt, arg, unset);
 }
 
 int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
 {
-       struct perf_evsel *pos;
        int status;
        char errbuf[BUFSIZ];
        struct perf_top top = {
                .count_filter        = 5,
                .delay_secs          = 2,
-               .freq                = 4000, /* 4 KHz */
-               .mmap_pages          = 128,
-               .sym_pcnt_filter     = 5,
-               .target              = {
-                       .uses_mmap   = true,
+               .record_opts = {
+                       .mmap_pages     = UINT_MAX,
+                       .user_freq      = UINT_MAX,
+                       .user_interval  = ULLONG_MAX,
+                       .freq           = 4000, /* 4 KHz */
+                       .target              = {
+                               .uses_mmap   = true,
+                       },
                },
+               .sym_pcnt_filter     = 5,
        };
-       char callchain_default_opt[] = "fractal,0.5,callee";
+       struct perf_record_opts *opts = &top.record_opts;
+       struct perf_target *target = &opts->target;
        const struct option options[] = {
        OPT_CALLBACK('e', "event", &top.evlist, "event",
                     "event selector. use 'perf list' to list available events",
                     parse_events_option),
-       OPT_INTEGER('c', "count", &top.default_interval,
-                   "event period to sample"),
-       OPT_STRING('p', "pid", &top.target.pid, "pid",
+       OPT_U64('c', "count", &opts->user_interval, "event period to sample"),
+       OPT_STRING('p', "pid", &target->pid, "pid",
                    "profile events on existing process id"),
-       OPT_STRING('t', "tid", &top.target.tid, "tid",
+       OPT_STRING('t', "tid", &target->tid, "tid",
                    "profile events on existing thread id"),
-       OPT_BOOLEAN('a', "all-cpus", &top.target.system_wide,
+       OPT_BOOLEAN('a', "all-cpus", &target->system_wide,
                            "system-wide collection from all CPUs"),
-       OPT_STRING('C', "cpu", &top.target.cpu_list, "cpu",
+       OPT_STRING('C', "cpu", &target->cpu_list, "cpu",
                    "list of cpus to monitor"),
        OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
                   "file", "vmlinux pathname"),
        OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
                    "hide kernel symbols"),
-       OPT_UINTEGER('m', "mmap-pages", &top.mmap_pages, "number of mmap data pages"),
+       OPT_UINTEGER('m', "mmap-pages", &opts->mmap_pages,
+                    "number of mmap data pages"),
        OPT_INTEGER('r', "realtime", &top.realtime_prio,
                    "collect data with this RT SCHED_FIFO priority"),
        OPT_INTEGER('d', "delay", &top.delay_secs,
@@ -1211,16 +1071,14 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
                            "dump the symbol table used for profiling"),
        OPT_INTEGER('f', "count-filter", &top.count_filter,
                    "only display functions with more events than this"),
-       OPT_BOOLEAN('g', "group", &top.group,
+       OPT_BOOLEAN('g', "group", &opts->group,
                            "put the counters into a counter group"),
-       OPT_BOOLEAN('i', "inherit", &top.inherit,
-                   "child tasks inherit counters"),
+       OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit,
+                   "child tasks do not inherit counters"),
        OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
                    "symbol to annotate"),
-       OPT_BOOLEAN('z', "zero", &top.zero,
-                   "zero history across updates"),
-       OPT_INTEGER('F', "freq", &top.freq,
-                   "profile at this frequency"),
+       OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"),
+       OPT_UINTEGER('F', "freq", &opts->user_freq, "profile at this frequency"),
        OPT_INTEGER('E', "entries", &top.print_entries,
                    "display this many functions"),
        OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
@@ -1233,10 +1091,9 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
                   "sort by key(s): pid, comm, dso, symbol, parent"),
        OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
                    "Show a column with the number of samples"),
-       OPT_CALLBACK_DEFAULT('G', "call-graph", &top, "output_type,min_percent, call_order",
-                    "Display callchains using output_type (graph, flat, fractal, or none), min percent threshold and callchain order. "
-                    "Default: fractal,0.5,callee", &parse_callchain_opt,
-                    callchain_default_opt),
+       OPT_CALLBACK_DEFAULT('G', "call-graph", &top.record_opts,
+                            "mode[,dump_size]", record_callchain_help,
+                            &parse_callchain_opt, "fp"),
        OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
                    "Show a column with the sum of periods"),
        OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
@@ -1251,7 +1108,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
                    "Display raw encoding of assembly instructions (default)"),
        OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
                   "Specify disassembler style (e.g. -M intel for intel syntax)"),
-       OPT_STRING('u', "uid", &top.target.uid_str, "user", "user to profile"),
+       OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
        OPT_END()
        };
        const char * const top_usage[] = {
@@ -1272,7 +1129,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
        if (sort_order == default_sort_order)
                sort_order = "dso,symbol";
 
-       setup_sorting(top_usage, options);
+       if (setup_sorting() < 0)
+               usage_with_options(top_usage, options);
 
        if (top.use_stdio)
                use_browser = 0;
@@ -1281,33 +1139,33 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
 
        setup_browser(false);
 
-       status = perf_target__validate(&top.target);
+       status = perf_target__validate(target);
        if (status) {
-               perf_target__strerror(&top.target, status, errbuf, BUFSIZ);
+               perf_target__strerror(target, status, errbuf, BUFSIZ);
                ui__warning("%s", errbuf);
        }
 
-       status = perf_target__parse_uid(&top.target);
+       status = perf_target__parse_uid(target);
        if (status) {
                int saved_errno = errno;
 
-               perf_target__strerror(&top.target, status, errbuf, BUFSIZ);
+               perf_target__strerror(target, status, errbuf, BUFSIZ);
                ui__error("%s", errbuf);
 
                status = -saved_errno;
                goto out_delete_evlist;
        }
 
-       if (perf_target__none(&top.target))
-               top.target.system_wide = true;
+       if (perf_target__none(target))
+               target->system_wide = true;
 
-       if (perf_evlist__create_maps(top.evlist, &top.target) < 0)
+       if (perf_evlist__create_maps(top.evlist, target) < 0)
                usage_with_options(top_usage, options);
 
        if (!top.evlist->nr_entries &&
            perf_evlist__add_default(top.evlist) < 0) {
                ui__error("Not enough memory for event selector list\n");
-               return -ENOMEM;
+               goto out_delete_maps;
        }
 
        symbol_conf.nr_events = top.evlist->nr_entries;
@@ -1315,24 +1173,22 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
        if (top.delay_secs < 1)
                top.delay_secs = 1;
 
+       if (opts->user_interval != ULLONG_MAX)
+               opts->default_interval = opts->user_interval;
+       if (opts->user_freq != UINT_MAX)
+               opts->freq = opts->user_freq;
+
        /*
         * User specified count overrides default frequency.
         */
-       if (top.default_interval)
-               top.freq = 0;
-       else if (top.freq) {
-               top.default_interval = top.freq;
+       if (opts->default_interval)
+               opts->freq = 0;
+       else if (opts->freq) {
+               opts->default_interval = opts->freq;
        } else {
                ui__error("frequency and count are zero, aborting\n");
-               exit(EXIT_FAILURE);
-       }
-
-       list_for_each_entry(pos, &top.evlist->entries, node) {
-               /*
-                * Fill in the ones not specifically initialized via -c:
-                */
-               if (!pos->attr.sample_period)
-                       pos->attr.sample_period = top.default_interval;
+               status = -EINVAL;
+               goto out_delete_maps;
        }
 
        top.sym_evsel = perf_evlist__first(top.evlist);
@@ -1365,6 +1221,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
 
        status = __cmd_top(&top);
 
+out_delete_maps:
+       perf_evlist__delete_maps(top.evlist);
 out_delete_evlist:
        perf_evlist__delete(top.evlist);
 
index 7932ffa298894a00694b82c6f26c058acd3760f9..d222d7fc7e960c7b541d507d524b66790a7d93b0 100644 (file)
@@ -455,7 +455,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
                goto out_delete_evlist;
        }
 
-       perf_evlist__config_attrs(evlist, &trace->opts);
+       perf_evlist__config(evlist, &trace->opts);
 
        signal(SIGCHLD, sig_handler);
        signal(SIGINT, sig_handler);
index f5ac77485a4f6124c3216c93556fb26c33ae79c9..b4eabb44e381c8d74f95362fb6ece61bdc32bf50 100644 (file)
@@ -225,3 +225,14 @@ int main(void)
        return on_exit(NULL, NULL);
 }
 endef
+
+define SOURCE_LIBNUMA
+#include <numa.h>
+#include <numaif.h>
+
+int main(void)
+{
+       numa_available();
+       return 0;
+}
+endef
\ No newline at end of file
index e5413125e6bbaef823b773b7a5b712d30c89a0b1..8ef3bd30a5492afa002c928b4483e03ac16db790 100644 (file)
@@ -13,7 +13,7 @@ newline := $(newline)
 # what should replace a newline when escaping
 # newlines; the default is a bizarre string.
 #
-nl-escape = $(or $(1),m822df3020w6a44id34bt574ctac44eb9f4n)
+nl-escape = $(if $(1),$(1),m822df3020w6a44id34bt574ctac44eb9f4n)
 
 # escape-nl
 #
@@ -173,9 +173,9 @@ _ge-abspath = $(if $(is-executable),$(1))
 # Usage: absolute-executable-path-or-empty = $(call get-executable-or-default,variable,default)
 #
 define get-executable-or-default
-$(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2)))
+$(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2),$(1)))
 endef
-_ge_attempt = $(or $(get-executable),$(_gea_warn),$(call _gea_err,$(2)))
+_ge_attempt = $(if $(get-executable),$(get-executable),$(_gea_warn)$(call _gea_err,$(2)))
 _gea_warn = $(warning The path '$(1)' is not executable.)
 _gea_err  = $(if $(1),$(error Please set '$(1)' appropriately))
 
index 0f661fbce6a8c3d318f20b255a407ed41d54fead..095b88207cd3e8f952b1b9676a0abf57f91dde50 100644 (file)
@@ -328,14 +328,23 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
        if (S_ISFIFO(st.st_mode) || S_ISSOCK(st.st_mode))
                return 0;
 
+       status = 1;
        /* Check for ENOSPC and EIO errors.. */
-       if (fflush(stdout))
-               die("write failure on standard output: %s", strerror(errno));
-       if (ferror(stdout))
-               die("unknown write failure on standard output");
-       if (fclose(stdout))
-               die("close failed on standard output: %s", strerror(errno));
-       return 0;
+       if (fflush(stdout)) {
+               fprintf(stderr, "write failure on standard output: %s", strerror(errno));
+               goto out;
+       }
+       if (ferror(stdout)) {
+               fprintf(stderr, "unknown write failure on standard output");
+               goto out;
+       }
+       if (fclose(stdout)) {
+               fprintf(stderr, "close failed on standard output: %s", strerror(errno));
+               goto out;
+       }
+       status = 0;
+out:
+       return status;
 }
 
 static void handle_internal_command(int argc, const char **argv)
@@ -467,7 +476,8 @@ int main(int argc, const char **argv)
                cmd += 5;
                argv[0] = cmd;
                handle_internal_command(argc, argv);
-               die("cannot handle %s internally", cmd);
+               fprintf(stderr, "cannot handle %s internally", cmd);
+               goto out;
        }
 
        /* Look for flags.. */
@@ -485,7 +495,7 @@ int main(int argc, const char **argv)
                printf("\n usage: %s\n\n", perf_usage_string);
                list_common_cmds_help();
                printf("\n %s\n\n", perf_more_info_string);
-               exit(1);
+               goto out;
        }
        cmd = argv[0];
 
@@ -517,7 +527,7 @@ int main(int argc, const char **argv)
                        fprintf(stderr, "Expansion of alias '%s' failed; "
                                "'%s' is not a perf-command\n",
                                cmd, argv[0]);
-                       exit(1);
+                       goto out;
                }
                if (!done_help) {
                        cmd = argv[0] = help_unknown_cmd(cmd);
@@ -528,6 +538,6 @@ int main(int argc, const char **argv)
 
        fprintf(stderr, "Failed to run command '%s': %s\n",
                cmd, strerror(errno));
-
+out:
        return 1;
 }
index 2c340e7da458e4fa53b2c08914f1acd976fa90c5..c2206c87fc9fefaca6c6a75d35e7210fdd4eb707 100644 (file)
@@ -1,10 +1,6 @@
 #ifndef _PERF_PERF_H
 #define _PERF_PERF_H
 
-struct winsize;
-
-void get_term_dimensions(struct winsize *ws);
-
 #include <asm/unistd.h>
 
 #if defined(__i386__)
@@ -107,32 +103,6 @@ void get_term_dimensions(struct winsize *ws);
 #include "util/types.h"
 #include <stdbool.h>
 
-struct perf_mmap {
-       void                    *base;
-       int                     mask;
-       unsigned int            prev;
-};
-
-static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
-{
-       struct perf_event_mmap_page *pc = mm->base;
-       int head = pc->data_head;
-       rmb();
-       return head;
-}
-
-static inline void perf_mmap__write_tail(struct perf_mmap *md,
-                                        unsigned long tail)
-{
-       struct perf_event_mmap_page *pc = md->base;
-
-       /*
-        * ensure all reads are done before we write the tail out.
-        */
-       /* mb(); */
-       pc->data_tail = tail;
-}
-
 /*
  * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
  * counters in the current task.
@@ -237,8 +207,6 @@ struct perf_record_opts {
        bool         raw_samples;
        bool         sample_address;
        bool         sample_time;
-       bool         sample_id_all_missing;
-       bool         exclude_guest_missing;
        bool         period;
        unsigned int freq;
        unsigned int mmap_pages;
diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-record b/tools/perf/scripts/perl/bin/workqueue-stats-record
deleted file mode 100644 (file)
index 8edda90..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-perf record -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion $@
diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-report b/tools/perf/scripts/perl/bin/workqueue-stats-report
deleted file mode 100644 (file)
index 6d91411..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-# description: workqueue stats (ins/exe/create/destroy)
-perf script $@ -s "$PERF_EXEC_PATH"/scripts/perl/workqueue-stats.pl
index 4bb3ecd33472498b186afe3b809d9431737e4c03..8b20787021c1d211fc709b21ac6b77deaf3a288b 100644 (file)
@@ -17,6 +17,7 @@ use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
 use lib "./Perf-Trace-Util/lib";
 use Perf::Trace::Core;
 use Perf::Trace::Util;
+use POSIX qw/SIGALRM SA_RESTART/;
 
 my $default_interval = 3;
 my $nlines = 20;
@@ -90,7 +91,10 @@ sub syscalls::sys_enter_write
 
 sub trace_begin
 {
-    $SIG{ALRM} = \&set_print_pending;
+    my $sa = POSIX::SigAction->new(\&set_print_pending);
+    $sa->flags(SA_RESTART);
+    $sa->safe(1);
+    POSIX::sigaction(SIGALRM, $sa) or die "Can't set SIGALRM handler: $!\n";
     alarm 1;
 }
 
diff --git a/tools/perf/scripts/perl/workqueue-stats.pl b/tools/perf/scripts/perl/workqueue-stats.pl
deleted file mode 100644 (file)
index a8eaff5..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/usr/bin/perl -w
-# (c) 2009, Tom Zanussi <tzanussi@gmail.com>
-# Licensed under the terms of the GNU GPL License version 2
-
-# Displays workqueue stats
-#
-# Usage:
-#
-#   perf record -c 1 -f -a -R -e workqueue:workqueue_creation -e
-#     workqueue:workqueue_destruction -e workqueue:workqueue_execution
-#     -e workqueue:workqueue_insertion
-#
-#   perf script -p -s tools/perf/scripts/perl/workqueue-stats.pl
-
-use 5.010000;
-use strict;
-use warnings;
-
-use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib";
-use lib "./Perf-Trace-Util/lib";
-use Perf::Trace::Core;
-use Perf::Trace::Util;
-
-my @cpus;
-
-sub workqueue::workqueue_destruction
-{
-    my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
-       $common_pid, $common_comm,
-       $thread_comm, $thread_pid) = @_;
-
-    $cpus[$common_cpu]{$thread_pid}{destroyed}++;
-    $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm;
-}
-
-sub workqueue::workqueue_creation
-{
-    my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
-       $common_pid, $common_comm,
-       $thread_comm, $thread_pid, $cpu) = @_;
-
-    $cpus[$common_cpu]{$thread_pid}{created}++;
-    $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm;
-}
-
-sub workqueue::workqueue_execution
-{
-    my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
-       $common_pid, $common_comm,
-       $thread_comm, $thread_pid, $func) = @_;
-
-    $cpus[$common_cpu]{$thread_pid}{executed}++;
-    $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm;
-}
-
-sub workqueue::workqueue_insertion
-{
-    my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
-       $common_pid, $common_comm,
-       $thread_comm, $thread_pid, $func) = @_;
-
-    $cpus[$common_cpu]{$thread_pid}{inserted}++;
-    $cpus[$common_cpu]{$thread_pid}{comm} = $thread_comm;
-}
-
-sub trace_end
-{
-    print "workqueue work stats:\n\n";
-    my $cpu = 0;
-    printf("%3s %6s %6s\t%-20s\n", "cpu", "ins", "exec", "name");
-    printf("%3s %6s %6s\t%-20s\n", "---", "---", "----", "----");
-    foreach my $pidhash (@cpus) {
-       while ((my $pid, my $wqhash) = each %$pidhash) {
-           my $ins = $$wqhash{'inserted'} || 0;
-           my $exe = $$wqhash{'executed'} || 0;
-           my $comm = $$wqhash{'comm'} || "";
-           if ($ins || $exe) {
-               printf("%3u %6u %6u\t%-20s\n", $cpu, $ins, $exe, $comm);
-           }
-       }
-       $cpu++;
-    }
-
-    $cpu = 0;
-    print "\nworkqueue lifecycle stats:\n\n";
-    printf("%3s %6s %6s\t%-20s\n", "cpu", "created", "destroyed", "name");
-    printf("%3s %6s %6s\t%-20s\n", "---", "-------", "---------", "----");
-    foreach my $pidhash (@cpus) {
-       while ((my $pid, my $wqhash) = each %$pidhash) {
-           my $created = $$wqhash{'created'} || 0;
-           my $destroyed = $$wqhash{'destroyed'} || 0;
-           my $comm = $$wqhash{'comm'} || "";
-           if ($created || $destroyed) {
-               printf("%3u %6u %6u\t%-20s\n", $cpu, $created, $destroyed,
-                      $comm);
-           }
-       }
-       $cpu++;
-    }
-
-    print_unhandled();
-}
-
-my %unhandled;
-
-sub print_unhandled
-{
-    if ((scalar keys %unhandled) == 0) {
-       return;
-    }
-
-    print "\nunhandled events:\n\n";
-
-    printf("%-40s  %10s\n", "event", "count");
-    printf("%-40s  %10s\n", "----------------------------------------",
-          "-----------");
-
-    foreach my $event_name (keys %unhandled) {
-       printf("%-40s  %10d\n", $event_name, $unhandled{$event_name});
-    }
-}
-
-sub trace_unhandled
-{
-    my ($event_name, $context, $common_cpu, $common_secs, $common_nsecs,
-       $common_pid, $common_comm) = @_;
-
-    $unhandled{$event_name}++;
-}
index 25638a986257a58ef664f5d68b39833987988406..bdcceb886f77fc5f8ae91e3d79b2da4ccd11d09c 100644 (file)
  * permissions. All the event text files are stored there.
  */
 
+/*
+ * Powerpc needs __SANE_USERSPACE_TYPES__ before <linux/types.h> to select
+ * 'int-ll64.h' and avoid compile warnings when printing __u64 with %llu.
+ */
+#define __SANE_USERSPACE_TYPES__
 #include <stdlib.h>
 #include <stdio.h>
 #include <inttypes.h>
@@ -33,8 +38,6 @@
 
 extern int verbose;
 
-bool test_attr__enabled;
-
 static char *dir;
 
 void test_attr__init(void)
@@ -146,7 +149,7 @@ static int run_dir(const char *d, const char *perf)
 {
        char cmd[3*PATH_MAX];
 
-       snprintf(cmd, 3*PATH_MAX, "python %s/attr.py -d %s/attr/ -p %s %s",
+       snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %s",
                 d, d, perf, verbose ? "-v" : "");
 
        return system(cmd);
index e702b82dcb86e91a528b287d250c8cf8d4ba0667..2f629ca485bc32f6c04c773f6ad36623d7ee171e 100644 (file)
@@ -68,7 +68,7 @@ class Event(dict):
             self[key] = val
 
     def __init__(self, name, data, base):
-        log.info("    Event %s" % name);
+        log.debug("    Event %s" % name);
         self.name  = name;
         self.group = ''
         self.add(base)
@@ -97,6 +97,14 @@ class Event(dict):
                 return False
         return True
 
+    def diff(self, other):
+        for t in Event.terms:
+            if not self.has_key(t) or not other.has_key(t):
+                continue
+            if not self.compare_data(self[t], other[t]):
+                log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
+
+
 # Test file description needs to have following sections:
 # [config]
 #   - just single instance in file
@@ -113,7 +121,7 @@ class Test(object):
         parser = ConfigParser.SafeConfigParser()
         parser.read(path)
 
-        log.warning("running '%s'" % path)
+        log.debug("running '%s'" % path)
 
         self.path     = path
         self.test_dir = options.test_dir
@@ -128,7 +136,7 @@ class Test(object):
 
         self.expect   = {}
         self.result   = {}
-        log.info("  loading expected events");
+        log.debug("  loading expected events");
         self.load_events(path, self.expect)
 
     def is_event(self, name):
@@ -164,7 +172,7 @@ class Test(object):
               self.perf, self.command, tempdir, self.args)
         ret = os.WEXITSTATUS(os.system(cmd))
 
-        log.info("  running '%s' ret %d " % (cmd, ret))
+        log.warning("  running '%s' ret %d " % (cmd, ret))
 
         if ret != int(self.ret):
             raise Unsup(self)
@@ -172,7 +180,7 @@ class Test(object):
     def compare(self, expect, result):
         match = {}
 
-        log.info("  compare");
+        log.debug("  compare");
 
         # For each expected event find all matching
         # events in result. Fail if there's not any.
@@ -187,10 +195,11 @@ class Test(object):
                 else:
                     log.debug("    ->FAIL");
 
-            log.info("    match: [%s] matches %s" % (exp_name, str(exp_list)))
+            log.debug("    match: [%s] matches %s" % (exp_name, str(exp_list)))
 
             # we did not find any matching event - fail
             if (not exp_list):
+                exp_event.diff(res_event)
                 raise Fail(self, 'match failure');
 
             match[exp_name] = exp_list
@@ -208,10 +217,10 @@ class Test(object):
                 if res_group not in match[group]:
                     raise Fail(self, 'group failure')
 
-                log.info("    group: [%s] matches group leader %s" %
+                log.debug("    group: [%s] matches group leader %s" %
                          (exp_name, str(match[group])))
 
-        log.info("  matched")
+        log.debug("  matched")
 
     def resolve_groups(self, events):
         for name, event in events.items():
@@ -233,7 +242,7 @@ class Test(object):
             self.run_cmd(tempdir);
 
             # load events expectation for the test
-            log.info("  loading result events");
+            log.debug("  loading result events");
             for f in glob.glob(tempdir + '/event*'):
                 self.load_events(f, self.result);
 
index f1485d8e6a0b473550473784f9a8de435e544d57..5bc3880f7be5096d7366355763df58a0d4f2016d 100644 (file)
@@ -7,7 +7,7 @@ size=96
 config=0
 sample_period=4000
 sample_type=263
-read_format=7
+read_format=0
 disabled=1
 inherit=1
 pinned=0
index a6599e9a19d302134359941e92e579c465e80989..57739cacdb2aa10841b16ca1386fe6f8b0ab9db5 100644 (file)
@@ -6,12 +6,14 @@ args    = --group -e cycles,instructions kill >/dev/null 2>&1
 fd=1
 group_fd=-1
 sample_type=327
+read_format=4
 
 [event-2:base-record]
 fd=2
 group_fd=1
 config=1
 sample_type=327
+read_format=4
 mmap=0
 comm=0
 enable_on_exec=0
index 5a8359da38af9ed26f4c51ca84026bce2b3f2454..c5548d054aff5ba9ded3225494d9954f22c1e7b2 100644 (file)
@@ -1,11 +1,12 @@
 [config]
 command = record
-args    = -e '{cycles,instructions}' kill >/tmp/krava 2>&1
+args    = -e '{cycles,instructions}' kill >/dev/null 2>&1
 
 [event-1:base-record]
 fd=1
 group_fd=-1
 sample_type=327
+read_format=4
 
 [event-2:base-record]
 fd=2
@@ -13,6 +14,7 @@ group_fd=1
 type=0
 config=1
 sample_type=327
+read_format=4
 mmap=0
 comm=0
 enable_on_exec=0
index 186f675354945cac0e870502e450cec1e9a38a63..acb98e0e39f2957426a74c0e0a8858d2ead85331 100644 (file)
@@ -4,6 +4,7 @@
  * Builtin regression testing command: ever growing number of sanity tests
  */
 #include "builtin.h"
+#include "intlist.h"
 #include "tests.h"
 #include "debug.h"
 #include "color.h"
@@ -68,6 +69,14 @@ static struct test {
                .desc = "struct perf_event_attr setup",
                .func = test__attr,
        },
+       {
+               .desc = "Test matching and linking multiple hists",
+               .func = test__hists_link,
+       },
+       {
+               .desc = "Try 'use perf' in python, checking link problems",
+               .func = test__python_use,
+       },
        {
                .func = NULL,
        },
@@ -97,7 +106,7 @@ static bool perf_test__matches(int curr, int argc, const char *argv[])
        return false;
 }
 
-static int __cmd_test(int argc, const char *argv[])
+static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
 {
        int i = 0;
        int width = 0;
@@ -118,13 +127,28 @@ static int __cmd_test(int argc, const char *argv[])
                        continue;
 
                pr_info("%2d: %-*s:", i, width, tests[curr].desc);
+
+               if (intlist__find(skiplist, i)) {
+                       color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
+                       continue;
+               }
+
                pr_debug("\n--- start ---\n");
                err = tests[curr].func();
                pr_debug("---- end ----\n%s:", tests[curr].desc);
-               if (err)
-                       color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
-               else
+
+               switch (err) {
+               case TEST_OK:
                        pr_info(" Ok\n");
+                       break;
+               case TEST_SKIP:
+                       color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
+                       break;
+               case TEST_FAIL:
+               default:
+                       color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
+                       break;
+               }
        }
 
        return 0;
@@ -152,11 +176,14 @@ int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused)
        "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
        NULL,
        };
+       const char *skip = NULL;
        const struct option test_options[] = {
+       OPT_STRING('s', "skip", &skip, "tests", "tests to skip"),
        OPT_INCR('v', "verbose", &verbose,
                    "be more verbose (show symbol address, etc)"),
        OPT_END()
        };
+       struct intlist *skiplist = NULL;
 
        argc = parse_options(argc, argv, test_options, test_usage, 0);
        if (argc >= 1 && !strcmp(argv[0], "list"))
@@ -169,5 +196,8 @@ int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused)
        if (symbol__init() < 0)
                return -1;
 
-       return __cmd_test(argc, argv);
+       if (skip != NULL)
+               skiplist = intlist__new(skip);
+
+       return __cmd_test(argc, argv, skiplist);
 }
index e61fc828a158ac771b6e09897f78fc8593b3392c..0fd99a9adb916293d91cd80797355b92940906d8 100644 (file)
@@ -22,7 +22,7 @@ static int perf_evsel__roundtrip_cache_name_test(void)
                        for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
                                __perf_evsel__hw_cache_type_op_res_name(type, op, i,
                                                                        name, sizeof(name));
-                               err = parse_events(evlist, name, 0);
+                               err = parse_events(evlist, name);
                                if (err)
                                        ret = err;
                        }
@@ -70,7 +70,7 @@ static int __perf_evsel__name_array_test(const char *names[], int nr_names)
                 return -ENOMEM;
 
        for (i = 0; i < nr_names; ++i) {
-               err = parse_events(evlist, names[i], 0);
+               err = parse_events(evlist, names[i]);
                if (err) {
                        pr_debug("failed to parse event '%s', err %d\n",
                                 names[i], err);
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
new file mode 100644 (file)
index 0000000..1be64a6
--- /dev/null
@@ -0,0 +1,500 @@
+#include "perf.h"
+#include "tests.h"
+#include "debug.h"
+#include "symbol.h"
+#include "sort.h"
+#include "evsel.h"
+#include "evlist.h"
+#include "machine.h"
+#include "thread.h"
+#include "parse-events.h"
+
+static struct {
+       u32 pid;
+       const char *comm;
+} fake_threads[] = {
+       { 100, "perf" },
+       { 200, "perf" },
+       { 300, "bash" },
+};
+
+static struct {
+       u32 pid;
+       u64 start;
+       const char *filename;
+} fake_mmap_info[] = {
+       { 100, 0x40000, "perf" },
+       { 100, 0x50000, "libc" },
+       { 100, 0xf0000, "[kernel]" },
+       { 200, 0x40000, "perf" },
+       { 200, 0x50000, "libc" },
+       { 200, 0xf0000, "[kernel]" },
+       { 300, 0x40000, "bash" },
+       { 300, 0x50000, "libc" },
+       { 300, 0xf0000, "[kernel]" },
+};
+
+struct fake_sym {
+       u64 start;
+       u64 length;
+       const char *name;
+};
+
+static struct fake_sym perf_syms[] = {
+       { 700, 100, "main" },
+       { 800, 100, "run_command" },
+       { 900, 100, "cmd_record" },
+};
+
+static struct fake_sym bash_syms[] = {
+       { 700, 100, "main" },
+       { 800, 100, "xmalloc" },
+       { 900, 100, "xfree" },
+};
+
+static struct fake_sym libc_syms[] = {
+       { 700, 100, "malloc" },
+       { 800, 100, "free" },
+       { 900, 100, "realloc" },
+};
+
+static struct fake_sym kernel_syms[] = {
+       { 700, 100, "schedule" },
+       { 800, 100, "page_fault" },
+       { 900, 100, "sys_perf_event_open" },
+};
+
+static struct {
+       const char *dso_name;
+       struct fake_sym *syms;
+       size_t nr_syms;
+} fake_symbols[] = {
+       { "perf", perf_syms, ARRAY_SIZE(perf_syms) },
+       { "bash", bash_syms, ARRAY_SIZE(bash_syms) },
+       { "libc", libc_syms, ARRAY_SIZE(libc_syms) },
+       { "[kernel]", kernel_syms, ARRAY_SIZE(kernel_syms) },
+};
+
+static struct machine *setup_fake_machine(struct machines *machines)
+{
+       struct machine *machine = machines__find(machines, HOST_KERNEL_ID);
+       size_t i;
+
+       if (machine == NULL) {
+               pr_debug("Not enough memory for machine setup\n");
+               return NULL;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(fake_threads); i++) {
+               struct thread *thread;
+
+               thread = machine__findnew_thread(machine, fake_threads[i].pid);
+               if (thread == NULL)
+                       goto out;
+
+               thread__set_comm(thread, fake_threads[i].comm);
+       }
+
+       for (i = 0; i < ARRAY_SIZE(fake_mmap_info); i++) {
+               union perf_event fake_mmap_event = {
+                       .mmap = {
+                               .header = { .misc = PERF_RECORD_MISC_USER, },
+                               .pid = fake_mmap_info[i].pid,
+                               .start = fake_mmap_info[i].start,
+                               .len = 0x1000ULL,
+                               .pgoff = 0ULL,
+                       },
+               };
+
+               strcpy(fake_mmap_event.mmap.filename,
+                      fake_mmap_info[i].filename);
+
+               machine__process_mmap_event(machine, &fake_mmap_event);
+       }
+
+       for (i = 0; i < ARRAY_SIZE(fake_symbols); i++) {
+               size_t k;
+               struct dso *dso;
+
+               dso = __dsos__findnew(&machine->user_dsos,
+                                     fake_symbols[i].dso_name);
+               if (dso == NULL)
+                       goto out;
+
+               /* emulate dso__load() */
+               dso__set_loaded(dso, MAP__FUNCTION);
+
+               for (k = 0; k < fake_symbols[i].nr_syms; k++) {
+                       struct symbol *sym;
+                       struct fake_sym *fsym = &fake_symbols[i].syms[k];
+
+                       sym = symbol__new(fsym->start, fsym->length,
+                                         STB_GLOBAL, fsym->name);
+                       if (sym == NULL)
+                               goto out;
+
+                       symbols__insert(&dso->symbols[MAP__FUNCTION], sym);
+               }
+       }
+
+       return machine;
+
+out:
+       pr_debug("Not enough memory for machine setup\n");
+       machine__delete_threads(machine);
+       machine__delete(machine);
+       return NULL;
+}
+
+struct sample {
+       u32 pid;
+       u64 ip;
+       struct thread *thread;
+       struct map *map;
+       struct symbol *sym;
+};
+
+static struct sample fake_common_samples[] = {
+       /* perf [kernel] schedule() */
+       { .pid = 100, .ip = 0xf0000 + 700, },
+       /* perf [perf]   main() */
+       { .pid = 200, .ip = 0x40000 + 700, },
+       /* perf [perf]   cmd_record() */
+       { .pid = 200, .ip = 0x40000 + 900, },
+       /* bash [bash]   xmalloc() */
+       { .pid = 300, .ip = 0x40000 + 800, },
+       /* bash [libc]   malloc() */
+       { .pid = 300, .ip = 0x50000 + 700, },
+};
+
+static struct sample fake_samples[][5] = {
+       {
+               /* perf [perf]   run_command() */
+               { .pid = 100, .ip = 0x40000 + 800, },
+               /* perf [libc]   malloc() */
+               { .pid = 100, .ip = 0x50000 + 700, },
+               /* perf [kernel] page_fault() */
+               { .pid = 100, .ip = 0xf0000 + 800, },
+               /* perf [kernel] sys_perf_event_open() */
+               { .pid = 200, .ip = 0xf0000 + 900, },
+               /* bash [libc]   free() */
+               { .pid = 300, .ip = 0x50000 + 800, },
+       },
+       {
+               /* perf [libc]   free() */
+               { .pid = 200, .ip = 0x50000 + 800, },
+               /* bash [libc]   malloc() */
+               { .pid = 300, .ip = 0x50000 + 700, }, /* will be merged */
+               /* bash [bash]   xfree() */
+               { .pid = 300, .ip = 0x40000 + 900, },
+               /* bash [libc]   realloc() */
+               { .pid = 300, .ip = 0x50000 + 900, },
+               /* bash [kernel] page_fault() */
+               { .pid = 300, .ip = 0xf0000 + 800, },
+       },
+};
+
+static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
+{
+       struct perf_evsel *evsel;
+       struct addr_location al;
+       struct hist_entry *he;
+       struct perf_sample sample = { .cpu = 0, };
+       size_t i = 0, k;
+
+       /*
+        * each evsel will have 10 samples - 5 common and 5 distinct.
+        * However the second evsel also has a collapsed entry for
+        * "bash [libc] malloc" so total 9 entries will be in the tree.
+        */
+       list_for_each_entry(evsel, &evlist->entries, node) {
+               for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
+                       const union perf_event event = {
+                               .ip = {
+                                       .header = {
+                                               .misc = PERF_RECORD_MISC_USER,
+                                       },
+                                       .pid = fake_common_samples[k].pid,
+                                       .ip  = fake_common_samples[k].ip,
+                               },
+                       };
+
+                       if (perf_event__preprocess_sample(&event, machine, &al,
+                                                         &sample, 0) < 0)
+                               goto out;
+
+                       he = __hists__add_entry(&evsel->hists, &al, NULL, 1);
+                       if (he == NULL)
+                               goto out;
+
+                       fake_common_samples[k].thread = al.thread;
+                       fake_common_samples[k].map = al.map;
+                       fake_common_samples[k].sym = al.sym;
+               }
+
+               for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
+                       const union perf_event event = {
+                               .ip = {
+                                       .header = {
+                                               .misc = PERF_RECORD_MISC_USER,
+                                       },
+                                       .pid = fake_samples[i][k].pid,
+                                       .ip  = fake_samples[i][k].ip,
+                               },
+                       };
+
+                       if (perf_event__preprocess_sample(&event, machine, &al,
+                                                         &sample, 0) < 0)
+                               goto out;
+
+                       he = __hists__add_entry(&evsel->hists, &al, NULL, 1);
+                       if (he == NULL)
+                               goto out;
+
+                       fake_samples[i][k].thread = al.thread;
+                       fake_samples[i][k].map = al.map;
+                       fake_samples[i][k].sym = al.sym;
+               }
+               i++;
+       }
+
+       return 0;
+
+out:
+       pr_debug("Not enough memory for adding a hist entry\n");
+       return -1;
+}
+
+static int find_sample(struct sample *samples, size_t nr_samples,
+                      struct thread *t, struct map *m, struct symbol *s)
+{
+       while (nr_samples--) {
+               if (samples->thread == t && samples->map == m &&
+                   samples->sym == s)
+                       return 1;
+               samples++;
+       }
+       return 0;
+}
+
+static int __validate_match(struct hists *hists)
+{
+       size_t count = 0;
+       struct rb_root *root;
+       struct rb_node *node;
+
+       /*
+        * Only entries from fake_common_samples should have a pair.
+        */
+       if (sort__need_collapse)
+               root = &hists->entries_collapsed;
+       else
+               root = hists->entries_in;
+
+       node = rb_first(root);
+       while (node) {
+               struct hist_entry *he;
+
+               he = rb_entry(node, struct hist_entry, rb_node_in);
+
+               if (hist_entry__has_pairs(he)) {
+                       if (find_sample(fake_common_samples,
+                                       ARRAY_SIZE(fake_common_samples),
+                                       he->thread, he->ms.map, he->ms.sym)) {
+                               count++;
+                       } else {
+                               pr_debug("Can't find the matched entry\n");
+                               return -1;
+                       }
+               }
+
+               node = rb_next(node);
+       }
+
+       if (count != ARRAY_SIZE(fake_common_samples)) {
+               pr_debug("Invalid count for matched entries: %zd of %zd\n",
+                        count, ARRAY_SIZE(fake_common_samples));
+               return -1;
+       }
+
+       return 0;
+}
+
+static int validate_match(struct hists *leader, struct hists *other)
+{
+       return __validate_match(leader) || __validate_match(other);
+}
+
+static int __validate_link(struct hists *hists, int idx)
+{
+       size_t count = 0;
+       size_t count_pair = 0;
+       size_t count_dummy = 0;
+       struct rb_root *root;
+       struct rb_node *node;
+
+       /*
+        * Leader hists (idx = 0) will have dummy entries from other,
+        * and some entries will have no pair.  However, every entry
+        * in the other hists should have a (dummy) pair.
+        */
+       if (sort__need_collapse)
+               root = &hists->entries_collapsed;
+       else
+               root = hists->entries_in;
+
+       node = rb_first(root);
+       while (node) {
+               struct hist_entry *he;
+
+               he = rb_entry(node, struct hist_entry, rb_node_in);
+
+               if (hist_entry__has_pairs(he)) {
+                       if (!find_sample(fake_common_samples,
+                                        ARRAY_SIZE(fake_common_samples),
+                                        he->thread, he->ms.map, he->ms.sym) &&
+                           !find_sample(fake_samples[idx],
+                                        ARRAY_SIZE(fake_samples[idx]),
+                                        he->thread, he->ms.map, he->ms.sym)) {
+                               count_dummy++;
+                       }
+                       count_pair++;
+               } else if (idx) {
+                       pr_debug("An entry from the other hists should have a pair\n");
+                       return -1;
+               }
+
+               count++;
+               node = rb_next(node);
+       }
+
+       /*
+        * Note that we have an entry collapsed in the other (idx = 1) hists.
+        */
+       if (idx == 0) {
+               if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
+                       pr_debug("Invalid count of dummy entries: %zd of %zd\n",
+                                count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
+                       return -1;
+               }
+               if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
+                       pr_debug("Invalid count of total leader entries: %zd of %zd\n",
+                                count, count_pair + ARRAY_SIZE(fake_samples[0]));
+                       return -1;
+               }
+       } else {
+               if (count != count_pair) {
+                       pr_debug("Invalid count of total other entries: %zd of %zd\n",
+                                count, count_pair);
+                       return -1;
+               }
+               if (count_dummy > 0) {
+                       pr_debug("Other hists should not have dummy entries: %zd\n",
+                                count_dummy);
+                       return -1;
+               }
+       }
+
+       return 0;
+}
+
+static int validate_link(struct hists *leader, struct hists *other)
+{
+       return __validate_link(leader, 0) || __validate_link(other, 1);
+}
+
+static void print_hists(struct hists *hists)
+{
+       int i = 0;
+       struct rb_root *root;
+       struct rb_node *node;
+
+       if (sort__need_collapse)
+               root = &hists->entries_collapsed;
+       else
+               root = hists->entries_in;
+
+       pr_info("----- %s --------\n", __func__);
+       node = rb_first(root);
+       while (node) {
+               struct hist_entry *he;
+
+               he = rb_entry(node, struct hist_entry, rb_node_in);
+
+               pr_info("%2d: entry: %-8s [%-8s] %20s: period = %"PRIu64"\n",
+                       i, he->thread->comm, he->ms.map->dso->short_name,
+                       he->ms.sym->name, he->stat.period);
+
+               i++;
+               node = rb_next(node);
+       }
+}
+
+int test__hists_link(void)
+{
+       int err = -1;
+       struct machines machines;
+       struct machine *machine = NULL;
+       struct perf_evsel *evsel, *first;
+       struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
+
+       if (evlist == NULL)
+               return -ENOMEM;
+
+       err = parse_events(evlist, "cpu-clock");
+       if (err)
+               goto out;
+       err = parse_events(evlist, "task-clock");
+       if (err)
+               goto out;
+
+       /* default sort order (comm,dso,sym) will be used */
+       if (setup_sorting() < 0)
+               goto out;
+
+       machines__init(&machines);
+
+       /* setup threads/dso/map/symbols also */
+       machine = setup_fake_machine(&machines);
+       if (!machine)
+               goto out;
+
+       if (verbose > 1)
+               machine__fprintf(machine, stderr);
+
+       /* process sample events */
+       err = add_hist_entries(evlist, machine);
+       if (err < 0)
+               goto out;
+
+       list_for_each_entry(evsel, &evlist->entries, node) {
+               hists__collapse_resort(&evsel->hists);
+
+               if (verbose > 2)
+                       print_hists(&evsel->hists);
+       }
+
+       first = perf_evlist__first(evlist);
+       evsel = perf_evlist__last(evlist);
+
+       /* match common entries */
+       hists__match(&first->hists, &evsel->hists);
+       err = validate_match(&first->hists, &evsel->hists);
+       if (err)
+               goto out;
+
+       /* link common and/or dummy entries */
+       hists__link(&first->hists, &evsel->hists);
+       err = validate_link(&first->hists, &evsel->hists);
+       if (err)
+               goto out;
+
+       err = 0;
+
+out:
+       /* tear down everything */
+       perf_evlist__delete(evlist);
+       machines__exit(&machines);
+
+       return err;
+}
index e1746811e14bab193788e294540119f3f7100de3..cdd50755af51d6155e1fcf875965ae5576fe70d1 100644 (file)
@@ -22,36 +22,16 @@ int test__basic_mmap(void)
        struct thread_map *threads;
        struct cpu_map *cpus;
        struct perf_evlist *evlist;
-       struct perf_event_attr attr = {
-               .type           = PERF_TYPE_TRACEPOINT,
-               .read_format    = PERF_FORMAT_ID,
-               .sample_type    = PERF_SAMPLE_ID,
-               .watermark      = 0,
-       };
        cpu_set_t cpu_set;
        const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
                                        "getpgid", };
        pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
                                      (void*)getpgid };
 #define nsyscalls ARRAY_SIZE(syscall_names)
-       int ids[nsyscalls];
        unsigned int nr_events[nsyscalls],
                     expected_nr_events[nsyscalls], i, j;
        struct perf_evsel *evsels[nsyscalls], *evsel;
 
-       for (i = 0; i < nsyscalls; ++i) {
-               char name[64];
-
-               snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
-               ids[i] = trace_event__id(name);
-               if (ids[i] < 0) {
-                       pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
-                       return -1;
-               }
-               nr_events[i] = 0;
-               expected_nr_events[i] = random() % 257;
-       }
-
        threads = thread_map__new(-1, getpid(), UINT_MAX);
        if (threads == NULL) {
                pr_debug("thread_map__new\n");
@@ -79,18 +59,19 @@ int test__basic_mmap(void)
                goto out_free_cpus;
        }
 
-       /* anonymous union fields, can't be initialized above */
-       attr.wakeup_events = 1;
-       attr.sample_period = 1;
-
        for (i = 0; i < nsyscalls; ++i) {
-               attr.config = ids[i];
-               evsels[i] = perf_evsel__new(&attr, i);
+               char name[64];
+
+               snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
+               evsels[i] = perf_evsel__newtp("syscalls", name, i);
                if (evsels[i] == NULL) {
                        pr_debug("perf_evsel__new\n");
                        goto out_free_evlist;
                }
 
+               evsels[i]->attr.wakeup_events = 1;
+               perf_evsel__set_sample_id(evsels[i]);
+
                perf_evlist__add(evlist, evsels[i]);
 
                if (perf_evsel__open(evsels[i], cpus, threads) < 0) {
@@ -99,6 +80,9 @@ int test__basic_mmap(void)
                                 strerror(errno));
                        goto out_close_fd;
                }
+
+               nr_events[i] = 0;
+               expected_nr_events[i] = 1 + rand() % 127;
        }
 
        if (perf_evlist__mmap(evlist, 128, true) < 0) {
@@ -128,6 +112,7 @@ int test__basic_mmap(void)
                        goto out_munmap;
                }
 
+               err = -1;
                evsel = perf_evlist__id2evsel(evlist, sample.id);
                if (evsel == NULL) {
                        pr_debug("event with id %" PRIu64
@@ -137,16 +122,17 @@ int test__basic_mmap(void)
                nr_events[evsel->idx]++;
        }
 
+       err = 0;
        list_for_each_entry(evsel, &evlist->entries, node) {
                if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
                        pr_debug("expected %d %s events, got %d\n",
                                 expected_nr_events[evsel->idx],
                                 perf_evsel__name(evsel), nr_events[evsel->idx]);
+                       err = -1;
                        goto out_munmap;
                }
        }
 
-       err = 0;
 out_munmap:
        perf_evlist__munmap(evlist);
 out_close_fd:
index 31072aba0d549d1690ee147e5cd06102de8852b3..b0657a9ccda638febdcd39c40f30cecf06c0ece6 100644 (file)
@@ -7,20 +7,12 @@
 int test__open_syscall_event_on_all_cpus(void)
 {
        int err = -1, fd, cpu;
-       struct thread_map *threads;
        struct cpu_map *cpus;
        struct perf_evsel *evsel;
-       struct perf_event_attr attr;
        unsigned int nr_open_calls = 111, i;
        cpu_set_t cpu_set;
-       int id = trace_event__id("sys_enter_open");
+       struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
 
-       if (id < 0) {
-               pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
-               return -1;
-       }
-
-       threads = thread_map__new(-1, getpid(), UINT_MAX);
        if (threads == NULL) {
                pr_debug("thread_map__new\n");
                return -1;
@@ -32,15 +24,11 @@ int test__open_syscall_event_on_all_cpus(void)
                goto out_thread_map_delete;
        }
 
-
        CPU_ZERO(&cpu_set);
 
-       memset(&attr, 0, sizeof(attr));
-       attr.type = PERF_TYPE_TRACEPOINT;
-       attr.config = id;
-       evsel = perf_evsel__new(&attr, 0);
+       evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);
        if (evsel == NULL) {
-               pr_debug("perf_evsel__new\n");
+               pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
                goto out_thread_map_delete;
        }
 
@@ -110,6 +98,7 @@ int test__open_syscall_event_on_all_cpus(void)
                }
        }
 
+       perf_evsel__free_counts(evsel);
 out_close_fd:
        perf_evsel__close_fd(evsel, 1, threads->nr);
 out_evsel_delete:
index 98be8b518b4fe6613fe5988594bb430768a5ccca..befc0671f95dab0162ec67c5e7ed60ea7f0c0b53 100644 (file)
@@ -6,29 +6,18 @@
 int test__open_syscall_event(void)
 {
        int err = -1, fd;
-       struct thread_map *threads;
        struct perf_evsel *evsel;
-       struct perf_event_attr attr;
        unsigned int nr_open_calls = 111, i;
-       int id = trace_event__id("sys_enter_open");
+       struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
 
-       if (id < 0) {
-               pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
-               return -1;
-       }
-
-       threads = thread_map__new(-1, getpid(), UINT_MAX);
        if (threads == NULL) {
                pr_debug("thread_map__new\n");
                return -1;
        }
 
-       memset(&attr, 0, sizeof(attr));
-       attr.type = PERF_TYPE_TRACEPOINT;
-       attr.config = id;
-       evsel = perf_evsel__new(&attr, 0);
+       evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);
        if (evsel == NULL) {
-               pr_debug("perf_evsel__new\n");
+               pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
                goto out_thread_map_delete;
        }
 
index 32ee478905eb8ed0bd182bed13fd3a57bb0746bd..c5636f36fe3128e279115ff3ece1faddacfb58d5 100644 (file)
@@ -3,6 +3,7 @@
 #include "evsel.h"
 #include "evlist.h"
 #include "sysfs.h"
+#include "debugfs.h"
 #include "tests.h"
 #include <linux/hw_breakpoint.h>
 
@@ -22,6 +23,7 @@ static int test__checkevent_tracepoint(struct perf_evlist *evlist)
        struct perf_evsel *evsel = perf_evlist__first(evlist);
 
        TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
+       TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->nr_groups);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
        TEST_ASSERT_VAL("wrong sample_type",
                PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type);
@@ -34,6 +36,7 @@ static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist)
        struct perf_evsel *evsel;
 
        TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1);
+       TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->nr_groups);
 
        list_for_each_entry(evsel, &evlist->entries, node) {
                TEST_ASSERT_VAL("wrong type",
@@ -463,10 +466,10 @@ static int test__checkevent_pmu_events(struct perf_evlist *evlist)
 
 static int test__checkterms_simple(struct list_head *terms)
 {
-       struct parse_events__term *term;
+       struct parse_events_term *term;
 
        /* config=10 */
-       term = list_entry(terms->next, struct parse_events__term, list);
+       term = list_entry(terms->next, struct parse_events_term, list);
        TEST_ASSERT_VAL("wrong type term",
                        term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG);
        TEST_ASSERT_VAL("wrong type val",
@@ -475,7 +478,7 @@ static int test__checkterms_simple(struct list_head *terms)
        TEST_ASSERT_VAL("wrong config", !term->config);
 
        /* config1 */
-       term = list_entry(term->list.next, struct parse_events__term, list);
+       term = list_entry(term->list.next, struct parse_events_term, list);
        TEST_ASSERT_VAL("wrong type term",
                        term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG1);
        TEST_ASSERT_VAL("wrong type val",
@@ -484,7 +487,7 @@ static int test__checkterms_simple(struct list_head *terms)
        TEST_ASSERT_VAL("wrong config", !term->config);
 
        /* config2=3 */
-       term = list_entry(term->list.next, struct parse_events__term, list);
+       term = list_entry(term->list.next, struct parse_events_term, list);
        TEST_ASSERT_VAL("wrong type term",
                        term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG2);
        TEST_ASSERT_VAL("wrong type val",
@@ -493,7 +496,7 @@ static int test__checkterms_simple(struct list_head *terms)
        TEST_ASSERT_VAL("wrong config", !term->config);
 
        /* umask=1*/
-       term = list_entry(term->list.next, struct parse_events__term, list);
+       term = list_entry(term->list.next, struct parse_events_term, list);
        TEST_ASSERT_VAL("wrong type term",
                        term->type_term == PARSE_EVENTS__TERM_TYPE_USER);
        TEST_ASSERT_VAL("wrong type val",
@@ -509,6 +512,7 @@ static int test__group1(struct perf_evlist *evlist)
        struct perf_evsel *evsel, *leader;
 
        TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
+       TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
 
        /* instructions:k */
        evsel = leader = perf_evlist__first(evlist);
@@ -521,7 +525,9 @@ static int test__group1(struct perf_evlist *evlist)
        TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
        TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
        TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
-       TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel));
+       TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
+       TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
+       TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
 
        /* cycles:upp */
        evsel = perf_evsel__next(evsel);
@@ -536,6 +542,7 @@ static int test__group1(struct perf_evlist *evlist)
        TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
        TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2);
        TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+       TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
 
        return 0;
 }
@@ -545,6 +552,7 @@ static int test__group2(struct perf_evlist *evlist)
        struct perf_evsel *evsel, *leader;
 
        TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries);
+       TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
 
        /* faults + :ku modifier */
        evsel = leader = perf_evlist__first(evlist);
@@ -557,7 +565,9 @@ static int test__group2(struct perf_evlist *evlist)
        TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
        TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
        TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
-       TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel));
+       TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
+       TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
+       TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
 
        /* cache-references + :u modifier */
        evsel = perf_evsel__next(evsel);
@@ -567,10 +577,11 @@ static int test__group2(struct perf_evlist *evlist)
        TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
        TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
        TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
-       TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+       TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
        TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
        TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
        TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+       TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
 
        /* cycles:k */
        evsel = perf_evsel__next(evsel);
@@ -583,7 +594,7 @@ static int test__group2(struct perf_evlist *evlist)
        TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
        TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
        TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
-       TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel));
+       TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
 
        return 0;
 }
@@ -593,6 +604,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
        struct perf_evsel *evsel, *leader;
 
        TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->nr_entries);
+       TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->nr_groups);
 
        /* group1 syscalls:sys_enter_open:H */
        evsel = leader = perf_evlist__first(evlist);
@@ -606,9 +618,11 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
        TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
        TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
        TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
-       TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel));
+       TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
        TEST_ASSERT_VAL("wrong group name",
                !strcmp(leader->group_name, "group1"));
+       TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
+       TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
 
        /* group1 cycles:kppp */
        evsel = perf_evsel__next(evsel);
@@ -624,6 +638,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
        TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 3);
        TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
        TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+       TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
 
        /* group2 cycles + G modifier */
        evsel = leader = perf_evsel__next(evsel);
@@ -636,9 +651,11 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
        TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
        TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
        TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
-       TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel));
+       TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
        TEST_ASSERT_VAL("wrong group name",
                !strcmp(leader->group_name, "group2"));
+       TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
+       TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
 
        /* group2 1:3 + G modifier */
        evsel = perf_evsel__next(evsel);
@@ -651,6 +668,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
        TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
        TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
        TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+       TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
 
        /* instructions:u */
        evsel = perf_evsel__next(evsel);
@@ -663,7 +681,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
        TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
        TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
        TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
-       TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel));
+       TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
 
        return 0;
 }
@@ -673,6 +691,7 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused)
        struct perf_evsel *evsel, *leader;
 
        TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
+       TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
 
        /* cycles:u + p */
        evsel = leader = perf_evlist__first(evlist);
@@ -687,7 +706,9 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused)
        TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
        TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 1);
        TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
-       TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel));
+       TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
+       TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
+       TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
 
        /* instructions:kp + p */
        evsel = perf_evsel__next(evsel);
@@ -702,6 +723,7 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused)
        TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
        TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2);
        TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+       TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
 
        return 0;
 }
@@ -711,6 +733,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
        struct perf_evsel *evsel, *leader;
 
        TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->nr_entries);
+       TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->nr_groups);
 
        /* cycles + G */
        evsel = leader = perf_evlist__first(evlist);
@@ -724,7 +747,9 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
        TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
        TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
        TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
-       TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel));
+       TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
+       TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
+       TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
 
        /* instructions + G */
        evsel = perf_evsel__next(evsel);
@@ -738,6 +763,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
        TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
        TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
        TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+       TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
 
        /* cycles:G */
        evsel = leader = perf_evsel__next(evsel);
@@ -751,7 +777,9 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
        TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
        TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
        TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
-       TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel));
+       TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
+       TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
+       TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
 
        /* instructions:G */
        evsel = perf_evsel__next(evsel);
@@ -765,6 +793,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
        TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
        TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
        TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+       TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
 
        /* cycles */
        evsel = perf_evsel__next(evsel);
@@ -777,18 +806,235 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
        TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
        TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
        TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
-       TEST_ASSERT_VAL("wrong leader", !perf_evsel__is_group_member(evsel));
+       TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
+
+       return 0;
+}
+
+static int test__group_gh1(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel, *leader;
+
+       TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
+       TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
+
+       /* cycles + :H group modifier */
+       evsel = leader = perf_evlist__first(evlist);
+       TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+       TEST_ASSERT_VAL("wrong config",
+                       PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
+       TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+       TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+       TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+       TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
+       TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+       TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+       TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+       TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
+       TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
+       TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
+
+       /* cache-misses:G + :H group modifier */
+       evsel = perf_evsel__next(evsel);
+       TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+       TEST_ASSERT_VAL("wrong config",
+                       PERF_COUNT_HW_CACHE_MISSES == evsel->attr.config);
+       TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+       TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+       TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+       TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+       TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+       TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+       TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+       TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
+
+       return 0;
+}
+
+static int test__group_gh2(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel, *leader;
+
+       TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
+       TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
+
+       /* cycles + :G group modifier */
+       evsel = leader = perf_evlist__first(evlist);
+       TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+       TEST_ASSERT_VAL("wrong config",
+                       PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
+       TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+       TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+       TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+       TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+       TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
+       TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+       TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+       TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
+       TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
+       TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
+
+       /* cache-misses:H + :G group modifier */
+       evsel = perf_evsel__next(evsel);
+       TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+       TEST_ASSERT_VAL("wrong config",
+                       PERF_COUNT_HW_CACHE_MISSES == evsel->attr.config);
+       TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+       TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+       TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+       TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+       TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+       TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+       TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+       TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
+
+       return 0;
+}
+
+static int test__group_gh3(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel, *leader;
+
+       TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
+       TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
+
+       /* cycles:G + :u group modifier */
+       evsel = leader = perf_evlist__first(evlist);
+       TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+       TEST_ASSERT_VAL("wrong config",
+                       PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
+       TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+       TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+       TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+       TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+       TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
+       TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+       TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+       TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
+       TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
+       TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
+
+       /* cache-misses:H + :u group modifier */
+       evsel = perf_evsel__next(evsel);
+       TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+       TEST_ASSERT_VAL("wrong config",
+                       PERF_COUNT_HW_CACHE_MISSES == evsel->attr.config);
+       TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+       TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+       TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+       TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
+       TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+       TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+       TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+       TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
+
+       return 0;
+}
+
+static int test__group_gh4(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel, *leader;
+
+       TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
+       TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups);
+
+       /* cycles:G + :uG group modifier */
+       evsel = leader = perf_evlist__first(evlist);
+       TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+       TEST_ASSERT_VAL("wrong config",
+                       PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
+       TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+       TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+       TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+       TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+       TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host);
+       TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+       TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+       TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
+       TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
+       TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
+
+       /* cache-misses:H + :uG group modifier */
+       evsel = perf_evsel__next(evsel);
+       TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+       TEST_ASSERT_VAL("wrong config",
+                       PERF_COUNT_HW_CACHE_MISSES == evsel->attr.config);
+       TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+       TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+       TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+       TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+       TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+       TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+       TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+       TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
 
        return 0;
 }
 
-struct test__event_st {
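+/*
+ * Count every tracepoint exported under debugfs tracing/events/<sys>/<event>,
+ * skipping the control entries (enable, filter, header_event, header_page).
+ */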
+static int count_tracepoints(void)
+{
+       char events_path[PATH_MAX];
+       struct dirent *events_ent;
+       DIR *events_dir;
+       int cnt = 0;
+
+       scnprintf(events_path, PATH_MAX, "%s/tracing/events",
+                 debugfs_find_mountpoint());
+
+       events_dir = opendir(events_path);
+
+       TEST_ASSERT_VAL("Can't open events dir", events_dir);
+
+       while ((events_ent = readdir(events_dir))) {
+               char sys_path[PATH_MAX];
+               struct dirent *sys_ent;
+               DIR *sys_dir;
+
+               if (!strcmp(events_ent->d_name, ".")
+                   || !strcmp(events_ent->d_name, "..")
+                   || !strcmp(events_ent->d_name, "enable")
+                   || !strcmp(events_ent->d_name, "header_event")
+                   || !strcmp(events_ent->d_name, "header_page"))
+                       continue;
+
+               scnprintf(sys_path, PATH_MAX, "%s/%s",
+                         events_path, events_ent->d_name);
+
+               sys_dir = opendir(sys_path);
+               TEST_ASSERT_VAL("Can't open sys dir", sys_dir);
+
+               while ((sys_ent = readdir(sys_dir))) {
+                       if (!strcmp(sys_ent->d_name, ".")
+                           || !strcmp(sys_ent->d_name, "..")
+                           || !strcmp(sys_ent->d_name, "enable")
+                           || !strcmp(sys_ent->d_name, "filter"))
+                               continue;
+
+                       cnt++;
+               }
+
+               closedir(sys_dir);
+       }
+
+       closedir(events_dir);
+       return cnt;
+}
+
+static int test__all_tracepoints(struct perf_evlist *evlist)
+{
+       TEST_ASSERT_VAL("wrong events count",
+                       count_tracepoints() == evlist->nr_entries);
+
+       return test__checkevent_tracepoint_multi(evlist);
+}
+
+struct evlist_test {
        const char *name;
        __u32 type;
        int (*check)(struct perf_evlist *evlist);
 };
 
-static struct test__event_st test__events[] = {
+static struct evlist_test test__events[] = {
        [0] = {
                .name  = "syscalls:sys_enter_open",
                .check = test__checkevent_tracepoint,
@@ -921,9 +1167,29 @@ static struct test__event_st test__events[] = {
                .name  = "{cycles,instructions}:G,{cycles:G,instructions:G},cycles",
                .check = test__group5,
        },
+       [33] = {
+               .name  = "*:*",
+               .check = test__all_tracepoints,
+       },
+       [34] = {
+               .name  = "{cycles,cache-misses:G}:H",
+               .check = test__group_gh1,
+       },
+       [35] = {
+               .name  = "{cycles,cache-misses:H}:G",
+               .check = test__group_gh2,
+       },
+       [36] = {
+               .name  = "{cycles:G,cache-misses:H}:u",
+               .check = test__group_gh3,
+       },
+       [37] = {
+               .name  = "{cycles:G,cache-misses:H}:uG",
+               .check = test__group_gh4,
+       },
 };
 
-static struct test__event_st test__events_pmu[] = {
+static struct evlist_test test__events_pmu[] = {
        [0] = {
                .name  = "cpu/config=10,config1,config2=3,period=1000/u",
                .check = test__checkevent_pmu,
@@ -934,20 +1200,20 @@ static struct test__event_st test__events_pmu[] = {
        },
 };
 
-struct test__term {
+struct terms_test {
        const char *str;
        __u32 type;
        int (*check)(struct list_head *terms);
 };
 
-static struct test__term test__terms[] = {
+static struct terms_test test__terms[] = {
        [0] = {
                .str   = "config=10,config1,config2=3,umask=1",
                .check = test__checkterms_simple,
        },
 };
 
-static int test_event(struct test__event_st *e)
+static int test_event(struct evlist_test *e)
 {
        struct perf_evlist *evlist;
        int ret;
@@ -956,7 +1222,7 @@ static int test_event(struct test__event_st *e)
        if (evlist == NULL)
                return -ENOMEM;
 
-       ret = parse_events(evlist, e->name, 0);
+       ret = parse_events(evlist, e->name);
        if (ret) {
                pr_debug("failed to parse event '%s', err %d\n",
                         e->name, ret);
@@ -969,13 +1235,13 @@ static int test_event(struct test__event_st *e)
        return ret;
 }
 
-static int test_events(struct test__event_st *events, unsigned cnt)
+static int test_events(struct evlist_test *events, unsigned cnt)
 {
        int ret1, ret2 = 0;
        unsigned i;
 
        for (i = 0; i < cnt; i++) {
-               struct test__event_st *e = &events[i];
+               struct evlist_test *e = &events[i];
 
                pr_debug("running test %d '%s'\n", i, e->name);
                ret1 = test_event(e);
@@ -986,7 +1252,7 @@ static int test_events(struct test__event_st *events, unsigned cnt)
        return ret2;
 }
 
-static int test_term(struct test__term *t)
+static int test_term(struct terms_test *t)
 {
        struct list_head *terms;
        int ret;
@@ -1010,13 +1276,13 @@ static int test_term(struct test__term *t)
        return ret;
 }
 
-static int test_terms(struct test__term *terms, unsigned cnt)
+static int test_terms(struct terms_test *terms, unsigned cnt)
 {
        int ret = 0;
        unsigned i;
 
        for (i = 0; i < cnt; i++) {
-               struct test__term *t = &terms[i];
+               struct terms_test *t = &terms[i];
 
                pr_debug("running test %d '%s'\n", i, t->str);
                ret = test_term(t);
@@ -1067,7 +1333,7 @@ static int test_pmu_events(void)
 
        while (!ret && (ent = readdir(dir))) {
 #define MAX_NAME 100
-               struct test__event_st e;
+               struct evlist_test e;
                char name[MAX_NAME];
 
                if (!strcmp(ent->d_name, ".") ||
index 70e0d4421df884ba9a5f1091b47d52e1fb362024..1e8e5128d0da31a94fccfadc036cd8e0e48da8f4 100644 (file)
@@ -96,22 +96,22 @@ int test__PERF_RECORD(void)
        err = perf_evlist__prepare_workload(evlist, &opts, argv);
        if (err < 0) {
                pr_debug("Couldn't run the workload!\n");
-               goto out_delete_evlist;
+               goto out_delete_maps;
        }
 
        /*
         * Config the evsels, setting attr->comm on the first one, etc.
         */
        evsel = perf_evlist__first(evlist);
-       evsel->attr.sample_type |= PERF_SAMPLE_CPU;
-       evsel->attr.sample_type |= PERF_SAMPLE_TID;
-       evsel->attr.sample_type |= PERF_SAMPLE_TIME;
-       perf_evlist__config_attrs(evlist, &opts);
+       perf_evsel__set_sample_bit(evsel, CPU);
+       perf_evsel__set_sample_bit(evsel, TID);
+       perf_evsel__set_sample_bit(evsel, TIME);
+       perf_evlist__config(evlist, &opts);
 
        err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
        if (err < 0) {
                pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
-               goto out_delete_evlist;
+               goto out_delete_maps;
        }
 
        cpu = err;
@@ -121,7 +121,7 @@ int test__PERF_RECORD(void)
         */
        if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
                pr_debug("sched_setaffinity: %s\n", strerror(errno));
-               goto out_delete_evlist;
+               goto out_delete_maps;
        }
 
        /*
@@ -131,7 +131,7 @@ int test__PERF_RECORD(void)
        err = perf_evlist__open(evlist);
        if (err < 0) {
                pr_debug("perf_evlist__open: %s\n", strerror(errno));
-               goto out_delete_evlist;
+               goto out_delete_maps;
        }
 
        /*
@@ -142,7 +142,7 @@ int test__PERF_RECORD(void)
        err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
        if (err < 0) {
                pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
-               goto out_delete_evlist;
+               goto out_delete_maps;
        }
 
        /*
@@ -305,6 +305,8 @@ found_exit:
        }
 out_err:
        perf_evlist__munmap(evlist);
+out_delete_maps:
+       perf_evlist__delete_maps(evlist);
 out_delete_evlist:
        perf_evlist__delete(evlist);
 out:
index a5f379863b8f8559a8b9d093dbe71fe426416175..12b322fa34753b9d463762276dc7ec4e9b0c909a 100644 (file)
@@ -19,10 +19,8 @@ static struct test_format {
        { "krava23", "config2:28-29,38\n", },
 };
 
-#define TEST_FORMATS_CNT (sizeof(test_formats) / sizeof(struct test_format))
-
 /* Simulated users input. */
-static struct parse_events__term test_terms[] = {
+static struct parse_events_term test_terms[] = {
        {
                .config    = (char *) "krava01",
                .val.num   = 15,
@@ -78,7 +76,6 @@ static struct parse_events__term test_terms[] = {
                .type_term = PARSE_EVENTS__TERM_TYPE_USER,
        },
 };
-#define TERMS_CNT (sizeof(test_terms) / sizeof(struct parse_events__term))
 
 /*
  * Prepare format directory data, exported by kernel
@@ -93,7 +90,7 @@ static char *test_format_dir_get(void)
        if (!mkdtemp(dir))
                return NULL;
 
-       for (i = 0; i < TEST_FORMATS_CNT; i++) {
+       for (i = 0; i < ARRAY_SIZE(test_formats); i++) {
                static char name[PATH_MAX];
                struct test_format *format = &test_formats[i];
                FILE *file;
@@ -130,14 +127,12 @@ static struct list_head *test_terms_list(void)
        static LIST_HEAD(terms);
        unsigned int i;
 
-       for (i = 0; i < TERMS_CNT; i++)
+       for (i = 0; i < ARRAY_SIZE(test_terms); i++)
                list_add_tail(&test_terms[i].list, &terms);
 
        return &terms;
 }
 
-#undef TERMS_CNT
-
 int test__pmu(void)
 {
        char *format = test_format_dir_get();
diff --git a/tools/perf/tests/python-use.c b/tools/perf/tests/python-use.c
new file mode 100644 (file)
index 0000000..7760277
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Just test if we can load the python binding.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include "tests.h"
+
+extern int verbose;
+
+int test__python_use(void)
+{
+       char *cmd;
+       int ret;
+
+       if (asprintf(&cmd, "echo \"import sys ; sys.path.append('%s'); import perf\" | %s %s",
+                    PYTHONPATH, PYTHON, verbose ? "" : "2> /dev/null") < 0)
+               return -1;
+
+       ret = system(cmd) ? -1 : 0;
+       free(cmd);
+       return ret;
+}
index fc121edab0168abc7b5ecd8597ddc97c392c3e7a..5de0be1ff4b614112c22e7c65d3a9808596ae9bc 100644 (file)
@@ -1,6 +1,12 @@
 #ifndef TESTS_H
 #define TESTS_H
 
+enum {
+       TEST_OK   =  0,
+       TEST_FAIL = -1,
+       TEST_SKIP = -2,
+};
+
 /* Tests */
 int test__vmlinux_matches_kallsyms(void);
 int test__open_syscall_event(void);
@@ -15,8 +21,7 @@ int test__pmu(void);
 int test__attr(void);
 int test__dso_data(void);
 int test__parse_events(void);
-
-/* Util */
-int trace_event__id(const char *evname);
+int test__hists_link(void);
+int test__python_use(void);
 
 #endif /* TESTS_H */
diff --git a/tools/perf/tests/util.c b/tools/perf/tests/util.c
deleted file mode 100644 (file)
index 748f2e8..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-#include <stdio.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include "tests.h"
-#include "debugfs.h"
-
-int trace_event__id(const char *evname)
-{
-       char *filename;
-       int err = -1, fd;
-
-       if (asprintf(&filename,
-                    "%s/syscalls/%s/id",
-                    tracing_events_path, evname) < 0)
-               return -1;
-
-       fd = open(filename, O_RDONLY);
-       if (fd >= 0) {
-               char id[16];
-               if (read(fd, id, sizeof(id)) > 0)
-                       err = atoi(id);
-               close(fd);
-       }
-
-       free(filename);
-       return err;
-}
index 0d1cdbee2f59ed66a524d5c85fddb62ecdb8dd10..7b4c4d26d1baed24e2c5999d76457616c78c5d91 100644 (file)
@@ -44,7 +44,7 @@ int test__vmlinux_matches_kallsyms(void)
         */
        if (machine__create_kernel_maps(&kallsyms) < 0) {
                pr_debug("machine__create_kernel_maps ");
-               return -1;
+               goto out;
        }
 
        /*
@@ -101,7 +101,8 @@ int test__vmlinux_matches_kallsyms(void)
         */
        if (machine__load_vmlinux_path(&vmlinux, type,
                                       vmlinux_matches_kallsyms_filter) <= 0) {
-               pr_debug("machine__load_vmlinux_path ");
+               pr_debug("Couldn't find a vmlinux that matches the kernel running on this machine, skipping test\n");
+               err = TEST_SKIP;
                goto out;
        }
 
@@ -226,5 +227,7 @@ detour:
                        map__fprintf(pos, stderr);
        }
 out:
+       machine__exit(&kallsyms);
+       machine__exit(&vmlinux);
        return err;
 }
index 4aeb7d5df93933fd8babfafaadd47c755cbb9671..809ea4632a34fc0f90e766eb3cf16175904cbefa 100644 (file)
@@ -273,6 +273,8 @@ void ui_browser__hide(struct ui_browser *browser __maybe_unused)
 {
        pthread_mutex_lock(&ui__lock);
        ui_helpline__pop();
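+       /* Release the browser's private helpline string and clear the pointer. */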
+       free(browser->helpline);
+       browser->helpline = NULL;
        pthread_mutex_unlock(&ui__lock);
 }
 
@@ -471,7 +473,7 @@ unsigned int ui_browser__list_head_refresh(struct ui_browser *browser)
        return row;
 }
 
-static struct ui_browser__colorset {
+static struct ui_browser_colorset {
        const char *name, *fg, *bg;
        int colorset;
 } ui_browser__colorsets[] = {
@@ -706,7 +708,7 @@ void ui_browser__init(void)
        perf_config(ui_browser__color_config, NULL);
 
        while (ui_browser__colorsets[i].name) {
-               struct ui_browser__colorset *c = &ui_browser__colorsets[i++];
+               struct ui_browser_colorset *c = &ui_browser__colorsets[i++];
                sltt_set_color(c->colorset, c->name, c->fg, c->bg);
        }
 
index 5dab3ca96980baf107158352b149799636002055..7dca1555c6109bbb8a85611397f984245235de21 100644 (file)
@@ -182,6 +182,16 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
                ab->selection = dl;
 }
 
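+/*
+ * A jump line is only drawable when it carries a jump instruction with a
+ * resolved target offset that lies inside the symbol.
+ */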
+static bool disasm_line__is_valid_jump(struct disasm_line *dl, struct symbol *sym)
+{
+       if (!dl || !dl->ins || !ins__is_jump(dl->ins)
+           || !disasm_line__has_offset(dl)
+           || dl->ops.target.offset >= symbol__size(sym))
+               return false;
+
+       return true;
+}
+
 static void annotate_browser__draw_current_jump(struct ui_browser *browser)
 {
        struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
@@ -195,8 +205,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
        if (strstr(sym->name, "@plt"))
                return;
 
-       if (!cursor || !cursor->ins || !ins__is_jump(cursor->ins) ||
-           !disasm_line__has_offset(cursor))
+       if (!disasm_line__is_valid_jump(cursor, sym))
                return;
 
        target = ab->offsets[cursor->ops.target.offset];
@@ -788,17 +797,9 @@ static void annotate_browser__mark_jump_targets(struct annotate_browser *browser
                struct disasm_line *dl = browser->offsets[offset], *dlt;
                struct browser_disasm_line *bdlt;
 
-               if (!dl || !dl->ins || !ins__is_jump(dl->ins) ||
-                   !disasm_line__has_offset(dl))
+               if (!disasm_line__is_valid_jump(dl, sym))
                        continue;
 
-               if (dl->ops.target.offset >= size) {
-                       ui__error("jump to after symbol!\n"
-                                 "size: %zx, jump target: %" PRIx64,
-                                 size, dl->ops.target.offset);
-                       continue;
-               }
-
                dlt = browser->offsets[dl->ops.target.offset];
                /*
                 * FIXME: Oops, no jump target? Buggy disassembler? Or do we
@@ -921,11 +922,11 @@ out_free_offsets:
 
 #define ANNOTATE_CFG(n) \
        { .name = #n, .value = &annotate_browser__opts.n, }
-       
+
 /*
  * Keep the entries sorted, they are bsearch'ed
  */
-static struct annotate__config {
+static struct annotate_config {
        const char *name;
        bool *value;
 } annotate__configs[] = {
@@ -939,7 +940,7 @@ static struct annotate__config {
 
 static int annotate_config__cmp(const void *name, const void *cfgp)
 {
-       const struct annotate__config *cfg = cfgp;
+       const struct annotate_config *cfg = cfgp;
 
        return strcmp(name, cfg->name);
 }
@@ -947,7 +948,7 @@ static int annotate_config__cmp(const void *name, const void *cfgp)
 static int annotate__config(const char *var, const char *value,
                            void *data __maybe_unused)
 {
-       struct annotate__config *cfg;
+       struct annotate_config *cfg;
        const char *name;
 
        if (prefixcmp(var, "annotate.") != 0)
@@ -955,7 +956,7 @@ static int annotate__config(const char *var, const char *value,
 
        name = var + 9;
        cfg = bsearch(name, annotate__configs, ARRAY_SIZE(annotate__configs),
-                     sizeof(struct annotate__config), annotate_config__cmp);
+                     sizeof(struct annotate_config), annotate_config__cmp);
 
        if (cfg == NULL)
                return -1;
index ccc4bd16142094d7a1a0617ae72f141ad0058a1a..aa22704047d684caf07806e4a39d4e43a9f9fe28 100644 (file)
@@ -567,26 +567,128 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
        return row - first_row;
 }
 
-#define HPP__COLOR_FN(_name, _field)                                   \
-static int hist_browser__hpp_color_ ## _name(struct perf_hpp *hpp,     \
-                                            struct hist_entry *he)     \
+struct hpp_arg {
+       struct ui_browser *b;
+       char folded_sign;
+       bool current_entry;
+};
+
+static int __hpp__color_callchain(struct hpp_arg *arg)
+{
+       if (!symbol_conf.use_callchain)
+               return 0;
+
+       slsmg_printf("%c ", arg->folded_sign);
+       return 2;
+}
+
+static int __hpp__color_fmt(struct perf_hpp *hpp, struct hist_entry *he,
+                           u64 (*get_field)(struct hist_entry *),
+                           int (*callchain_cb)(struct hpp_arg *))
+{
+       int ret = 0;
+       double percent = 0.0;
+       struct hists *hists = he->hists;
+       struct hpp_arg *arg = hpp->ptr;
+
+       if (hists->stats.total_period)
+               percent = 100.0 * get_field(he) / hists->stats.total_period;
+
+       ui_browser__set_percent_color(arg->b, percent, arg->current_entry);
+
+       if (callchain_cb)
+               ret += callchain_cb(arg);
+
+       ret += scnprintf(hpp->buf, hpp->size, "%6.2f%%", percent);
+       slsmg_printf("%s", hpp->buf);
+
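+       /*
+        * In event-group view, append one percentage column per group member,
+        * zero-filling members that contributed no samples to this entry.
+        */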
+       if (symbol_conf.event_group) {
+               int prev_idx, idx_delta;
+               struct perf_evsel *evsel = hists_to_evsel(hists);
+               struct hist_entry *pair;
+               int nr_members = evsel->nr_members;
+
+               if (nr_members <= 1)
+                       goto out;
+
+               prev_idx = perf_evsel__group_idx(evsel);
+
+               list_for_each_entry(pair, &he->pairs.head, pairs.node) {
+                       u64 period = get_field(pair);
+                       u64 total = pair->hists->stats.total_period;
+
+                       if (!total)
+                               continue;
+
+                       evsel = hists_to_evsel(pair->hists);
+                       idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;
+
+                       while (idx_delta--) {
+                               /*
+                                * zero-fill group members in the middle which
+                                * have no sample
+                                */
+                               ui_browser__set_percent_color(arg->b, 0.0,
+                                                       arg->current_entry);
+                               ret += scnprintf(hpp->buf, hpp->size,
+                                                " %6.2f%%", 0.0);
+                               slsmg_printf("%s", hpp->buf);
+                       }
+
+                       percent = 100.0 * period / total;
+                       ui_browser__set_percent_color(arg->b, percent,
+                                                     arg->current_entry);
+                       ret += scnprintf(hpp->buf, hpp->size,
+                                        " %6.2f%%", percent);
+                       slsmg_printf("%s", hpp->buf);
+
+                       prev_idx = perf_evsel__group_idx(evsel);
+               }
+
+               idx_delta = nr_members - prev_idx - 1;
+
+               while (idx_delta--) {
+                       /*
+                        * zero-fill trailing group members which have no sample
+                        */
+                       ui_browser__set_percent_color(arg->b, 0.0,
+                                                     arg->current_entry);
+                       ret += scnprintf(hpp->buf, hpp->size,
+                                        " %6.2f%%", 0.0);
+                       slsmg_printf("%s", hpp->buf);
+               }
+       }
+out:
+       if (!arg->current_entry || !arg->b->navkeypressed)
+               ui_browser__set_color(arg->b, HE_COLORSET_NORMAL);
+
+       return ret;
+}
+
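+/*
+ * Generate one color method per overhead field; the optional callback is used
+ * only by the plain overhead column to print the callchain fold marker first.
+ */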
+#define __HPP_COLOR_PERCENT_FN(_type, _field, _cb)                     \
+static u64 __hpp_get_##_field(struct hist_entry *he)                   \
+{                                                                      \
+       return he->stat._field;                                         \
+}                                                                      \
+                                                                       \
+static int hist_browser__hpp_color_##_type(struct perf_hpp *hpp,       \
+                                          struct hist_entry *he)       \
 {                                                                      \
-       struct hists *hists = he->hists;                                \
-       double percent = 100.0 * he->stat._field / hists->stats.total_period; \
-       *(double *)hpp->ptr = percent;                                  \
-       return scnprintf(hpp->buf, hpp->size, "%6.2f%%", percent);      \
+       return __hpp__color_fmt(hpp, he, __hpp_get_##_field, _cb);      \
 }
 
-HPP__COLOR_FN(overhead, period)
-HPP__COLOR_FN(overhead_sys, period_sys)
-HPP__COLOR_FN(overhead_us, period_us)
-HPP__COLOR_FN(overhead_guest_sys, period_guest_sys)
-HPP__COLOR_FN(overhead_guest_us, period_guest_us)
+__HPP_COLOR_PERCENT_FN(overhead, period, __hpp__color_callchain)
+__HPP_COLOR_PERCENT_FN(overhead_sys, period_sys, NULL)
+__HPP_COLOR_PERCENT_FN(overhead_us, period_us, NULL)
+__HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys, NULL)
+__HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us, NULL)
 
-#undef HPP__COLOR_FN
+#undef __HPP_COLOR_PERCENT_FN
 
 void hist_browser__init_hpp(void)
 {
+       perf_hpp__column_enable(PERF_HPP__OVERHEAD);
+
        perf_hpp__init();
 
        perf_hpp__format[PERF_HPP__OVERHEAD].color =
@@ -606,13 +708,13 @@ static int hist_browser__show_entry(struct hist_browser *browser,
                                    unsigned short row)
 {
        char s[256];
-       double percent;
-       int i, printed = 0;
+       int printed = 0;
        int width = browser->b.width;
        char folded_sign = ' ';
        bool current_entry = ui_browser__is_current_entry(&browser->b, row);
        off_t row_offset = entry->row_offset;
        bool first = true;
+       struct perf_hpp_fmt *fmt;
 
        if (current_entry) {
                browser->he_selection = entry;
@@ -625,41 +727,30 @@ static int hist_browser__show_entry(struct hist_browser *browser,
        }
 
        if (row_offset == 0) {
+               struct hpp_arg arg = {
+                       .b              = &browser->b,
+                       .folded_sign    = folded_sign,
+                       .current_entry  = current_entry,
+               };
                struct perf_hpp hpp = {
                        .buf            = s,
                        .size           = sizeof(s),
+                       .ptr            = &arg,
                };
 
                ui_browser__gotorc(&browser->b, row, 0);
 
-               for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
-                       if (!perf_hpp__format[i].cond)
-                               continue;
-
+               perf_hpp__for_each_format(fmt) {
                        if (!first) {
                                slsmg_printf("  ");
                                width -= 2;
                        }
                        first = false;
 
-                       if (perf_hpp__format[i].color) {
-                               hpp.ptr = &percent;
-                               /* It will set percent for us. See HPP__COLOR_FN above. */
-                               width -= perf_hpp__format[i].color(&hpp, entry);
-
-                               ui_browser__set_percent_color(&browser->b, percent, current_entry);
-
-                               if (i == PERF_HPP__OVERHEAD && symbol_conf.use_callchain) {
-                                       slsmg_printf("%c ", folded_sign);
-                                       width -= 2;
-                               }
-
-                               slsmg_printf("%s", s);
-
-                               if (!current_entry || !browser->b.navkeypressed)
-                                       ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL);
+                       if (fmt->color) {
+                               width -= fmt->color(&hpp, entry);
                        } else {
-                               width -= perf_hpp__format[i].entry(&hpp, entry);
+                               width -= fmt->entry(&hpp, entry);
                                slsmg_printf("%s", s);
                        }
                }
@@ -1098,6 +1189,21 @@ static int hists__browser_title(struct hists *hists, char *bf, size_t size,
        const struct thread *thread = hists->thread_filter;
        unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
        u64 nr_events = hists->stats.total_period;
+       struct perf_evsel *evsel = hists_to_evsel(hists);
+       char buf[512];
+       size_t buflen = sizeof(buf);
+
+       if (symbol_conf.event_group && evsel->nr_members > 1) {
+               struct perf_evsel *pos;
+
+               perf_evsel__group_desc(evsel, buf, buflen);
+               ev_name = buf;
+
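+               /* Aggregate samples and period over all group members for the title. */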
+               for_each_group_member(pos, evsel) {
+                       nr_samples += pos->hists.stats.nr_events[PERF_RECORD_SAMPLE];
+                       nr_events += pos->hists.stats.total_period;
+               }
+       }
 
        nr_samples = convert_unit(nr_samples, &unit);
        printed = scnprintf(bf, size,
@@ -1135,6 +1241,96 @@ static inline bool is_report_browser(void *timer)
        return timer == NULL;
 }
 
+/*
+ * Only runtime switching of the perf data file makes "input_name" point to a
+ * malloc'ed buffer, so the "is_input_name_malloced" flag records whether the
+ * current "input_name" needs to be freed during the switch.
+ */
+static bool is_input_name_malloced = false;
+
+static int switch_data_file(void)
+{
+       char *pwd, *options[32], *abs_path[32], *tmp;
+       DIR *pwd_dir;
+       int nr_options = 0, choice = -1, ret = -1;
+       struct dirent *dent;
+
+       pwd = getenv("PWD");
+       if (!pwd)
+               return ret;
+
+       pwd_dir = opendir(pwd);
+       if (!pwd_dir)
+               return ret;
+
+       memset(options, 0, sizeof(options));
+       memset(abs_path, 0, sizeof(abs_path));
+
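+       /* Scan regular files in $PWD and keep those starting with a perf data magic. */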
+       while ((dent = readdir(pwd_dir))) {
+               char path[PATH_MAX];
+               u64 magic;
+               char *name = dent->d_name;
+               FILE *file;
+
+               if (dent->d_type != DT_REG)
+                       continue;
+
+               snprintf(path, sizeof(path), "%s/%s", pwd, name);
+
+               file = fopen(path, "r");
+               if (!file)
+                       continue;
+
+               if (fread(&magic, 1, 8, file) < 8)
+                       goto close_file_and_continue;
+
+               if (is_perf_magic(magic)) {
+                       options[nr_options] = strdup(name);
+                       if (!options[nr_options])
+                               goto close_file_and_continue;
+
+                       abs_path[nr_options] = strdup(path);
+                       if (!abs_path[nr_options]) {
+                               free(options[nr_options]);
+                               ui__warning("Can't search all data files due to memory shortage.\n");
+                               fclose(file);
+                               break;
+                       }
+
+                       nr_options++;
+               }
+
+close_file_and_continue:
+               fclose(file);
+               if (nr_options >= 32) {
+                       ui__warning("Too many perf data files in PWD!\n"
+                                   "Only the first 32 files will be listed.\n");
+                       break;
+               }
+       }
+       closedir(pwd_dir);
+
+       if (nr_options) {
+               choice = ui__popup_menu(nr_options, options);
+               if (choice < nr_options && choice >= 0) {
+                       tmp = strdup(abs_path[choice]);
+                       if (tmp) {
+                               if (is_input_name_malloced)
+                                       free((void *)input_name);
+                               input_name = tmp;
+                               is_input_name_malloced = true;
+                               ret = 0;
+                       } else
+                               ui__warning("Data switch failed due to memory shortage!\n");
+               }
+       }
+
+       free_popup_options(options, nr_options);
+       free_popup_options(abs_path, nr_options);
+       return ret;
+}
+
+
 static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                                    const char *helpline, const char *ev_name,
                                    bool left_exits,
@@ -1169,7 +1365,8 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                int choice = 0,
                    annotate = -2, zoom_dso = -2, zoom_thread = -2,
                    annotate_f = -2, annotate_t = -2, browse_map = -2;
-               int scripts_comm = -2, scripts_symbol = -2, scripts_all = -2;
+               int scripts_comm = -2, scripts_symbol = -2,
+                   scripts_all = -2, switch_data = -2;
 
                nr_options = 0;
 
@@ -1226,6 +1423,10 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                        if (is_report_browser(hbt))
                                goto do_scripts;
                        continue;
+               case 's':
+                       if (is_report_browser(hbt))
+                               goto do_data_switch;
+                       continue;
                case K_F1:
                case 'h':
                case '?':
@@ -1245,6 +1446,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                                        "d             Zoom into current DSO\n"
                                        "t             Zoom into current Thread\n"
                                        "r             Run available scripts('perf report' only)\n"
+                                       "s             Switch to another data file in PWD ('perf report' only)\n"
                                        "P             Print histograms to perf.hist.N\n"
                                        "V             Verbose (DSO names in callchains, etc)\n"
                                        "/             Filter symbol by name");
@@ -1352,6 +1554,9 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                if (asprintf(&options[nr_options], "Run scripts for all samples") > 0)
                        scripts_all = nr_options++;
 
+               if (is_report_browser(hbt) && asprintf(&options[nr_options],
+                               "Switch to another data file in PWD") > 0)
+                       switch_data = nr_options++;
 add_exit_option:
                options[nr_options++] = (char *)"Exit";
 retry_popup_menu:
@@ -1462,6 +1667,16 @@ do_scripts:
 
                        script_browse(script_opt);
                }
+               /* Switch to another data file */
+               else if (choice == switch_data) {
+do_data_switch:
+                       if (!switch_data_file()) {
+                               key = K_SWITCH_INPUT_DATA;
+                               break;
+                       } else
+                               ui__warning("Won't switch the data files:\n"
+                                       "no valid data file was selected!\n");
+               }
        }
 out_free_stack:
        pstack__delete(fstack);
@@ -1494,6 +1709,16 @@ static void perf_evsel_menu__write(struct ui_browser *browser,
        ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
                                                       HE_COLORSET_NORMAL);
 
+       if (symbol_conf.event_group && evsel->nr_members > 1) {
+               struct perf_evsel *pos;
+
+               ev_name = perf_evsel__group_name(evsel);
+
+               for_each_group_member(pos, evsel) {
+                       nr_events += pos->hists.stats.nr_events[PERF_RECORD_SAMPLE];
+               }
+       }
+
        nr_events = convert_unit(nr_events, &unit);
        printed = scnprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events,
                           unit, unit == ' ' ? "" : " ", ev_name);
@@ -1578,6 +1803,7 @@ browse_hists:
                                                "Do you really want to exit?"))
                                        continue;
                                /* Fall thru */
+                       case K_SWITCH_INPUT_DATA:
                        case 'q':
                        case CTRL('c'):
                                goto out;
@@ -1604,8 +1830,19 @@ out:
        return key;
 }
 
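+/*
+ * In event-group view only group leaders are listed in the events menu; the
+ * other members are filtered out.
+ */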
+static bool filter_group_entries(struct ui_browser *self __maybe_unused,
+                                void *entry)
+{
+       struct perf_evsel *evsel = list_entry(entry, struct perf_evsel, node);
+
+       if (symbol_conf.event_group && !perf_evsel__is_group_leader(evsel))
+               return true;
+
+       return false;
+}
+
 static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
-                                          const char *help,
+                                          int nr_entries, const char *help,
                                           struct hist_browser_timer *hbt,
                                           struct perf_session_env *env)
 {
@@ -1616,7 +1853,8 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
                        .refresh    = ui_browser__list_head_refresh,
                        .seek       = ui_browser__list_head_seek,
                        .write      = perf_evsel_menu__write,
-                       .nr_entries = evlist->nr_entries,
+                       .filter     = filter_group_entries,
+                       .nr_entries = nr_entries,
                        .priv       = evlist,
                },
                .env = env,
@@ -1632,20 +1870,37 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
                        menu.b.width = line_len;
        }
 
-       return perf_evsel_menu__run(&menu, evlist->nr_entries, help, hbt);
+       return perf_evsel_menu__run(&menu, nr_entries, help, hbt);
 }
 
 int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
                                  struct hist_browser_timer *hbt,
                                  struct perf_session_env *env)
 {
-       if (evlist->nr_entries == 1) {
+       int nr_entries = evlist->nr_entries;
+
+single_entry:
+       if (nr_entries == 1) {
                struct perf_evsel *first = list_entry(evlist->entries.next,
                                                      struct perf_evsel, node);
                const char *ev_name = perf_evsel__name(first);
-               return perf_evsel__hists_browse(first, evlist->nr_entries, help,
+
+               return perf_evsel__hists_browse(first, nr_entries, help,
                                                ev_name, false, hbt, env);
        }
 
-       return __perf_evlist__tui_browse_hists(evlist, help, hbt, env);
+       if (symbol_conf.event_group) {
+               struct perf_evsel *pos;
+
+               nr_entries = 0;
+               list_for_each_entry(pos, &evlist->entries, node)
+                       if (perf_evsel__is_group_leader(pos))
+                               nr_entries++;
+
+               if (nr_entries == 1)
+                       goto single_entry;
+       }
+
+       return __perf_evlist__tui_browse_hists(evlist, nr_entries, help,
+                                              hbt, env);
 }
diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
new file mode 100644 (file)
index 0000000..7d8dc58
--- /dev/null
@@ -0,0 +1,229 @@
+#include "gtk.h"
+#include "util/debug.h"
+#include "util/annotate.h"
+#include "ui/helpline.h"
+
+
+enum {
+       ANN_COL__PERCENT,
+       ANN_COL__OFFSET,
+       ANN_COL__LINE,
+
+       MAX_ANN_COLS
+};
+
+static const char *const col_names[] = {
+       "Overhead",
+       "Offset",
+       "Line"
+};
+
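+/*
+ * Format the hit percentage of one disassembly line with color markup; lines
+ * without an offset or without samples yield an empty string.
+ */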
+static int perf_gtk__get_percent(char *buf, size_t size, struct symbol *sym,
+                                struct disasm_line *dl, int evidx)
+{
+       struct sym_hist *symhist;
+       double percent = 0.0;
+       const char *markup;
+       int ret = 0;
+
+       strcpy(buf, "");
+
+       if (dl->offset == (s64) -1)
+               return 0;
+
+       symhist = annotation__histogram(symbol__annotation(sym), evidx);
+       if (!symhist->addr[dl->offset])
+               return 0;
+
+       percent = 100.0 * symhist->addr[dl->offset] / symhist->sum;
+
+       markup = perf_gtk__get_percent_color(percent);
+       if (markup)
+               ret += scnprintf(buf, size, "%s", markup);
+       ret += scnprintf(buf + ret, size - ret, "%6.2f%%", percent);
+       if (markup)
+               ret += scnprintf(buf + ret, size - ret, "</span>");
+
+       return ret;
+}
+
+static int perf_gtk__get_offset(char *buf, size_t size, struct symbol *sym,
+                               struct map *map, struct disasm_line *dl)
+{
+       u64 start = map__rip_2objdump(map, sym->start);
+
+       strcpy(buf, "");
+
+       if (dl->offset == (s64) -1)
+               return 0;
+
+       return scnprintf(buf, size, "%"PRIx64, start + dl->offset);
+}
+
+static int perf_gtk__get_line(char *buf, size_t size, struct disasm_line *dl)
+{
+       int ret = 0;
+       char *line = g_markup_escape_text(dl->line, -1);
+       const char *markup = "<span fgcolor='gray'>";
+
+       strcpy(buf, "");
+
+       if (!line)
+               return 0;
+
+       if (dl->offset != (s64) -1)
+               markup = NULL;
+
+       if (markup)
+               ret += scnprintf(buf, size, "%s", markup);
+       ret += scnprintf(buf + ret, size - ret, "%s", line);
+       if (markup)
+               ret += scnprintf(buf + ret, size - ret, "</span>");
+
+       g_free(line);
+       return ret;
+}
+
+static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym,
+                               struct map *map, int evidx,
+                               struct hist_browser_timer *hbt __maybe_unused)
+{
+       struct disasm_line *pos, *n;
+       struct annotation *notes;
+       GType col_types[MAX_ANN_COLS];
+       GtkCellRenderer *renderer;
+       GtkListStore *store;
+       GtkWidget *view;
+       int i;
+       char s[512];
+
+       notes = symbol__annotation(sym);
+
+       for (i = 0; i < MAX_ANN_COLS; i++) {
+               col_types[i] = G_TYPE_STRING;
+       }
+       store = gtk_list_store_newv(MAX_ANN_COLS, col_types);
+
+       view = gtk_tree_view_new();
+       renderer = gtk_cell_renderer_text_new();
+
+       for (i = 0; i < MAX_ANN_COLS; i++) {
+               gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
+                                       -1, col_names[i], renderer, "markup",
+                                       i, NULL);
+       }
+
+       gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store));
+       g_object_unref(GTK_TREE_MODEL(store));
+
+       list_for_each_entry(pos, &notes->src->source, node) {
+               GtkTreeIter iter;
+
+               gtk_list_store_append(store, &iter);
+
+               if (perf_gtk__get_percent(s, sizeof(s), sym, pos, evidx))
+                       gtk_list_store_set(store, &iter, ANN_COL__PERCENT, s, -1);
+               if (perf_gtk__get_offset(s, sizeof(s), sym, map, pos))
+                       gtk_list_store_set(store, &iter, ANN_COL__OFFSET, s, -1);
+               if (perf_gtk__get_line(s, sizeof(s), pos))
+                       gtk_list_store_set(store, &iter, ANN_COL__LINE, s, -1);
+       }
+
+       gtk_container_add(GTK_CONTAINER(window), view);
+
+       list_for_each_entry_safe(pos, n, &notes->src->source, node) {
+               list_del(&pos->node);
+               disasm_line__free(pos);
+       }
+
+       return 0;
+}
+
+int symbol__gtk_annotate(struct symbol *sym, struct map *map, int evidx,
+                        struct hist_browser_timer *hbt)
+{
+       GtkWidget *window;
+       GtkWidget *notebook;
+       GtkWidget *scrolled_window;
+       GtkWidget *tab_label;
+
+       if (map->dso->annotate_warned)
+               return -1;
+
+       if (symbol__annotate(sym, map, 0) < 0) {
+               ui__error("%s", ui_helpline__current);
+               return -1;
+       }
+
+       if (perf_gtk__is_active_context(pgctx)) {
+               window = pgctx->main_window;
+               notebook = pgctx->notebook;
+       } else {
+               GtkWidget *vbox;
+               GtkWidget *infobar;
+               GtkWidget *statbar;
+
+               signal(SIGSEGV, perf_gtk__signal);
+               signal(SIGFPE,  perf_gtk__signal);
+               signal(SIGINT,  perf_gtk__signal);
+               signal(SIGQUIT, perf_gtk__signal);
+               signal(SIGTERM, perf_gtk__signal);
+
+               window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
+               gtk_window_set_title(GTK_WINDOW(window), "perf annotate");
+
+               g_signal_connect(window, "delete_event", gtk_main_quit, NULL);
+
+               pgctx = perf_gtk__activate_context(window);
+               if (!pgctx)
+                       return -1;
+
+               vbox = gtk_vbox_new(FALSE, 0);
+               notebook = gtk_notebook_new();
+               pgctx->notebook = notebook;
+
+               gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0);
+
+               infobar = perf_gtk__setup_info_bar();
+               if (infobar) {
+                       gtk_box_pack_start(GTK_BOX(vbox), infobar,
+                                          FALSE, FALSE, 0);
+               }
+
+               statbar = perf_gtk__setup_statusbar();
+               gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0);
+
+               gtk_container_add(GTK_CONTAINER(window), vbox);
+       }
+
+       scrolled_window = gtk_scrolled_window_new(NULL, NULL);
+       tab_label = gtk_label_new(sym->name);
+
+       gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window),
+                                      GTK_POLICY_AUTOMATIC,
+                                      GTK_POLICY_AUTOMATIC);
+
+       gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window,
+                                tab_label);
+
+       perf_gtk__annotate_symbol(scrolled_window, sym, map, evidx, hbt);
+       return 0;
+}
+
+void perf_gtk__show_annotations(void)
+{
+       GtkWidget *window;
+
+       if (!perf_gtk__is_active_context(pgctx))
+               return;
+
+       window = pgctx->main_window;
+       gtk_widget_show_all(window);
+
+       perf_gtk__resize_window(window);
+       gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER);
+
+       gtk_main();
+
+       perf_gtk__deactivate_context(&pgctx);
+}
index 253b6219a39efbdd119f358571c22f5a83a7aef8..c95012cdb4387f95a4125f7da3f58cf224f33dc8 100644 (file)
@@ -8,15 +8,13 @@
 
 #include <signal.h>
 
-#define MAX_COLUMNS                    32
-
-static void perf_gtk__signal(int sig)
+void perf_gtk__signal(int sig)
 {
        perf_gtk__exit(false);
        psignal(sig, "perf");
 }
 
-static void perf_gtk__resize_window(GtkWidget *window)
+void perf_gtk__resize_window(GtkWidget *window)
 {
        GdkRectangle rect;
        GdkScreen *screen;
@@ -36,7 +34,7 @@ static void perf_gtk__resize_window(GtkWidget *window)
        gtk_window_resize(GTK_WINDOW(window), width, height);
 }
 
-static const char *perf_gtk__get_percent_color(double percent)
+const char *perf_gtk__get_percent_color(double percent)
 {
        if (percent >= MIN_RED)
                return "<span fgcolor='red'>";
@@ -45,155 +43,8 @@ static const char *perf_gtk__get_percent_color(double percent)
        return NULL;
 }
 
-#define HPP__COLOR_FN(_name, _field)                                           \
-static int perf_gtk__hpp_color_ ## _name(struct perf_hpp *hpp,                 \
-                                        struct hist_entry *he)                 \
-{                                                                              \
-       struct hists *hists = he->hists;                                        \
-       double percent = 100.0 * he->stat._field / hists->stats.total_period;   \
-       const char *markup;                                                     \
-       int ret = 0;                                                            \
-                                                                               \
-       markup = perf_gtk__get_percent_color(percent);                          \
-       if (markup)                                                             \
-               ret += scnprintf(hpp->buf, hpp->size, "%s", markup);            \
-       ret += scnprintf(hpp->buf + ret, hpp->size - ret, "%6.2f%%", percent);  \
-       if (markup)                                                             \
-               ret += scnprintf(hpp->buf + ret, hpp->size - ret, "</span>");   \
-                                                                               \
-       return ret;                                                             \
-}
-
-HPP__COLOR_FN(overhead, period)
-HPP__COLOR_FN(overhead_sys, period_sys)
-HPP__COLOR_FN(overhead_us, period_us)
-HPP__COLOR_FN(overhead_guest_sys, period_guest_sys)
-HPP__COLOR_FN(overhead_guest_us, period_guest_us)
-
-#undef HPP__COLOR_FN
-
-void perf_gtk__init_hpp(void)
-{
-       perf_hpp__init();
-
-       perf_hpp__format[PERF_HPP__OVERHEAD].color =
-                               perf_gtk__hpp_color_overhead;
-       perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color =
-                               perf_gtk__hpp_color_overhead_sys;
-       perf_hpp__format[PERF_HPP__OVERHEAD_US].color =
-                               perf_gtk__hpp_color_overhead_us;
-       perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color =
-                               perf_gtk__hpp_color_overhead_guest_sys;
-       perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color =
-                               perf_gtk__hpp_color_overhead_guest_us;
-}
-
-static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
-{
-       GType col_types[MAX_COLUMNS];
-       GtkCellRenderer *renderer;
-       struct sort_entry *se;
-       GtkListStore *store;
-       struct rb_node *nd;
-       GtkWidget *view;
-       int i, col_idx;
-       int nr_cols;
-       char s[512];
-
-       struct perf_hpp hpp = {
-               .buf            = s,
-               .size           = sizeof(s),
-       };
-
-       nr_cols = 0;
-
-       for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
-               if (!perf_hpp__format[i].cond)
-                       continue;
-
-               col_types[nr_cols++] = G_TYPE_STRING;
-       }
-
-       list_for_each_entry(se, &hist_entry__sort_list, list) {
-               if (se->elide)
-                       continue;
-
-               col_types[nr_cols++] = G_TYPE_STRING;
-       }
-
-       store = gtk_list_store_newv(nr_cols, col_types);
-
-       view = gtk_tree_view_new();
-
-       renderer = gtk_cell_renderer_text_new();
-
-       col_idx = 0;
-
-       for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
-               if (!perf_hpp__format[i].cond)
-                       continue;
-
-               perf_hpp__format[i].header(&hpp);
-
-               gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
-                                                           -1, s,
-                                                           renderer, "markup",
-                                                           col_idx++, NULL);
-       }
-
-       list_for_each_entry(se, &hist_entry__sort_list, list) {
-               if (se->elide)
-                       continue;
-
-               gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
-                                                           -1, se->se_header,
-                                                           renderer, "text",
-                                                           col_idx++, NULL);
-       }
-
-       gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store));
-
-       g_object_unref(GTK_TREE_MODEL(store));
-
-       for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
-               struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
-               GtkTreeIter iter;
-
-               if (h->filtered)
-                       continue;
-
-               gtk_list_store_append(store, &iter);
-
-               col_idx = 0;
-
-               for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
-                       if (!perf_hpp__format[i].cond)
-                               continue;
-
-                       if (perf_hpp__format[i].color)
-                               perf_hpp__format[i].color(&hpp, h);
-                       else
-                               perf_hpp__format[i].entry(&hpp, h);
-
-                       gtk_list_store_set(store, &iter, col_idx++, s, -1);
-               }
-
-               list_for_each_entry(se, &hist_entry__sort_list, list) {
-                       if (se->elide)
-                               continue;
-
-                       se->se_snprintf(h, s, ARRAY_SIZE(s),
-                                       hists__col_len(hists, se->se_width_idx));
-
-                       gtk_list_store_set(store, &iter, col_idx++, s, -1);
-               }
-       }
-
-       gtk_container_add(GTK_CONTAINER(window), view);
-}
-
 #ifdef HAVE_GTK_INFO_BAR
-static GtkWidget *perf_gtk__setup_info_bar(void)
+GtkWidget *perf_gtk__setup_info_bar(void)
 {
        GtkWidget *info_bar;
        GtkWidget *label;
@@ -220,7 +71,7 @@ static GtkWidget *perf_gtk__setup_info_bar(void)
 }
 #endif
 
-static GtkWidget *perf_gtk__setup_statusbar(void)
+GtkWidget *perf_gtk__setup_statusbar(void)
 {
        GtkWidget *stbar;
        unsigned ctxid;
@@ -234,79 +85,3 @@ static GtkWidget *perf_gtk__setup_statusbar(void)
 
        return stbar;
 }
-
-int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
-                                 const char *help,
-                                 struct hist_browser_timer *hbt __maybe_unused)
-{
-       struct perf_evsel *pos;
-       GtkWidget *vbox;
-       GtkWidget *notebook;
-       GtkWidget *info_bar;
-       GtkWidget *statbar;
-       GtkWidget *window;
-
-       signal(SIGSEGV, perf_gtk__signal);
-       signal(SIGFPE,  perf_gtk__signal);
-       signal(SIGINT,  perf_gtk__signal);
-       signal(SIGQUIT, perf_gtk__signal);
-       signal(SIGTERM, perf_gtk__signal);
-
-       window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
-
-       gtk_window_set_title(GTK_WINDOW(window), "perf report");
-
-       g_signal_connect(window, "delete_event", gtk_main_quit, NULL);
-
-       pgctx = perf_gtk__activate_context(window);
-       if (!pgctx)
-               return -1;
-
-       vbox = gtk_vbox_new(FALSE, 0);
-
-       notebook = gtk_notebook_new();
-
-       list_for_each_entry(pos, &evlist->entries, node) {
-               struct hists *hists = &pos->hists;
-               const char *evname = perf_evsel__name(pos);
-               GtkWidget *scrolled_window;
-               GtkWidget *tab_label;
-
-               scrolled_window = gtk_scrolled_window_new(NULL, NULL);
-
-               gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window),
-                                                       GTK_POLICY_AUTOMATIC,
-                                                       GTK_POLICY_AUTOMATIC);
-
-               perf_gtk__show_hists(scrolled_window, hists);
-
-               tab_label = gtk_label_new(evname);
-
-               gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label);
-       }
-
-       gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0);
-
-       info_bar = perf_gtk__setup_info_bar();
-       if (info_bar)
-               gtk_box_pack_start(GTK_BOX(vbox), info_bar, FALSE, FALSE, 0);
-
-       statbar = perf_gtk__setup_statusbar();
-       gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0);
-
-       gtk_container_add(GTK_CONTAINER(window), vbox);
-
-       gtk_widget_show_all(window);
-
-       perf_gtk__resize_window(window);
-
-       gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER);
-
-       ui_helpline__push(help);
-
-       gtk_main();
-
-       perf_gtk__deactivate_context(&pgctx);
-
-       return 0;
-}
index 856320e2cc05c796a5774de845f24fda78dcef3e..3d96785ef155044d006f4438bf35b17ee3717148 100644 (file)
@@ -10,6 +10,7 @@
 
 struct perf_gtk_context {
        GtkWidget *main_window;
+       GtkWidget *notebook;
 
 #ifdef HAVE_GTK_INFO_BAR
        GtkWidget *info_bar;
@@ -33,7 +34,14 @@ void perf_gtk__init_helpline(void);
 void perf_gtk__init_progress(void);
 void perf_gtk__init_hpp(void);
 
-#ifndef HAVE_GTK_INFO_BAR
+void perf_gtk__signal(int sig);
+void perf_gtk__resize_window(GtkWidget *window);
+const char *perf_gtk__get_percent_color(double percent);
+GtkWidget *perf_gtk__setup_statusbar(void);
+
+#ifdef HAVE_GTK_INFO_BAR
+GtkWidget *perf_gtk__setup_info_bar(void);
+#else
 static inline GtkWidget *perf_gtk__setup_info_bar(void)
 {
        return NULL;
index 5db4432ff12a0d3c538da300ae66a98c9622dfd8..3388cbd12186ac5848562e0de10faa7afa025982 100644 (file)
@@ -24,17 +24,7 @@ static void gtk_helpline_push(const char *msg)
                           pgctx->statbar_ctx_id, msg);
 }
 
-static struct ui_helpline gtk_helpline_fns = {
-       .pop    = gtk_helpline_pop,
-       .push   = gtk_helpline_push,
-};
-
-void perf_gtk__init_helpline(void)
-{
-       helpline_fns = &gtk_helpline_fns;
-}
-
-int perf_gtk__show_helpline(const char *fmt, va_list ap)
+static int gtk_helpline_show(const char *fmt, va_list ap)
 {
        int ret;
        char *ptr;
@@ -54,3 +44,14 @@ int perf_gtk__show_helpline(const char *fmt, va_list ap)
 
        return ret;
 }
+
+static struct ui_helpline gtk_helpline_fns = {
+       .pop    = gtk_helpline_pop,
+       .push   = gtk_helpline_push,
+       .show   = gtk_helpline_show,
+};
+
+void perf_gtk__init_helpline(void)
+{
+       helpline_fns = &gtk_helpline_fns;
+}
diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c
new file mode 100644 (file)
index 0000000..1e764a8
--- /dev/null
+++ b/tools/perf/ui/gtk/hists.c
@@ -0,0 +1,312 @@
+#include "../evlist.h"
+#include "../cache.h"
+#include "../evsel.h"
+#include "../sort.h"
+#include "../hist.h"
+#include "../helpline.h"
+#include "gtk.h"
+
+#define MAX_COLUMNS                    32
+
+static int __percent_color_snprintf(char *buf, size_t size, double percent)
+{
+       int ret = 0;
+       const char *markup;
+
+       markup = perf_gtk__get_percent_color(percent);
+       if (markup)
+               ret += scnprintf(buf, size, markup);
+
+       ret += scnprintf(buf + ret, size - ret, " %6.2f%%", percent);
+
+       if (markup)
+               ret += scnprintf(buf + ret, size - ret, "</span>");
+
+       return ret;
+}
+
+
+static int __hpp__color_fmt(struct perf_hpp *hpp, struct hist_entry *he,
+                           u64 (*get_field)(struct hist_entry *))
+{
+       int ret;
+       double percent = 0.0;
+       struct hists *hists = he->hists;
+
+       if (hists->stats.total_period)
+               percent = 100.0 * get_field(he) / hists->stats.total_period;
+
+       ret = __percent_color_snprintf(hpp->buf, hpp->size, percent);
+
+       if (symbol_conf.event_group) {
+               int prev_idx, idx_delta;
+               struct perf_evsel *evsel = hists_to_evsel(hists);
+               struct hist_entry *pair;
+               int nr_members = evsel->nr_members;
+
+               if (nr_members <= 1)
+                       return ret;
+
+               prev_idx = perf_evsel__group_idx(evsel);
+
+               list_for_each_entry(pair, &he->pairs.head, pairs.node) {
+                       u64 period = get_field(pair);
+                       u64 total = pair->hists->stats.total_period;
+
+                       evsel = hists_to_evsel(pair->hists);
+                       idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;
+
+                       while (idx_delta--) {
+                               /*
+                                * zero-fill group members in the middle which
+                                * have no sample
+                                */
+                               ret += __percent_color_snprintf(hpp->buf + ret,
+                                                               hpp->size - ret,
+                                                               0.0);
+                       }
+
+                       percent = 100.0 * period / total;
+                       ret += __percent_color_snprintf(hpp->buf + ret,
+                                                       hpp->size - ret,
+                                                       percent);
+
+                       prev_idx = perf_evsel__group_idx(evsel);
+               }
+
+               idx_delta = nr_members - prev_idx - 1;
+
+               while (idx_delta--) {
+                       /*
+                        * zero-fill group members at last which have no sample
+                        */
+                       ret += __percent_color_snprintf(hpp->buf + ret,
+                                                       hpp->size - ret,
+                                                       0.0);
+               }
+       }
+       return ret;
+}
+
+#define __HPP_COLOR_PERCENT_FN(_type, _field)                                  \
+static u64 he_get_##_field(struct hist_entry *he)                              \
+{                                                                              \
+       return he->stat._field;                                                 \
+}                                                                              \
+                                                                               \
+static int perf_gtk__hpp_color_##_type(struct perf_hpp *hpp,                   \
+                                      struct hist_entry *he)                   \
+{                                                                              \
+       return __hpp__color_fmt(hpp, he, he_get_##_field);                      \
+}
+
+__HPP_COLOR_PERCENT_FN(overhead, period)
+__HPP_COLOR_PERCENT_FN(overhead_sys, period_sys)
+__HPP_COLOR_PERCENT_FN(overhead_us, period_us)
+__HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys)
+__HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us)
+
+#undef __HPP_COLOR_PERCENT_FN
+
+
+void perf_gtk__init_hpp(void)
+{
+       perf_hpp__column_enable(PERF_HPP__OVERHEAD);
+
+       perf_hpp__init();
+
+       perf_hpp__format[PERF_HPP__OVERHEAD].color =
+                               perf_gtk__hpp_color_overhead;
+       perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color =
+                               perf_gtk__hpp_color_overhead_sys;
+       perf_hpp__format[PERF_HPP__OVERHEAD_US].color =
+                               perf_gtk__hpp_color_overhead_us;
+       perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color =
+                               perf_gtk__hpp_color_overhead_guest_sys;
+       perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color =
+                               perf_gtk__hpp_color_overhead_guest_us;
+}
+
+static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
+{
+       struct perf_hpp_fmt *fmt;
+       GType col_types[MAX_COLUMNS];
+       GtkCellRenderer *renderer;
+       struct sort_entry *se;
+       GtkListStore *store;
+       struct rb_node *nd;
+       GtkWidget *view;
+       int col_idx;
+       int nr_cols;
+       char s[512];
+
+       struct perf_hpp hpp = {
+               .buf            = s,
+               .size           = sizeof(s),
+               .ptr            = hists_to_evsel(hists),
+       };
+
+       nr_cols = 0;
+
+       perf_hpp__for_each_format(fmt)
+               col_types[nr_cols++] = G_TYPE_STRING;
+
+       list_for_each_entry(se, &hist_entry__sort_list, list) {
+               if (se->elide)
+                       continue;
+
+               col_types[nr_cols++] = G_TYPE_STRING;
+       }
+
+       store = gtk_list_store_newv(nr_cols, col_types);
+
+       view = gtk_tree_view_new();
+
+       renderer = gtk_cell_renderer_text_new();
+
+       col_idx = 0;
+
+       perf_hpp__for_each_format(fmt) {
+               fmt->header(&hpp);
+
+               gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
+                                                           -1, ltrim(s),
+                                                           renderer, "markup",
+                                                           col_idx++, NULL);
+       }
+
+       list_for_each_entry(se, &hist_entry__sort_list, list) {
+               if (se->elide)
+                       continue;
+
+               gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
+                                                           -1, se->se_header,
+                                                           renderer, "text",
+                                                           col_idx++, NULL);
+       }
+
+       gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store));
+
+       g_object_unref(GTK_TREE_MODEL(store));
+
+       for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+               struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+               GtkTreeIter iter;
+
+               if (h->filtered)
+                       continue;
+
+               gtk_list_store_append(store, &iter);
+
+               col_idx = 0;
+
+               perf_hpp__for_each_format(fmt) {
+                       if (fmt->color)
+                               fmt->color(&hpp, h);
+                       else
+                               fmt->entry(&hpp, h);
+
+                       gtk_list_store_set(store, &iter, col_idx++, s, -1);
+               }
+
+               list_for_each_entry(se, &hist_entry__sort_list, list) {
+                       if (se->elide)
+                               continue;
+
+                       se->se_snprintf(h, s, ARRAY_SIZE(s),
+                                       hists__col_len(hists, se->se_width_idx));
+
+                       gtk_list_store_set(store, &iter, col_idx++, s, -1);
+               }
+       }
+
+       gtk_container_add(GTK_CONTAINER(window), view);
+}
+
+int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
+                                 const char *help,
+                                 struct hist_browser_timer *hbt __maybe_unused)
+{
+       struct perf_evsel *pos;
+       GtkWidget *vbox;
+       GtkWidget *notebook;
+       GtkWidget *info_bar;
+       GtkWidget *statbar;
+       GtkWidget *window;
+
+       signal(SIGSEGV, perf_gtk__signal);
+       signal(SIGFPE,  perf_gtk__signal);
+       signal(SIGINT,  perf_gtk__signal);
+       signal(SIGQUIT, perf_gtk__signal);
+       signal(SIGTERM, perf_gtk__signal);
+
+       window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
+
+       gtk_window_set_title(GTK_WINDOW(window), "perf report");
+
+       g_signal_connect(window, "delete_event", gtk_main_quit, NULL);
+
+       pgctx = perf_gtk__activate_context(window);
+       if (!pgctx)
+               return -1;
+
+       vbox = gtk_vbox_new(FALSE, 0);
+
+       notebook = gtk_notebook_new();
+
+       gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0);
+
+       info_bar = perf_gtk__setup_info_bar();
+       if (info_bar)
+               gtk_box_pack_start(GTK_BOX(vbox), info_bar, FALSE, FALSE, 0);
+
+       statbar = perf_gtk__setup_statusbar();
+       gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0);
+
+       gtk_container_add(GTK_CONTAINER(window), vbox);
+
+       list_for_each_entry(pos, &evlist->entries, node) {
+               struct hists *hists = &pos->hists;
+               const char *evname = perf_evsel__name(pos);
+               GtkWidget *scrolled_window;
+               GtkWidget *tab_label;
+               char buf[512];
+               size_t size = sizeof(buf);
+
+               if (symbol_conf.event_group) {
+                       if (!perf_evsel__is_group_leader(pos))
+                               continue;
+
+                       if (pos->nr_members > 1) {
+                               perf_evsel__group_desc(pos, buf, size);
+                               evname = buf;
+                       }
+               }
+
+               scrolled_window = gtk_scrolled_window_new(NULL, NULL);
+
+               gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled_window),
+                                                       GTK_POLICY_AUTOMATIC,
+                                                       GTK_POLICY_AUTOMATIC);
+
+               perf_gtk__show_hists(scrolled_window, hists);
+
+               tab_label = gtk_label_new(evname);
+
+               gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label);
+       }
+
+       gtk_widget_show_all(window);
+
+       perf_gtk__resize_window(window);
+
+       gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER);
+
+       ui_helpline__push(help);
+
+       gtk_main();
+
+       perf_gtk__deactivate_context(&pgctx);
+
+       return 0;
+}
index a49bcf3c190b4066305cdf5af889087365d43608..700fb3cfa1c739128bc0c8009039f48e1f964c93 100644 (file)
@@ -16,9 +16,16 @@ static void nop_helpline__push(const char *msg __maybe_unused)
 {
 }
 
+static int nop_helpline__show(const char *fmt __maybe_unused,
+                              va_list ap __maybe_unused)
+{
+       return 0;
+}
+
 static struct ui_helpline default_helpline_fns = {
        .pop    = nop_helpline__pop,
        .push   = nop_helpline__push,
+       .show   = nop_helpline__show,
 };
 
 struct ui_helpline *helpline_fns = &default_helpline_fns;
@@ -59,3 +66,8 @@ void ui_helpline__puts(const char *msg)
        ui_helpline__pop();
        ui_helpline__push(msg);
 }
+
+int ui_helpline__vshow(const char *fmt, va_list ap)
+{
+       return helpline_fns->show(fmt, ap);
+}
index baa28a4d16b94dedafb02cf1f86fe821605fdf1d..46181f4fc07ebf2133e97898bd21acf5fe9c7551 100644 (file)
@@ -9,6 +9,7 @@
 struct ui_helpline {
        void (*pop)(void);
        void (*push)(const char *msg);
+       int  (*show)(const char *fmt, va_list ap);
 };
 
 extern struct ui_helpline *helpline_fns;
@@ -20,28 +21,9 @@ void ui_helpline__push(const char *msg);
 void ui_helpline__vpush(const char *fmt, va_list ap);
 void ui_helpline__fpush(const char *fmt, ...);
 void ui_helpline__puts(const char *msg);
+int  ui_helpline__vshow(const char *fmt, va_list ap);
 
 extern char ui_helpline__current[512];
-
-#ifdef NEWT_SUPPORT
 extern char ui_helpline__last_msg[];
-int ui_helpline__show_help(const char *format, va_list ap);
-#else
-static inline int ui_helpline__show_help(const char *format __maybe_unused,
-                                        va_list ap __maybe_unused)
-{
-       return 0;
-}
-#endif /* NEWT_SUPPORT */
-
-#ifdef GTK2_SUPPORT
-int perf_gtk__show_helpline(const char *format, va_list ap);
-#else
-static inline int perf_gtk__show_helpline(const char *format __maybe_unused,
-                                         va_list ap __maybe_unused)
-{
-       return 0;
-}
-#endif /* GTK2_SUPPORT */
 
 #endif /* _PERF_UI_HELPLINE_H_ */
index aa84130024d58b3956bce290e09fa3eb4e982c2f..d671e63aa351ce4f5ccbf864fe72db80b5d2ffd6 100644 (file)
 #include "../util/hist.h"
 #include "../util/util.h"
 #include "../util/sort.h"
-
+#include "../util/evsel.h"
 
 /* hist period print (hpp) functions */
-static int hpp__header_overhead(struct perf_hpp *hpp)
-{
-       return scnprintf(hpp->buf, hpp->size, "Overhead");
-}
-
-static int hpp__width_overhead(struct perf_hpp *hpp __maybe_unused)
-{
-       return 8;
-}
-
-static int hpp__color_overhead(struct perf_hpp *hpp, struct hist_entry *he)
-{
-       struct hists *hists = he->hists;
-       double percent = 100.0 * he->stat.period / hists->stats.total_period;
 
-       return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%%", percent);
-}
+typedef int (*hpp_snprint_fn)(char *buf, size_t size, const char *fmt, ...);
 
-static int hpp__entry_overhead(struct perf_hpp *hpp, struct hist_entry *he)
+static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
+                     u64 (*get_field)(struct hist_entry *),
+                     const char *fmt, hpp_snprint_fn print_fn,
+                     bool fmt_percent)
 {
+       int ret;
        struct hists *hists = he->hists;
-       double percent = 100.0 * he->stat.period / hists->stats.total_period;
-       const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%%";
-
-       return scnprintf(hpp->buf, hpp->size, fmt, percent);
-}
 
-static int hpp__header_overhead_sys(struct perf_hpp *hpp)
-{
-       const char *fmt = symbol_conf.field_sep ? "%s" : "%7s";
-
-       return scnprintf(hpp->buf, hpp->size, fmt, "sys");
-}
+       if (fmt_percent) {
+               double percent = 0.0;
 
-static int hpp__width_overhead_sys(struct perf_hpp *hpp __maybe_unused)
-{
-       return 7;
-}
+               if (hists->stats.total_period)
+                       percent = 100.0 * get_field(he) /
+                                 hists->stats.total_period;
 
-static int hpp__color_overhead_sys(struct perf_hpp *hpp, struct hist_entry *he)
-{
-       struct hists *hists = he->hists;
-       double percent = 100.0 * he->stat.period_sys / hists->stats.total_period;
+               ret = print_fn(hpp->buf, hpp->size, fmt, percent);
+       } else
+               ret = print_fn(hpp->buf, hpp->size, fmt, get_field(he));
 
-       return percent_color_snprintf(hpp->buf, hpp->size, "%6.2f%%", percent);
-}
+       if (symbol_conf.event_group) {
+               int prev_idx, idx_delta;
+               struct perf_evsel *evsel = hists_to_evsel(hists);
+               struct hist_entry *pair;
+               int nr_members = evsel->nr_members;
 
-static int hpp__entry_overhead_sys(struct perf_hpp *hpp, struct hist_entry *he)
-{
-       struct hists *hists = he->hists;
-       double percent = 100.0 * he->stat.period_sys / hists->stats.total_period;
-       const char *fmt = symbol_conf.field_sep ? "%.2f" : "%6.2f%%";
+               if (nr_members <= 1)
+                       return ret;
 
-       return scnprintf(hpp->buf, hpp->size, fmt, percent);
-}
+               prev_idx = perf_evsel__group_idx(evsel);
 
-static int hpp__header_overhead_us(struct perf_hpp *hpp)
-{
-       const char *fmt = symbol_conf.field_sep ? "%s" : "%7s";
+               list_for_each_entry(pair, &he->pairs.head, pairs.node) {
+                       u64 period = get_field(pair);
+                       u64 total = pair->hists->stats.total_period;
 
-       return scnprintf(hpp->buf, hpp->size, fmt, "user");
-}
+                       if (!total)
+                               continue;
 
-static int hpp__width_overhead_us(struct perf_hpp *hpp __maybe_unused)
-{
-       return 7;
-}
+                       evsel = hists_to_evsel(pair->hists);
+                       idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;
 
-static int hpp__color_overhead_us(struct perf_hpp *hpp, struct hist_entry *he)
-{
-       struct hists *hists = he->hists;
-       double percent = 100.0 * he->stat.period_us / hists->stats.total_period;
+                       while (idx_delta--) {
+                               /*
+                                * zero-fill group members in the middle which
+                                * have no sample
+                                */
+                               ret += print_fn(hpp->buf + ret, hpp->size - ret,
+                                               fmt, 0);
+                       }
 
-       return percent_color_snprintf(hpp->buf, hpp->size, "%6.2f%%", percent);
-}
+                       if (fmt_percent)
+                               ret += print_fn(hpp->buf + ret, hpp->size - ret,
+                                               fmt, 100.0 * period / total);
+                       else
+                               ret += print_fn(hpp->buf + ret, hpp->size - ret,
+                                               fmt, period);
 
-static int hpp__entry_overhead_us(struct perf_hpp *hpp, struct hist_entry *he)
-{
-       struct hists *hists = he->hists;
-       double percent = 100.0 * he->stat.period_us / hists->stats.total_period;
-       const char *fmt = symbol_conf.field_sep ? "%.2f" : "%6.2f%%";
-
-       return scnprintf(hpp->buf, hpp->size, fmt, percent);
-}
-
-static int hpp__header_overhead_guest_sys(struct perf_hpp *hpp)
-{
-       return scnprintf(hpp->buf, hpp->size, "guest sys");
-}
-
-static int hpp__width_overhead_guest_sys(struct perf_hpp *hpp __maybe_unused)
-{
-       return 9;
-}
-
-static int hpp__color_overhead_guest_sys(struct perf_hpp *hpp,
-                                        struct hist_entry *he)
-{
-       struct hists *hists = he->hists;
-       double percent = 100.0 * he->stat.period_guest_sys / hists->stats.total_period;
-
-       return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%% ", percent);
-}
-
-static int hpp__entry_overhead_guest_sys(struct perf_hpp *hpp,
-                                        struct hist_entry *he)
-{
-       struct hists *hists = he->hists;
-       double percent = 100.0 * he->stat.period_guest_sys / hists->stats.total_period;
-       const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%% ";
-
-       return scnprintf(hpp->buf, hpp->size, fmt, percent);
-}
-
-static int hpp__header_overhead_guest_us(struct perf_hpp *hpp)
-{
-       return scnprintf(hpp->buf, hpp->size, "guest usr");
-}
+                       prev_idx = perf_evsel__group_idx(evsel);
+               }
 
-static int hpp__width_overhead_guest_us(struct perf_hpp *hpp __maybe_unused)
-{
-       return 9;
-}
+               idx_delta = nr_members - prev_idx - 1;
 
-static int hpp__color_overhead_guest_us(struct perf_hpp *hpp,
-                                       struct hist_entry *he)
-{
-       struct hists *hists = he->hists;
-       double percent = 100.0 * he->stat.period_guest_us / hists->stats.total_period;
-
-       return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%% ", percent);
+               while (idx_delta--) {
+                       /*
+                        * zero-fill group members at last which have no sample
+                        */
+                       ret += print_fn(hpp->buf + ret, hpp->size - ret,
+                                       fmt, 0);
+               }
+       }
+       return ret;
 }
 
-static int hpp__entry_overhead_guest_us(struct perf_hpp *hpp,
-                                       struct hist_entry *he)
-{
-       struct hists *hists = he->hists;
-       double percent = 100.0 * he->stat.period_guest_us / hists->stats.total_period;
-       const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%% ";
+#define __HPP_HEADER_FN(_type, _str, _min_width, _unit_width)          \
+static int hpp__header_##_type(struct perf_hpp *hpp)                   \
+{                                                                      \
+       int len = _min_width;                                           \
+                                                                       \
+       if (symbol_conf.event_group) {                                  \
+               struct perf_evsel *evsel = hpp->ptr;                    \
+                                                                       \
+               len = max(len, evsel->nr_members * _unit_width);        \
+       }                                                               \
+       return scnprintf(hpp->buf, hpp->size, "%*s", len, _str);        \
+}
+
+#define __HPP_WIDTH_FN(_type, _min_width, _unit_width)                         \
+static int hpp__width_##_type(struct perf_hpp *hpp __maybe_unused)     \
+{                                                                      \
+       int len = _min_width;                                           \
+                                                                       \
+       if (symbol_conf.event_group) {                                  \
+               struct perf_evsel *evsel = hpp->ptr;                    \
+                                                                       \
+               len = max(len, evsel->nr_members * _unit_width);        \
+       }                                                               \
+       return len;                                                     \
+}
+
+#define __HPP_COLOR_PERCENT_FN(_type, _field)                                  \
+static u64 he_get_##_field(struct hist_entry *he)                              \
+{                                                                              \
+       return he->stat._field;                                                 \
+}                                                                              \
+                                                                               \
+static int hpp__color_##_type(struct perf_hpp *hpp, struct hist_entry *he)     \
+{                                                                              \
+       return __hpp__fmt(hpp, he, he_get_##_field, " %6.2f%%",                 \
+                         (hpp_snprint_fn)percent_color_snprintf, true);        \
+}
+
+#define __HPP_ENTRY_PERCENT_FN(_type, _field)                                  \
+static int hpp__entry_##_type(struct perf_hpp *hpp, struct hist_entry *he)     \
+{                                                                              \
+       const char *fmt = symbol_conf.field_sep ? " %.2f" : " %6.2f%%";         \
+       return __hpp__fmt(hpp, he, he_get_##_field, fmt,                        \
+                         scnprintf, true);                                     \
+}
+
+#define __HPP_ENTRY_RAW_FN(_type, _field)                                      \
+static u64 he_get_raw_##_field(struct hist_entry *he)                          \
+{                                                                              \
+       return he->stat._field;                                                 \
+}                                                                              \
+                                                                               \
+static int hpp__entry_##_type(struct perf_hpp *hpp, struct hist_entry *he)     \
+{                                                                              \
+       const char *fmt = symbol_conf.field_sep ? " %"PRIu64 : " %11"PRIu64;    \
+       return __hpp__fmt(hpp, he, he_get_raw_##_field, fmt, scnprintf, false); \
+}
+
+#define HPP_PERCENT_FNS(_type, _str, _field, _min_width, _unit_width)  \
+__HPP_HEADER_FN(_type, _str, _min_width, _unit_width)                  \
+__HPP_WIDTH_FN(_type, _min_width, _unit_width)                         \
+__HPP_COLOR_PERCENT_FN(_type, _field)                                  \
+__HPP_ENTRY_PERCENT_FN(_type, _field)
+
+#define HPP_RAW_FNS(_type, _str, _field, _min_width, _unit_width)      \
+__HPP_HEADER_FN(_type, _str, _min_width, _unit_width)                  \
+__HPP_WIDTH_FN(_type, _min_width, _unit_width)                         \
+__HPP_ENTRY_RAW_FN(_type, _field)
+
+
+HPP_PERCENT_FNS(overhead, "Overhead", period, 8, 8)
+HPP_PERCENT_FNS(overhead_sys, "sys", period_sys, 8, 8)
+HPP_PERCENT_FNS(overhead_us, "usr", period_us, 8, 8)
+HPP_PERCENT_FNS(overhead_guest_sys, "guest sys", period_guest_sys, 9, 8)
+HPP_PERCENT_FNS(overhead_guest_us, "guest usr", period_guest_us, 9, 8)
+
+HPP_RAW_FNS(samples, "Samples", nr_events, 12, 12)
+HPP_RAW_FNS(period, "Period", period, 12, 12)
 
-       return scnprintf(hpp->buf, hpp->size, fmt, percent);
-}
 
 static int hpp__header_baseline(struct perf_hpp *hpp)
 {
@@ -179,7 +191,7 @@ static int hpp__color_baseline(struct perf_hpp *hpp, struct hist_entry *he)
 {
        double percent = baseline_percent(he);
 
-       if (hist_entry__has_pairs(he))
+       if (hist_entry__has_pairs(he) || symbol_conf.field_sep)
                return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%%", percent);
        else
                return scnprintf(hpp->buf, hpp->size, "        ");
@@ -196,44 +208,6 @@ static int hpp__entry_baseline(struct perf_hpp *hpp, struct hist_entry *he)
                return scnprintf(hpp->buf, hpp->size, "            ");
 }
 
-static int hpp__header_samples(struct perf_hpp *hpp)
-{
-       const char *fmt = symbol_conf.field_sep ? "%s" : "%11s";
-
-       return scnprintf(hpp->buf, hpp->size, fmt, "Samples");
-}
-
-static int hpp__width_samples(struct perf_hpp *hpp __maybe_unused)
-{
-       return 11;
-}
-
-static int hpp__entry_samples(struct perf_hpp *hpp, struct hist_entry *he)
-{
-       const char *fmt = symbol_conf.field_sep ? "%" PRIu64 : "%11" PRIu64;
-
-       return scnprintf(hpp->buf, hpp->size, fmt, he->stat.nr_events);
-}
-
-static int hpp__header_period(struct perf_hpp *hpp)
-{
-       const char *fmt = symbol_conf.field_sep ? "%s" : "%12s";
-
-       return scnprintf(hpp->buf, hpp->size, fmt, "Period");
-}
-
-static int hpp__width_period(struct perf_hpp *hpp __maybe_unused)
-{
-       return 12;
-}
-
-static int hpp__entry_period(struct perf_hpp *hpp, struct hist_entry *he)
-{
-       const char *fmt = symbol_conf.field_sep ? "%" PRIu64 : "%12" PRIu64;
-
-       return scnprintf(hpp->buf, hpp->size, fmt, he->stat.period);
-}
-
 static int hpp__header_period_baseline(struct perf_hpp *hpp)
 {
        const char *fmt = symbol_conf.field_sep ? "%s" : "%12s";
@@ -254,6 +228,7 @@ static int hpp__entry_period_baseline(struct perf_hpp *hpp, struct hist_entry *h
 
        return scnprintf(hpp->buf, hpp->size, fmt, period);
 }
+
 static int hpp__header_delta(struct perf_hpp *hpp)
 {
        const char *fmt = symbol_conf.field_sep ? "%s" : "%7s";
@@ -268,14 +243,18 @@ static int hpp__width_delta(struct perf_hpp *hpp __maybe_unused)
 
 static int hpp__entry_delta(struct perf_hpp *hpp, struct hist_entry *he)
 {
+       struct hist_entry *pair = hist_entry__next_pair(he);
        const char *fmt = symbol_conf.field_sep ? "%s" : "%7.7s";
        char buf[32] = " ";
-       double diff;
+       double diff = 0.0;
 
-       if (he->diff.computed)
-               diff = he->diff.period_ratio_delta;
-       else
-               diff = perf_diff__compute_delta(he);
+       if (pair) {
+               if (he->diff.computed)
+                       diff = he->diff.period_ratio_delta;
+               else
+                       diff = perf_diff__compute_delta(he, pair);
+       } else
+               diff = perf_diff__period_percent(he, he->stat.period);
 
        if (fabs(diff) >= 0.01)
                scnprintf(buf, sizeof(buf), "%+4.2F%%", diff);
@@ -297,14 +276,17 @@ static int hpp__width_ratio(struct perf_hpp *hpp __maybe_unused)
 
 static int hpp__entry_ratio(struct perf_hpp *hpp, struct hist_entry *he)
 {
+       struct hist_entry *pair = hist_entry__next_pair(he);
        const char *fmt = symbol_conf.field_sep ? "%s" : "%14s";
        char buf[32] = " ";
-       double ratio;
+       double ratio = 0.0;
 
-       if (he->diff.computed)
-               ratio = he->diff.period_ratio;
-       else
-               ratio = perf_diff__compute_ratio(he);
+       if (pair) {
+               if (he->diff.computed)
+                       ratio = he->diff.period_ratio;
+               else
+                       ratio = perf_diff__compute_ratio(he, pair);
+       }
 
        if (ratio > 0.0)
                scnprintf(buf, sizeof(buf), "%+14.6F", ratio);
@@ -326,14 +308,17 @@ static int hpp__width_wdiff(struct perf_hpp *hpp __maybe_unused)
 
 static int hpp__entry_wdiff(struct perf_hpp *hpp, struct hist_entry *he)
 {
+       struct hist_entry *pair = hist_entry__next_pair(he);
        const char *fmt = symbol_conf.field_sep ? "%s" : "%14s";
        char buf[32] = " ";
-       s64 wdiff;
+       s64 wdiff = 0;
 
-       if (he->diff.computed)
-               wdiff = he->diff.wdiff;
-       else
-               wdiff = perf_diff__compute_wdiff(he);
+       if (pair) {
+               if (he->diff.computed)
+                       wdiff = he->diff.wdiff;
+               else
+                       wdiff = perf_diff__compute_wdiff(he, pair);
+       }
 
        if (wdiff != 0)
                scnprintf(buf, sizeof(buf), "%14ld", wdiff);
@@ -341,30 +326,6 @@ static int hpp__entry_wdiff(struct perf_hpp *hpp, struct hist_entry *he)
        return scnprintf(hpp->buf, hpp->size, fmt, buf);
 }
 
-static int hpp__header_displ(struct perf_hpp *hpp)
-{
-       return scnprintf(hpp->buf, hpp->size, "Displ.");
-}
-
-static int hpp__width_displ(struct perf_hpp *hpp __maybe_unused)
-{
-       return 6;
-}
-
-static int hpp__entry_displ(struct perf_hpp *hpp,
-                           struct hist_entry *he)
-{
-       struct hist_entry *pair = hist_entry__next_pair(he);
-       long displacement = pair ? pair->position - he->position : 0;
-       const char *fmt = symbol_conf.field_sep ? "%s" : "%6.6s";
-       char buf[32] = " ";
-
-       if (displacement)
-               scnprintf(buf, sizeof(buf), "%+4ld", displacement);
-
-       return scnprintf(hpp->buf, hpp->size, fmt, buf);
-}
-
 static int hpp__header_formula(struct perf_hpp *hpp)
 {
        const char *fmt = symbol_conf.field_sep ? "%s" : "%70s";
@@ -379,67 +340,91 @@ static int hpp__width_formula(struct perf_hpp *hpp __maybe_unused)
 
 static int hpp__entry_formula(struct perf_hpp *hpp, struct hist_entry *he)
 {
+       struct hist_entry *pair = hist_entry__next_pair(he);
        const char *fmt = symbol_conf.field_sep ? "%s" : "%-70s";
        char buf[96] = " ";
 
-       perf_diff__formula(buf, sizeof(buf), he);
+       if (pair)
+               perf_diff__formula(he, pair, buf, sizeof(buf));
+
        return scnprintf(hpp->buf, hpp->size, fmt, buf);
 }
 
-#define HPP__COLOR_PRINT_FNS(_name)            \
-       .header = hpp__header_ ## _name,                \
-       .width  = hpp__width_ ## _name,         \
-       .color  = hpp__color_ ## _name,         \
-       .entry  = hpp__entry_ ## _name
+#define HPP__COLOR_PRINT_FNS(_name)                    \
+       {                                               \
+               .header = hpp__header_ ## _name,        \
+               .width  = hpp__width_ ## _name,         \
+               .color  = hpp__color_ ## _name,         \
+               .entry  = hpp__entry_ ## _name          \
+       }
 
-#define HPP__PRINT_FNS(_name)                  \
-       .header = hpp__header_ ## _name,                \
-       .width  = hpp__width_ ## _name,         \
-       .entry  = hpp__entry_ ## _name
+#define HPP__PRINT_FNS(_name)                          \
+       {                                               \
+               .header = hpp__header_ ## _name,        \
+               .width  = hpp__width_ ## _name,         \
+               .entry  = hpp__entry_ ## _name          \
+       }
 
 struct perf_hpp_fmt perf_hpp__format[] = {
-       { .cond = false, HPP__COLOR_PRINT_FNS(baseline) },
-       { .cond = true,  HPP__COLOR_PRINT_FNS(overhead) },
-       { .cond = false, HPP__COLOR_PRINT_FNS(overhead_sys) },
-       { .cond = false, HPP__COLOR_PRINT_FNS(overhead_us) },
-       { .cond = false, HPP__COLOR_PRINT_FNS(overhead_guest_sys) },
-       { .cond = false, HPP__COLOR_PRINT_FNS(overhead_guest_us) },
-       { .cond = false, HPP__PRINT_FNS(samples) },
-       { .cond = false, HPP__PRINT_FNS(period) },
-       { .cond = false, HPP__PRINT_FNS(period_baseline) },
-       { .cond = false, HPP__PRINT_FNS(delta) },
-       { .cond = false, HPP__PRINT_FNS(ratio) },
-       { .cond = false, HPP__PRINT_FNS(wdiff) },
-       { .cond = false, HPP__PRINT_FNS(displ) },
-       { .cond = false, HPP__PRINT_FNS(formula) }
+       HPP__COLOR_PRINT_FNS(baseline),
+       HPP__COLOR_PRINT_FNS(overhead),
+       HPP__COLOR_PRINT_FNS(overhead_sys),
+       HPP__COLOR_PRINT_FNS(overhead_us),
+       HPP__COLOR_PRINT_FNS(overhead_guest_sys),
+       HPP__COLOR_PRINT_FNS(overhead_guest_us),
+       HPP__PRINT_FNS(samples),
+       HPP__PRINT_FNS(period),
+       HPP__PRINT_FNS(period_baseline),
+       HPP__PRINT_FNS(delta),
+       HPP__PRINT_FNS(ratio),
+       HPP__PRINT_FNS(wdiff),
+       HPP__PRINT_FNS(formula)
 };
 
+LIST_HEAD(perf_hpp__list);
+
+
 #undef HPP__COLOR_PRINT_FNS
 #undef HPP__PRINT_FNS
 
+#undef HPP_PERCENT_FNS
+#undef HPP_RAW_FNS
+
+#undef __HPP_HEADER_FN
+#undef __HPP_WIDTH_FN
+#undef __HPP_COLOR_PERCENT_FN
+#undef __HPP_ENTRY_PERCENT_FN
+#undef __HPP_ENTRY_RAW_FN
+
+
 void perf_hpp__init(void)
 {
        if (symbol_conf.show_cpu_utilization) {
-               perf_hpp__format[PERF_HPP__OVERHEAD_SYS].cond = true;
-               perf_hpp__format[PERF_HPP__OVERHEAD_US].cond = true;
+               perf_hpp__column_enable(PERF_HPP__OVERHEAD_SYS);
+               perf_hpp__column_enable(PERF_HPP__OVERHEAD_US);
 
                if (perf_guest) {
-                       perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].cond = true;
-                       perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].cond = true;
+                       perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_SYS);
+                       perf_hpp__column_enable(PERF_HPP__OVERHEAD_GUEST_US);
                }
        }
 
        if (symbol_conf.show_nr_samples)
-               perf_hpp__format[PERF_HPP__SAMPLES].cond = true;
+               perf_hpp__column_enable(PERF_HPP__SAMPLES);
 
        if (symbol_conf.show_total_period)
-               perf_hpp__format[PERF_HPP__PERIOD].cond = true;
+               perf_hpp__column_enable(PERF_HPP__PERIOD);
+}
+
+void perf_hpp__column_register(struct perf_hpp_fmt *format)
+{
+       list_add_tail(&format->list, &perf_hpp__list);
 }
 
-void perf_hpp__column_enable(unsigned col, bool enable)
+void perf_hpp__column_enable(unsigned col)
 {
        BUG_ON(col >= PERF_HPP__MAX_INDEX);
-       perf_hpp__format[col].cond = enable;
+       perf_hpp__column_register(&perf_hpp__format[col]);
 }
 
 static inline void advance_hpp(struct perf_hpp *hpp, int inc)
@@ -452,27 +437,29 @@ int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he,
                                bool color)
 {
        const char *sep = symbol_conf.field_sep;
+       struct perf_hpp_fmt *fmt;
        char *start = hpp->buf;
-       int i, ret;
+       int ret;
        bool first = true;
 
        if (symbol_conf.exclude_other && !he->parent)
                return 0;
 
-       for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
-               if (!perf_hpp__format[i].cond)
-                       continue;
-
+       perf_hpp__for_each_format(fmt) {
+               /*
+                * If there's no field_sep, we still need
+                * to display initial '  '.
+                */
                if (!sep || !first) {
                        ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
                        advance_hpp(hpp, ret);
+               } else
                        first = false;
-               }
 
-               if (color && perf_hpp__format[i].color)
-                       ret = perf_hpp__format[i].color(hpp, he);
+               if (color && fmt->color)
+                       ret = fmt->color(hpp, he);
                else
-                       ret = perf_hpp__format[i].entry(hpp, he);
+                       ret = fmt->entry(hpp, he);
 
                advance_hpp(hpp, ret);
        }
@@ -504,16 +491,18 @@ int hist_entry__sort_snprintf(struct hist_entry *he, char *s, size_t size,
  */
 unsigned int hists__sort_list_width(struct hists *hists)
 {
+       struct perf_hpp_fmt *fmt;
        struct sort_entry *se;
-       int i, ret = 0;
+       int i = 0, ret = 0;
+       struct perf_hpp dummy_hpp = {
+               .ptr    = hists_to_evsel(hists),
+       };
 
-       for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
-               if (!perf_hpp__format[i].cond)
-                       continue;
+       perf_hpp__for_each_format(fmt) {
                if (i)
                        ret += 2;
 
-               ret += perf_hpp__format[i].width(NULL);
+               ret += fmt->width(&dummy_hpp);
        }
 
        list_for_each_entry(se, &hist_entry__sort_list, list)
index 809eca5707fae1fccee91dcd8a22ef46603f347c..65092d576b4e25fb4d8ad7876e6d399492165218 100644 (file)
@@ -23,5 +23,6 @@
 #define K_TIMER         -1
 #define K_ERROR         -2
 #define K_RESIZE -3
+#define K_SWITCH_INPUT_DATA -4
 
 #endif /* _PERF_KEYSYMS_H_ */
index ebb4cc10787625aca596f6a41a14d56efd9e60d8..ae6a789cb0f62780a9fa088f7fe99939c798ef2d 100644 (file)
@@ -8,7 +8,7 @@ pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER;
 
 void setup_browser(bool fallback_to_pager)
 {
-       if (!isatty(1) || dump_trace)
+       if (use_browser < 2 && (!isatty(1) || dump_trace))
                use_browser = 0;
 
        /* default to TUI */
@@ -30,6 +30,7 @@ void setup_browser(bool fallback_to_pager)
                if (fallback_to_pager)
                        setup_pager();
 
+               perf_hpp__column_enable(PERF_HPP__OVERHEAD);
                perf_hpp__init();
                break;
        }
index f0ee204f99bb4bffddedd7434d276f05d98cfb9c..ff1f60cf442e164f6843031fe89d7a9f14d0c288 100644 (file)
@@ -3,6 +3,7 @@
 #include "../../util/util.h"
 #include "../../util/hist.h"
 #include "../../util/sort.h"
+#include "../../util/evsel.h"
 
 
 static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
@@ -335,17 +336,19 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
 size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
                      int max_cols, FILE *fp)
 {
+       struct perf_hpp_fmt *fmt;
        struct sort_entry *se;
        struct rb_node *nd;
        size_t ret = 0;
        unsigned int width;
        const char *sep = symbol_conf.field_sep;
        const char *col_width = symbol_conf.col_width_list_str;
-       int idx, nr_rows = 0;
+       int nr_rows = 0;
        char bf[96];
        struct perf_hpp dummy_hpp = {
                .buf    = bf,
                .size   = sizeof(bf),
+               .ptr    = hists_to_evsel(hists),
        };
        bool first = true;
 
@@ -355,16 +358,14 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
                goto print_entries;
 
        fprintf(fp, "# ");
-       for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) {
-               if (!perf_hpp__format[idx].cond)
-                       continue;
 
+       perf_hpp__for_each_format(fmt) {
                if (!first)
                        fprintf(fp, "%s", sep ?: "  ");
                else
                        first = false;
 
-               perf_hpp__format[idx].header(&dummy_hpp);
+               fmt->header(&dummy_hpp);
                fprintf(fp, "%s", bf);
        }
 
@@ -400,18 +401,16 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
        first = true;
 
        fprintf(fp, "# ");
-       for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) {
-               unsigned int i;
 
-               if (!perf_hpp__format[idx].cond)
-                       continue;
+       perf_hpp__for_each_format(fmt) {
+               unsigned int i;
 
                if (!first)
                        fprintf(fp, "%s", sep ?: "  ");
                else
                        first = false;
 
-               width = perf_hpp__format[idx].width(&dummy_hpp);
+               width = fmt->width(&dummy_hpp);
                for (i = 0; i < width; i++)
                        fprintf(fp, ".");
        }
@@ -462,7 +461,7 @@ out:
        return ret;
 }
 
-size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
+size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
 {
        int i;
        size_t ret = 0;
@@ -470,7 +469,7 @@ size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
        for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
                const char *name;
 
-               if (hists->stats.nr_events[i] == 0)
+               if (stats->nr_events[i] == 0)
                        continue;
 
                name = perf_event__name(i);
@@ -478,7 +477,7 @@ size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
                        continue;
 
                ret += fprintf(fp, "%16s events: %10d\n", name,
-                              hists->stats.nr_events[i]);
+                              stats->nr_events[i]);
        }
 
        return ret;
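
The hunks above drop the index-based walk over perf_hpp__format[] (and its per-slot .cond check) in favour of the perf_hpp__for_each_format() iterator, so the stdio header line and dotted rule cover exactly the columns that were enabled. A stand-alone sketch of that iterator pattern, with illustrative names rather than perf's real types:

/* col_fmt / col__for_each_format are made-up stand-ins for perf's
 * struct perf_hpp_fmt and its list-based iterator. */
#include <stdio.h>

struct col_fmt {
	const char *header;
	int width;
	struct col_fmt *next;
};

/* walk only the formats that were registered, in registration order */
#define col__for_each_format(fmt, head) \
	for ((fmt) = (head); (fmt) != NULL; (fmt) = (fmt)->next)

int main(void)
{
	struct col_fmt symbol   = { "Symbol",   30, NULL };
	struct col_fmt overhead = { "Overhead",  8, &symbol };
	struct col_fmt *fmt;

	col__for_each_format(fmt, &overhead)
		printf("%-*s  ", fmt->width, fmt->header);
	putchar('\n');
	return 0;
}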
index 2884d2f41e33ffb336431f8aac42be19625d0a91..1c8b9afd5d6e723127ade5700e52a07eeee2cf8d 100644 (file)
@@ -8,6 +8,8 @@
 #include "../ui.h"
 #include "../libslang.h"
 
+char ui_helpline__last_msg[1024];
+
 static void tui_helpline__pop(void)
 {
 }
@@ -23,20 +25,7 @@ static void tui_helpline__push(const char *msg)
        strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0';
 }
 
-struct ui_helpline tui_helpline_fns = {
-       .pop    = tui_helpline__pop,
-       .push   = tui_helpline__push,
-};
-
-void ui_helpline__init(void)
-{
-       helpline_fns = &tui_helpline_fns;
-       ui_helpline__puts(" ");
-}
-
-char ui_helpline__last_msg[1024];
-
-int ui_helpline__show_help(const char *format, va_list ap)
+static int tui_helpline__show(const char *format, va_list ap)
 {
        int ret;
        static int backlog;
@@ -55,3 +44,15 @@ int ui_helpline__show_help(const char *format, va_list ap)
 
        return ret;
 }
+
+struct ui_helpline tui_helpline_fns = {
+       .pop    = tui_helpline__pop,
+       .push   = tui_helpline__push,
+       .show   = tui_helpline__show,
+};
+
+void ui_helpline__init(void)
+{
+       helpline_fns = &tui_helpline_fns;
+       ui_helpline__puts(" ");
+}
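
After this shuffle the TUI no longer exports ui_helpline__show_help(); it supplies a .show callback next to .push/.pop and the generic helpline code dispatches through the struct, which is what lets the eprintf() change in util/debug.c below drop its per-front-end branches. A minimal self-contained sketch of the ops-struct dispatch (names are illustrative, not the real perf symbols):

#include <stdarg.h>
#include <stdio.h>

struct helpline_ops {
	void (*push)(const char *msg);
	int  (*show)(const char *fmt, va_list ap);
};

/* a "stdio" front end; a TUI front end would fill in the same struct */
static void stdio_push(const char *msg)             { fputs(msg, stderr); }
static int  stdio_show(const char *fmt, va_list ap) { return vfprintf(stderr, fmt, ap); }

static struct helpline_ops stdio_ops = { .push = stdio_push, .show = stdio_show };
static struct helpline_ops *helpline = &stdio_ops;	/* picked once at setup time */

static int helpline_show(const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = helpline->show(fmt, ap);	/* dispatch, no front-end check needed */
	va_end(ap);
	return ret;
}

int main(void)
{
	helpline->push("ready\n");
	return helpline_show("processed %d samples\n", 42) < 0;
}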
index 4f989774c8c674c535990dcf82ae2ba47d5971e3..e3e0a963d03aa3559924af97a33bb67c3f48b275 100644 (file)
@@ -52,7 +52,6 @@ int ui__warning(const char *format, ...)
        return ret;
 }
 
-
 /**
  * perf_error__register - Register error logging functions
  * @eops: The pointer to error logging function struct
index 6aa34e5afdcfc0a57cafc12d6bc631d2d8169fbb..055fef34b6f6cf791a4b7daa8e73a8e3765591ff 100755 (executable)
@@ -26,13 +26,13 @@ VN=$(expr "$VN" : v*'\(.*\)')
 
 if test -r $GVF
 then
-       VC=$(sed -e 's/^PERF_VERSION = //' <$GVF)
+       VC=$(sed -e 's/^#define PERF_VERSION "\(.*\)"/\1/' <$GVF)
 else
        VC=unset
 fi
 test "$VN" = "$VC" || {
        echo >&2 "PERF_VERSION = $VN"
-       echo "PERF_VERSION = $VN" >$GVF
+       echo "#define PERF_VERSION \"$VN\"" >$GVF
 }
 
 
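PERF-VERSION-GEN now writes (and re-parses) a C header instead of a make fragment, so the version string can be consumed directly from C. With a hypothetical version string, the generated PERF-VERSION-FILE line changes like this:

/* old contents (make syntax), hypothetical version string:
 *     PERF_VERSION = 3.9.rc1.g0123456
 * new contents (C header), same hypothetical string: */
#define PERF_VERSION "3.9.rc1.g0123456"
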
index 07aaeea600007b05dab7ff8b04e720735e5f4643..d33fe937e6f18ea6775e653d934878955f70f0be 100644 (file)
@@ -809,7 +809,7 @@ fallback:
                pr_err("Can't annotate %s:\n\n"
                       "No vmlinux file%s\nwas found in the path.\n\n"
                       "Please use:\n\n"
-                      "  perf buildid-cache -av vmlinux\n\n"
+                      "  perf buildid-cache -vu vmlinux\n\n"
                       "or:\n\n"
                       "  --vmlinux vmlinux\n",
                       sym->name, build_id_msg ?: "");
index 8eec94358a4a1cc627e2d7b16acd62679626bd65..c422440fe611689b1a0b089f308a4aa876e547fc 100644 (file)
@@ -6,6 +6,7 @@
 #include "types.h"
 #include "symbol.h"
 #include "hist.h"
+#include "sort.h"
 #include <linux/list.h>
 #include <linux/rbtree.h>
 #include <pthread.h>
@@ -154,6 +155,29 @@ static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused,
 }
 #endif
 
+#ifdef GTK2_SUPPORT
+int symbol__gtk_annotate(struct symbol *sym, struct map *map, int evidx,
+                        struct hist_browser_timer *hbt);
+
+static inline int hist_entry__gtk_annotate(struct hist_entry *he, int evidx,
+                                          struct hist_browser_timer *hbt)
+{
+       return symbol__gtk_annotate(he->ms.sym, he->ms.map, evidx, hbt);
+}
+
+void perf_gtk__show_annotations(void);
+#else
+static inline int hist_entry__gtk_annotate(struct hist_entry *he __maybe_unused,
+                                          int evidx __maybe_unused,
+                                          struct hist_browser_timer *hbt
+                                          __maybe_unused)
+{
+       return 0;
+}
+
+static inline void perf_gtk__show_annotations(void) {}
+#endif
+
 extern const char      *disassembler_style;
 
 #endif /* __PERF_ANNOTATE_H */
index d3b3f5d82137fd23cbdee5ab4686bec78802ff49..42b6a632fe7b75bb297433e9ad9962814e5016d9 100644 (file)
@@ -444,7 +444,7 @@ int callchain_cursor_append(struct callchain_cursor *cursor,
        struct callchain_cursor_node *node = *cursor->last;
 
        if (!node) {
-               node = calloc(sizeof(*node), 1);
+               node = calloc(1, sizeof(*node));
                if (!node)
                        return -ENOMEM;
 
index eb340571e7d6928394b218010926898029569d9d..3ee9f67d5af0bed457cb7ae22d5efe99ccdbfd8f 100644 (file)
@@ -143,4 +143,9 @@ static inline void callchain_cursor_advance(struct callchain_cursor *cursor)
        cursor->curr = cursor->curr->next;
        cursor->pos++;
 }
+
+struct option;
+
+int record_parse_callchain_opt(const struct option *opt, const char *arg, int unset);
+extern const char record_callchain_help[];
 #endif /* __PERF_CALLCHAIN_H */
index 2b32ffa9ebdb188e8edeb31fc4176380d99b2337..f817046e22b1204bc75c41d701464bb893e530fd 100644 (file)
@@ -1,4 +1,5 @@
 #include "util.h"
+#include "sysfs.h"
 #include "../perf.h"
 #include "cpumap.h"
 #include <assert.h>
@@ -201,3 +202,56 @@ void cpu_map__delete(struct cpu_map *map)
 {
        free(map);
 }
+
+int cpu_map__get_socket(struct cpu_map *map, int idx)
+{
+       FILE *fp;
+       const char *mnt;
+       char path[PATH_MAX];
+       int cpu, ret;
+
+       if (idx > map->nr)
+               return -1;
+
+       cpu = map->map[idx];
+
+       mnt = sysfs_find_mountpoint();
+       if (!mnt)
+               return -1;
+
+       sprintf(path,
+               "%s/devices/system/cpu/cpu%d/topology/physical_package_id",
+               mnt, cpu);
+
+       fp = fopen(path, "r");
+       if (!fp)
+               return -1;
+       ret = fscanf(fp, "%d", &cpu);
+       fclose(fp);
+       return ret == 1 ? cpu : -1;
+}
+
+int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp)
+{
+       struct cpu_map *sock;
+       int nr = cpus->nr;
+       int cpu, s1, s2;
+
+       sock = calloc(1, sizeof(*sock) + nr * sizeof(int));
+       if (!sock)
+               return -1;
+
+       for (cpu = 0; cpu < nr; cpu++) {
+               s1 = cpu_map__get_socket(cpus, cpu);
+               for (s2 = 0; s2 < sock->nr; s2++) {
+                       if (s1 == sock->map[s2])
+                               break;
+               }
+               if (s2 == sock->nr) {
+                       sock->map[sock->nr] = s1;
+                       sock->nr++;
+               }
+       }
+       *sockp = sock;
+       return 0;
+}
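
cpu_map__get_socket() maps a CPU index to its physical package (socket) id by reading one integer out of sysfs, and cpu_map__build_socket_map() deduplicates those ids into a socket map. A self-contained illustration of the sysfs lookup itself (example code, not part of the patch):

#include <stdio.h>

int main(void)
{
	char path[256];
	FILE *fp;
	int cpu = 0, pkg;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);

	fp = fopen(path, "r");
	if (!fp) {
		perror(path);
		return 1;
	}
	if (fscanf(fp, "%d", &pkg) != 1) {
		fclose(fp);
		return 1;
	}
	fclose(fp);

	printf("cpu%d is on socket %d\n", cpu, pkg);
	return 0;
}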
index 2f68a3b8c285dd80f6c21bc47f180e326528cc15..161b00756a1205a25f42ab171127e56f7677c1b0 100644 (file)
@@ -14,6 +14,15 @@ struct cpu_map *cpu_map__dummy_new(void);
 void cpu_map__delete(struct cpu_map *map);
 struct cpu_map *cpu_map__read(FILE *file);
 size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp);
+int cpu_map__get_socket(struct cpu_map *map, int idx);
+int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp);
+
+static inline int cpu_map__socket(struct cpu_map *sock, int s)
+{
+       if (!sock || s > sock->nr || s < 0)
+               return 0;
+       return sock->map[s];
+}
 
 static inline int cpu_map__nr(const struct cpu_map *map)
 {
index 03f830b48148a7b41f0c4bb1b59b7f1614db035c..399e74c34c1aa5220ecc29bd0d33cca2d2a3ac82 100644 (file)
@@ -23,10 +23,8 @@ int eprintf(int level, const char *fmt, ...)
 
        if (verbose >= level) {
                va_start(args, fmt);
-               if (use_browser == 1)
-                       ret = ui_helpline__show_help(fmt, args);
-               else if (use_browser == 2)
-                       ret = perf_gtk__show_helpline(fmt, args);
+               if (use_browser >= 1)
+                       ui_helpline__vshow(fmt, args);
                else
                        ret = vfprintf(stderr, fmt, args);
                va_end(args);
@@ -49,28 +47,6 @@ int dump_printf(const char *fmt, ...)
        return ret;
 }
 
-#if !defined(NEWT_SUPPORT) && !defined(GTK2_SUPPORT)
-int ui__warning(const char *format, ...)
-{
-       va_list args;
-
-       va_start(args, format);
-       vfprintf(stderr, format, args);
-       va_end(args);
-       return 0;
-}
-#endif
-
-int ui__error_paranoid(void)
-{
-       return ui__error("Permission error - are you root?\n"
-                   "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
-                   " -1 - Not paranoid at all\n"
-                   "  0 - Disallow raw tracepoint access for unpriv\n"
-                   "  1 - Disallow cpu events for unpriv\n"
-                   "  2 - Disallow kernel profiling for unpriv\n");
-}
-
 void trace_event(union perf_event *event)
 {
        unsigned char *raw_event = (void *)event;
index 83e8d234af6b149a045902c267dbb8c6b8926ad0..efbd98805ad0b0086996298b698122a2b0ceb018 100644 (file)
@@ -5,6 +5,8 @@
 #include <stdbool.h>
 #include "event.h"
 #include "../ui/helpline.h"
+#include "../ui/progress.h"
+#include "../ui/util.h"
 
 extern int verbose;
 extern bool quiet, dump_trace;
@@ -12,39 +14,7 @@ extern bool quiet, dump_trace;
 int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
 void trace_event(union perf_event *event);
 
-struct ui_progress;
-struct perf_error_ops;
-
-#if defined(NEWT_SUPPORT) || defined(GTK2_SUPPORT)
-
-#include "../ui/progress.h"
 int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2)));
-#include "../ui/util.h"
-
-#else
-
-static inline void ui_progress__update(u64 curr __maybe_unused,
-                                      u64 total __maybe_unused,
-                                      const char *title __maybe_unused) {}
-static inline void ui_progress__finish(void) {}
-
-#define ui__error(format, arg...) ui__warning(format, ##arg)
-
-static inline int
-perf_error__register(struct perf_error_ops *eops __maybe_unused)
-{
-       return 0;
-}
-
-static inline int
-perf_error__unregister(struct perf_error_ops *eops __maybe_unused)
-{
-       return 0;
-}
-
-#endif /* NEWT_SUPPORT || GTK2_SUPPORT */
-
 int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
-int ui__error_paranoid(void);
 
 #endif /* __PERF_DEBUG_H */
index d6d9a465acdbe09437571ca737e7d28cb5fdf770..6f7d5a9d6b050ddec28b0b76461e155c8829794a 100644 (file)
@@ -539,13 +539,13 @@ struct dso *__dsos__findnew(struct list_head *head, const char *name)
 }
 
 size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
-                              bool with_hits)
+                              bool (skip)(struct dso *dso, int parm), int parm)
 {
        struct dso *pos;
        size_t ret = 0;
 
        list_for_each_entry(pos, head, node) {
-               if (with_hits && !pos->hit)
+               if (skip && skip(pos, parm))
                        continue;
                ret += dso__fprintf_buildid(pos, fp);
                ret += fprintf(fp, " %s\n", pos->long_name);
@@ -583,7 +583,7 @@ size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
        if (dso->short_name != dso->long_name)
                ret += fprintf(fp, "%s, ", dso->long_name);
        ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type],
-                      dso->loaded ? "" : "NOT ");
+                      dso__loaded(dso, type) ? "" : "NOT ");
        ret += dso__fprintf_buildid(dso, fp);
        ret += fprintf(fp, ")\n");
        for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {
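
__dsos__fprintf_buildid() now takes a skip() predicate plus an integer argument instead of the hard-coded with_hits flag, so each caller decides per DSO what to leave out of the build-id listing. The callback-with-parameter shape, as a stand-alone sketch with made-up types:

#include <stdio.h>

struct item {
	const char *name;
	int hits;
};

static size_t print_items(struct item *items, int n,
			  int (*skip)(struct item *it, int parm), int parm)
{
	size_t ret = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (skip && skip(&items[i], parm))
			continue;
		ret += printf("%s\n", items[i].name);
	}
	return ret;
}

/* example predicate: drop entries with fewer than 'min_hits' hits */
static int skip_cold(struct item *it, int min_hits)
{
	return it->hits < min_hits;
}

int main(void)
{
	struct item items[] = { { "libc.so", 10 }, { "vdso", 0 } };

	return print_items(items, 2, skip_cold, 1) == 0;
}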
index e03276940b99638b2a25c90f6bd852800051ffb2..450199ab51b5d8364dd0393a75c5f04d913b6300 100644 (file)
@@ -138,7 +138,7 @@ struct dso *__dsos__findnew(struct list_head *head, const char *name);
 bool __dsos__read_build_ids(struct list_head *head, bool with_hits);
 
 size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
-                              bool with_hits);
+                              bool (skip)(struct dso *dso, int parm), int parm);
 size_t __dsos__fprintf(struct list_head *head, FILE *fp);
 
 size_t dso__fprintf_buildid(struct dso *dso, FILE *fp);
index 3cf2c3e0605f249543c11ddbe2c39cb2607a6f7a..5cd13d768cecee1a630ca51715dba4b6d8f19f0b 100644 (file)
@@ -476,8 +476,10 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                }
        }
 
-       if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0)
+       if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) {
+               free(event);
                return -ENOENT;
+       }
 
        map = machine->vmlinux_maps[MAP__FUNCTION];
        size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
index 705293489e3c319c0b13ec3f5f32a3c1fd1fe5ed..bc4ad79774387aec0a0e9355c7a1a47c747b0528 100644 (file)
@@ -49,10 +49,16 @@ struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
        return evlist;
 }
 
-void perf_evlist__config_attrs(struct perf_evlist *evlist,
-                              struct perf_record_opts *opts)
+void perf_evlist__config(struct perf_evlist *evlist,
+                       struct perf_record_opts *opts)
 {
        struct perf_evsel *evsel;
+       /*
+        * Set the evsel leader links before we configure attributes,
+        * since some might depend on this info.
+        */
+       if (opts->group)
+               perf_evlist__set_leader(evlist);
 
        if (evlist->cpus->map[0] < 0)
                opts->no_inherit = true;
@@ -61,7 +67,7 @@ void perf_evlist__config_attrs(struct perf_evlist *evlist,
                perf_evsel__config(evsel, opts);
 
                if (evlist->nr_entries > 1)
-                       evsel->attr.sample_type |= PERF_SAMPLE_ID;
+                       perf_evsel__set_sample_id(evsel);
        }
 }
 
@@ -111,18 +117,21 @@ void __perf_evlist__set_leader(struct list_head *list)
        struct perf_evsel *evsel, *leader;
 
        leader = list_entry(list->next, struct perf_evsel, node);
-       leader->leader = NULL;
+       evsel = list_entry(list->prev, struct perf_evsel, node);
+
+       leader->nr_members = evsel->idx - leader->idx + 1;
 
        list_for_each_entry(evsel, list, node) {
-               if (evsel != leader)
-                       evsel->leader = leader;
+               evsel->leader = leader;
        }
 }
 
 void perf_evlist__set_leader(struct perf_evlist *evlist)
 {
-       if (evlist->nr_entries)
+       if (evlist->nr_entries) {
+               evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
                __perf_evlist__set_leader(&evlist->entries);
+       }
 }
 
 int perf_evlist__add_default(struct perf_evlist *evlist)
@@ -222,7 +231,7 @@ void perf_evlist__disable(struct perf_evlist *evlist)
 
        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
-                       if (perf_evsel__is_group_member(pos))
+                       if (!perf_evsel__is_group_leader(pos))
                                continue;
                        for (thread = 0; thread < evlist->threads->nr; thread++)
                                ioctl(FD(pos, cpu, thread),
@@ -238,7 +247,7 @@ void perf_evlist__enable(struct perf_evlist *evlist)
 
        for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
-                       if (perf_evsel__is_group_member(pos))
+                       if (!perf_evsel__is_group_leader(pos))
                                continue;
                        for (thread = 0; thread < evlist->threads->nr; thread++)
                                ioctl(FD(pos, cpu, thread),
@@ -366,7 +375,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
                if ((old & md->mask) + size != ((old + size) & md->mask)) {
                        unsigned int offset = old;
                        unsigned int len = min(sizeof(*event), size), cpy;
-                       void *dst = &evlist->event_copy;
+                       void *dst = &md->event_copy;
 
                        do {
                                cpy = min(md->mask + 1 - (offset & md->mask), len);
@@ -376,7 +385,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
                                len -= cpy;
                        } while (len);
 
-                       event = &evlist->event_copy;
+                       event = &md->event_copy;
                }
 
                old += size;
index 56003f779e601d181eec34fd38de5bb66c30bc5c..2dd07bd60b4f7048a41752a92aa7dc4dc23fced8 100644 (file)
@@ -17,10 +17,18 @@ struct perf_record_opts;
 #define PERF_EVLIST__HLIST_BITS 8
 #define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
 
+struct perf_mmap {
+       void             *base;
+       int              mask;
+       unsigned int     prev;
+       union perf_event event_copy;
+};
+
 struct perf_evlist {
        struct list_head entries;
        struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
        int              nr_entries;
+       int              nr_groups;
        int              nr_fds;
        int              nr_mmaps;
        int              mmap_len;
@@ -29,7 +37,6 @@ struct perf_evlist {
                pid_t   pid;
        } workload;
        bool             overwrite;
-       union perf_event event_copy;
        struct perf_mmap *mmap;
        struct pollfd    *pollfd;
        struct thread_map *threads;
@@ -76,8 +83,8 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
 
 int perf_evlist__open(struct perf_evlist *evlist);
 
-void perf_evlist__config_attrs(struct perf_evlist *evlist,
-                              struct perf_record_opts *opts);
+void perf_evlist__config(struct perf_evlist *evlist,
+                        struct perf_record_opts *opts);
 
 int perf_evlist__prepare_workload(struct perf_evlist *evlist,
                                  struct perf_record_opts *opts,
@@ -135,4 +142,25 @@ static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
 }
 
 size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);
+
+static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
+{
+       struct perf_event_mmap_page *pc = mm->base;
+       int head = pc->data_head;
+       rmb();
+       return head;
+}
+
+static inline void perf_mmap__write_tail(struct perf_mmap *md,
+                                        unsigned long tail)
+{
+       struct perf_event_mmap_page *pc = md->base;
+
+       /*
+        * ensure all reads are done before we write the tail out.
+        */
+       /* mb(); */
+       pc->data_tail = tail;
+}
+
 #endif /* __PERF_EVLIST_H */
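
struct perf_mmap now owns its per-ring event_copy, and the head/tail helpers live next to it. A consumer drains one ring by sampling data_head through perf_mmap__read_head() (the rmb() keeps record reads from being hoisted above the head load), walking the records in the data area, then publishing the new tail with perf_mmap__write_tail(). A sketch of that loop, assuming the declarations above plus a page_size for the control page; drain_one_mmap() is a made-up name and wrap handling at the buffer end is omitted:

static void drain_one_mmap(struct perf_mmap *md, unsigned int page_size)
{
	unsigned int head = perf_mmap__read_head(md);	/* acquire the producer head */
	unsigned int old = md->prev;
	unsigned char *data = (unsigned char *)md->base + page_size; /* records follow the control page */

	while (old != head) {
		struct perf_event_header *eh =
			(struct perf_event_header *)&data[old & md->mask];

		/* ... decode the record at 'eh' ... */
		old += eh->size;
	}

	md->prev = old;
	perf_mmap__write_tail(md, old);	/* hand the consumed space back to the kernel */
}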
index 1b16dd1edc8eb2c6d632ec313e7380aac6c575c7..9c82f98f26dedd460fb5b2c70fcb8256c33fca42 100644 (file)
 #include <linux/perf_event.h>
 #include "perf_regs.h"
 
+static struct {
+       bool sample_id_all;
+       bool exclude_guest;
+} perf_missing_features;
+
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 
 static int __perf_evsel__sample_size(u64 sample_type)
@@ -50,11 +55,36 @@ void hists__init(struct hists *hists)
        pthread_mutex_init(&hists->lock, NULL);
 }
 
+void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
+                                 enum perf_event_sample_format bit)
+{
+       if (!(evsel->attr.sample_type & bit)) {
+               evsel->attr.sample_type |= bit;
+               evsel->sample_size += sizeof(u64);
+       }
+}
+
+void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
+                                   enum perf_event_sample_format bit)
+{
+       if (evsel->attr.sample_type & bit) {
+               evsel->attr.sample_type &= ~bit;
+               evsel->sample_size -= sizeof(u64);
+       }
+}
+
+void perf_evsel__set_sample_id(struct perf_evsel *evsel)
+{
+       perf_evsel__set_sample_bit(evsel, ID);
+       evsel->attr.read_format |= PERF_FORMAT_ID;
+}
+
 void perf_evsel__init(struct perf_evsel *evsel,
                      struct perf_event_attr *attr, int idx)
 {
        evsel->idx         = idx;
        evsel->attr        = *attr;
+       evsel->leader      = evsel;
        INIT_LIST_HEAD(&evsel->node);
        hists__init(&evsel->hists);
        evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
@@ -404,6 +434,31 @@ const char *perf_evsel__name(struct perf_evsel *evsel)
        return evsel->name ?: "unknown";
 }
 
+const char *perf_evsel__group_name(struct perf_evsel *evsel)
+{
+       return evsel->group_name ?: "anon group";
+}
+
+int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
+{
+       int ret;
+       struct perf_evsel *pos;
+       const char *group_name = perf_evsel__group_name(evsel);
+
+       ret = scnprintf(buf, size, "%s", group_name);
+
+       ret += scnprintf(buf + ret, size - ret, " { %s",
+                        perf_evsel__name(evsel));
+
+       for_each_group_member(pos, evsel)
+               ret += scnprintf(buf + ret, size - ret, ", %s",
+                                perf_evsel__name(pos));
+
+       ret += scnprintf(buf + ret, size - ret, " }");
+
+       return ret;
+}
+
 /*
  * The enable_on_exec/disabled value strategy:
  *
@@ -438,13 +493,11 @@ void perf_evsel__config(struct perf_evsel *evsel,
        struct perf_event_attr *attr = &evsel->attr;
        int track = !evsel->idx; /* only the first counter needs these */
 
-       attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
+       attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
        attr->inherit       = !opts->no_inherit;
-       attr->read_format   = PERF_FORMAT_TOTAL_TIME_ENABLED |
-                             PERF_FORMAT_TOTAL_TIME_RUNNING |
-                             PERF_FORMAT_ID;
 
-       attr->sample_type  |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+       perf_evsel__set_sample_bit(evsel, IP);
+       perf_evsel__set_sample_bit(evsel, TID);
 
        /*
         * We default some events to a 1 default interval. But keep
@@ -453,7 +506,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
        if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
                                     opts->user_interval != ULLONG_MAX)) {
                if (opts->freq) {
-                       attr->sample_type       |= PERF_SAMPLE_PERIOD;
+                       perf_evsel__set_sample_bit(evsel, PERIOD);
                        attr->freq              = 1;
                        attr->sample_freq       = opts->freq;
                } else {
@@ -468,16 +521,16 @@ void perf_evsel__config(struct perf_evsel *evsel,
                attr->inherit_stat = 1;
 
        if (opts->sample_address) {
-               attr->sample_type       |= PERF_SAMPLE_ADDR;
+               perf_evsel__set_sample_bit(evsel, ADDR);
                attr->mmap_data = track;
        }
 
        if (opts->call_graph) {
-               attr->sample_type       |= PERF_SAMPLE_CALLCHAIN;
+               perf_evsel__set_sample_bit(evsel, CALLCHAIN);
 
                if (opts->call_graph == CALLCHAIN_DWARF) {
-                       attr->sample_type |= PERF_SAMPLE_REGS_USER |
-                                            PERF_SAMPLE_STACK_USER;
+                       perf_evsel__set_sample_bit(evsel, REGS_USER);
+                       perf_evsel__set_sample_bit(evsel, STACK_USER);
                        attr->sample_regs_user = PERF_REGS_MASK;
                        attr->sample_stack_user = opts->stack_dump_size;
                        attr->exclude_callchain_user = 1;
@@ -485,20 +538,20 @@ void perf_evsel__config(struct perf_evsel *evsel,
        }
 
        if (perf_target__has_cpu(&opts->target))
-               attr->sample_type       |= PERF_SAMPLE_CPU;
+               perf_evsel__set_sample_bit(evsel, CPU);
 
        if (opts->period)
-               attr->sample_type       |= PERF_SAMPLE_PERIOD;
+               perf_evsel__set_sample_bit(evsel, PERIOD);
 
-       if (!opts->sample_id_all_missing &&
+       if (!perf_missing_features.sample_id_all &&
            (opts->sample_time || !opts->no_inherit ||
             perf_target__has_cpu(&opts->target)))
-               attr->sample_type       |= PERF_SAMPLE_TIME;
+               perf_evsel__set_sample_bit(evsel, TIME);
 
        if (opts->raw_samples) {
-               attr->sample_type       |= PERF_SAMPLE_TIME;
-               attr->sample_type       |= PERF_SAMPLE_RAW;
-               attr->sample_type       |= PERF_SAMPLE_CPU;
+               perf_evsel__set_sample_bit(evsel, TIME);
+               perf_evsel__set_sample_bit(evsel, RAW);
+               perf_evsel__set_sample_bit(evsel, CPU);
        }
 
        if (opts->no_delay) {
@@ -506,7 +559,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
                attr->wakeup_events = 1;
        }
        if (opts->branch_stack) {
-               attr->sample_type       |= PERF_SAMPLE_BRANCH_STACK;
+               perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
                attr->branch_sample_type = opts->branch_stack;
        }
 
@@ -519,14 +572,14 @@ void perf_evsel__config(struct perf_evsel *evsel,
         * Disabling only independent events or group leaders,
         * keeping group members enabled.
         */
-       if (!perf_evsel__is_group_member(evsel))
+       if (perf_evsel__is_group_leader(evsel))
                attr->disabled = 1;
 
        /*
         * Setting enable_on_exec for independent events and
         * group leaders for traced executed by perf.
         */
-       if (perf_target__none(&opts->target) && !perf_evsel__is_group_member(evsel))
+       if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
                attr->enable_on_exec = 1;
 }
 
@@ -612,6 +665,11 @@ void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
                }
 }
 
+void perf_evsel__free_counts(struct perf_evsel *evsel)
+{
+       free(evsel->counts);
+}
+
 void perf_evsel__exit(struct perf_evsel *evsel)
 {
        assert(list_empty(&evsel->node));
@@ -631,6 +689,28 @@ void perf_evsel__delete(struct perf_evsel *evsel)
        free(evsel);
 }
 
+static inline void compute_deltas(struct perf_evsel *evsel,
+                                 int cpu,
+                                 struct perf_counts_values *count)
+{
+       struct perf_counts_values tmp;
+
+       if (!evsel->prev_raw_counts)
+               return;
+
+       if (cpu == -1) {
+               tmp = evsel->prev_raw_counts->aggr;
+               evsel->prev_raw_counts->aggr = *count;
+       } else {
+               tmp = evsel->prev_raw_counts->cpu[cpu];
+               evsel->prev_raw_counts->cpu[cpu] = *count;
+       }
+
+       count->val = count->val - tmp.val;
+       count->ena = count->ena - tmp.ena;
+       count->run = count->run - tmp.run;
+}
+
 int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
                              int cpu, int thread, bool scale)
 {
@@ -646,6 +726,8 @@ int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
        if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
                return -errno;
 
+       compute_deltas(evsel, cpu, &count);
+
        if (scale) {
                if (count.run == 0)
                        count.val = 0;
@@ -684,6 +766,8 @@ int __perf_evsel__read(struct perf_evsel *evsel,
                }
        }
 
+       compute_deltas(evsel, -1, aggr);
+
        evsel->counts->scaled = 0;
        if (scale) {
                if (aggr->run == 0) {
@@ -707,7 +791,7 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
        struct perf_evsel *leader = evsel->leader;
        int fd;
 
-       if (!perf_evsel__is_group_member(evsel))
+       if (perf_evsel__is_group_leader(evsel))
                return -1;
 
        /*
@@ -738,6 +822,13 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                pid = evsel->cgrp->fd;
        }
 
+fallback_missing_features:
+       if (perf_missing_features.exclude_guest)
+               evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
+retry_sample_id:
+       if (perf_missing_features.sample_id_all)
+               evsel->attr.sample_id_all = 0;
+
        for (cpu = 0; cpu < cpus->nr; cpu++) {
 
                for (thread = 0; thread < threads->nr; thread++) {
@@ -754,13 +845,26 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                                                                     group_fd, flags);
                        if (FD(evsel, cpu, thread) < 0) {
                                err = -errno;
-                               goto out_close;
+                               goto try_fallback;
                        }
                }
        }
 
        return 0;
 
+try_fallback:
+       if (err != -EINVAL || cpu > 0 || thread > 0)
+               goto out_close;
+
+       if (!perf_missing_features.exclude_guest &&
+           (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
+               perf_missing_features.exclude_guest = true;
+               goto fallback_missing_features;
+       } else if (!perf_missing_features.sample_id_all) {
+               perf_missing_features.sample_id_all = true;
+               goto retry_sample_id;
+       }
+
 out_close:
        do {
                while (--thread >= 0) {
@@ -1205,3 +1309,225 @@ u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
 
        return 0;
 }
+
+static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
+{
+       va_list args;
+       int ret = 0;
+
+       if (!*first) {
+               ret += fprintf(fp, ",");
+       } else {
+               ret += fprintf(fp, ":");
+               *first = false;
+       }
+
+       va_start(args, fmt);
+       ret += vfprintf(fp, fmt, args);
+       va_end(args);
+       return ret;
+}
+
+static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value)
+{
+       if (value == 0)
+               return 0;
+
+       return comma_fprintf(fp, first, " %s: %" PRIu64, field, value);
+}
+
+#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field)
+
+struct bit_names {
+       int bit;
+       const char *name;
+};
+
+static int bits__fprintf(FILE *fp, const char *field, u64 value,
+                        struct bit_names *bits, bool *first)
+{
+       int i = 0, printed = comma_fprintf(fp, first, " %s: ", field);
+       bool first_bit = true;
+
+       do {
+               if (value & bits[i].bit) {
+                       printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name);
+                       first_bit = false;
+               }
+       } while (bits[++i].name != NULL);
+
+       return printed;
+}
+
+static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
+{
+#define bit_name(n) { PERF_SAMPLE_##n, #n }
+       struct bit_names bits[] = {
+               bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
+               bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
+               bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
+               bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
+               { .name = NULL, }
+       };
+#undef bit_name
+       return bits__fprintf(fp, "sample_type", value, bits, first);
+}
+
+static int read_format__fprintf(FILE *fp, bool *first, u64 value)
+{
+#define bit_name(n) { PERF_FORMAT_##n, #n }
+       struct bit_names bits[] = {
+               bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
+               bit_name(ID), bit_name(GROUP),
+               { .name = NULL, }
+       };
+#undef bit_name
+       return bits__fprintf(fp, "read_format", value, bits, first);
+}
+
+int perf_evsel__fprintf(struct perf_evsel *evsel,
+                       struct perf_attr_details *details, FILE *fp)
+{
+       bool first = true;
+       int printed = 0;
+
+       if (details->event_group) {
+               struct perf_evsel *pos;
+
+               if (!perf_evsel__is_group_leader(evsel))
+                       return 0;
+
+               if (evsel->nr_members > 1)
+                       printed += fprintf(fp, "%s{", evsel->group_name ?: "");
+
+               printed += fprintf(fp, "%s", perf_evsel__name(evsel));
+               for_each_group_member(pos, evsel)
+                       printed += fprintf(fp, ",%s", perf_evsel__name(pos));
+
+               if (evsel->nr_members > 1)
+                       printed += fprintf(fp, "}");
+               goto out;
+       }
+
+       printed += fprintf(fp, "%s", perf_evsel__name(evsel));
+
+       if (details->verbose || details->freq) {
+               printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
+                                        (u64)evsel->attr.sample_freq);
+       }
+
+       if (details->verbose) {
+               if_print(type);
+               if_print(config);
+               if_print(config1);
+               if_print(config2);
+               if_print(size);
+               printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type);
+               if (evsel->attr.read_format)
+                       printed += read_format__fprintf(fp, &first, evsel->attr.read_format);
+               if_print(disabled);
+               if_print(inherit);
+               if_print(pinned);
+               if_print(exclusive);
+               if_print(exclude_user);
+               if_print(exclude_kernel);
+               if_print(exclude_hv);
+               if_print(exclude_idle);
+               if_print(mmap);
+               if_print(comm);
+               if_print(freq);
+               if_print(inherit_stat);
+               if_print(enable_on_exec);
+               if_print(task);
+               if_print(watermark);
+               if_print(precise_ip);
+               if_print(mmap_data);
+               if_print(sample_id_all);
+               if_print(exclude_host);
+               if_print(exclude_guest);
+               if_print(__reserved_1);
+               if_print(wakeup_events);
+               if_print(bp_type);
+               if_print(branch_sample_type);
+       }
+out:
+       fputc('\n', fp);
+       return ++printed;
+}
+
+bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
+                         char *msg, size_t msgsize)
+{
+       if ((err == ENOENT || err == ENXIO) &&
+           evsel->attr.type   == PERF_TYPE_HARDWARE &&
+           evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
+               /*
+                * If it's cycles then fall back to hrtimer based
+                * cpu-clock-tick sw counter, which is always available even if
+                * no PMU support.
+                *
+                * PPC returns ENXIO until 2.6.37 (behavior changed with commit
+                * b0a873e).
+                */
+               scnprintf(msg, msgsize, "%s",
+"The cycles event is not supported, trying to fall back to cpu-clock-ticks");
+
+               evsel->attr.type   = PERF_TYPE_SOFTWARE;
+               evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;
+
+               free(evsel->name);
+               evsel->name = NULL;
+               return true;
+       }
+
+       return false;
+}
+
+int perf_evsel__open_strerror(struct perf_evsel *evsel,
+                             struct perf_target *target,
+                             int err, char *msg, size_t size)
+{
+       switch (err) {
+       case EPERM:
+       case EACCES:
+               return scnprintf(msg, size, "%s",
+                "You may not have permission to collect %sstats.\n"
+                "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
+                " -1 - Not paranoid at all\n"
+                "  0 - Disallow raw tracepoint access for unpriv\n"
+                "  1 - Disallow cpu events for unpriv\n"
+                "  2 - Disallow kernel profiling for unpriv",
+                                target->system_wide ? "system-wide " : "");
+       case ENOENT:
+               return scnprintf(msg, size, "The %s event is not supported.",
+                                perf_evsel__name(evsel));
+       case EMFILE:
+               return scnprintf(msg, size, "%s",
+                        "Too many events are opened.\n"
+                        "Try again after reducing the number of events.");
+       case ENODEV:
+               if (target->cpu_list)
+                       return scnprintf(msg, size, "%s",
+        "No such device - did you specify an out-of-range profile CPU?\n");
+               break;
+       case EOPNOTSUPP:
+               if (evsel->attr.precise_ip)
+                       return scnprintf(msg, size, "%s",
+       "\'precise\' request may not be supported. Try removing 'p' modifier.");
+#if defined(__i386__) || defined(__x86_64__)
+               if (evsel->attr.type == PERF_TYPE_HARDWARE)
+                       return scnprintf(msg, size, "%s",
+       "No hardware sampling interrupt available.\n"
+       "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
+#endif
+               break;
+       default:
+               break;
+       }
+
+       return scnprintf(msg, size,
+       "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).  \n"
+       "/bin/dmesg may provide additional information.\n"
+       "No CONFIG_PERF_EVENTS=y kernel support configured?\n",
+                        err, strerror(err), perf_evsel__name(evsel));
+}
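
The new perf_missing_features state plus the fallback_missing_features/retry_sample_id labels turn event opening into a probe-and-retry loop: if the very first open fails with EINVAL, clear the attribute bits an older kernel does not know about and try again, remembering the outcome for later evsels. The same idea against the raw syscall, as a self-contained example (illustrative only; kernels that predate these bits reject unknown attr fields with EINVAL):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
			   int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_guest = 1;			/* newer attribute bit */

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this process, any CPU */
	if (fd < 0 && errno == EINVAL) {
		/* older kernel: drop the bits it does not understand and retry */
		attr.exclude_guest = attr.exclude_host = 0;
		fd = perf_event_open(&attr, 0, -1, -1, 0);
	}
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	printf("counting cycles on fd %d\n", fd);
	close(fd);
	return 0;
}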
index 3d2b8017438c372f69458a2a39301c3bdf70425b..52021c3087dfe23b777544164a3ffa7492b70e57 100644 (file)
@@ -53,6 +53,7 @@ struct perf_evsel {
        struct xyarray          *sample_id;
        u64                     *id;
        struct perf_counts      *counts;
+       struct perf_counts      *prev_raw_counts;
        int                     idx;
        u32                     ids;
        struct hists            hists;
@@ -73,10 +74,13 @@ struct perf_evsel {
        bool                    needs_swap;
        /* parse modifier helper */
        int                     exclude_GH;
+       int                     nr_members;
        struct perf_evsel       *leader;
        char                    *group_name;
 };
 
+#define hists_to_evsel(h) container_of(h, struct perf_evsel, hists)
+
 struct cpu_map;
 struct thread_map;
 struct perf_evlist;
@@ -110,14 +114,30 @@ extern const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX];
 int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
                                            char *bf, size_t size);
 const char *perf_evsel__name(struct perf_evsel *evsel);
+const char *perf_evsel__group_name(struct perf_evsel *evsel);
+int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size);
 
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
 int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
 void perf_evsel__free_fd(struct perf_evsel *evsel);
 void perf_evsel__free_id(struct perf_evsel *evsel);
+void perf_evsel__free_counts(struct perf_evsel *evsel);
 void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
 
+void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
+                                 enum perf_event_sample_format bit);
+void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
+                                   enum perf_event_sample_format bit);
+
+#define perf_evsel__set_sample_bit(evsel, bit) \
+       __perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)
+
+#define perf_evsel__reset_sample_bit(evsel, bit) \
+       __perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)
+
+void perf_evsel__set_sample_id(struct perf_evsel *evsel);
+
 int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
                           const char *filter);
 
@@ -226,8 +246,34 @@ static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel)
        return list_entry(evsel->node.next, struct perf_evsel, node);
 }
 
-static inline bool perf_evsel__is_group_member(const struct perf_evsel *evsel)
+static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel)
+{
+       return evsel->leader == evsel;
+}
+
+struct perf_attr_details {
+       bool freq;
+       bool verbose;
+       bool event_group;
+};
+
+int perf_evsel__fprintf(struct perf_evsel *evsel,
+                       struct perf_attr_details *details, FILE *fp);
+
+bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
+                         char *msg, size_t msgsize);
+int perf_evsel__open_strerror(struct perf_evsel *evsel,
+                             struct perf_target *target,
+                             int err, char *msg, size_t size);
+
+static inline int perf_evsel__group_idx(struct perf_evsel *evsel)
 {
-       return evsel->leader != NULL;
+       return evsel->idx - evsel->leader->idx;
 }
+
+#define for_each_group_member(_evsel, _leader)                                         \
+for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node);     \
+     (_evsel) && (_evsel)->leader == (_leader);                                        \
+     (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))
+
 #endif /* __PERF_EVSEL_H */
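
The header now encodes groups with a self-referencing leader pointer (perf_evsel__is_group_leader() checks evsel->leader == evsel), an nr_members count on the leader, and the for_each_group_member() walk over the members that follow the leader on the evlist. A stand-alone sketch of that layout, with made-up types, producing the same "{ leader, member }" shape that perf_evsel__group_desc() prints:

#include <stdio.h>

struct event {
	const char *name;
	int idx;
	struct event *leader;
	int nr_members;
	struct event *next;		/* stand-in for the kernel list_head */
};

#define for_each_member(ev, ldr) \
	for ((ev) = (ldr)->next; (ev) && (ev)->leader == (ldr); (ev) = (ev)->next)

int main(void)
{
	struct event insns  = { "instructions", 1, NULL, 0, NULL };
	struct event cycles = { "cycles",       0, NULL, 2, &insns };
	struct event *ev;

	cycles.leader = &cycles;	/* leaders point at themselves */
	insns.leader  = &cycles;	/* members point at their leader */

	printf("{ %s", cycles.name);
	for_each_member(ev, &cycles)
		printf(", %s", ev->name);
	printf(" }\n");
	return 0;
}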
index b7da4634a047af37ffb261319b74792bdd5d337c..f4bfd79ef6a7e04aded216bb2c0540ce1dd17deb 100644 (file)
@@ -148,7 +148,7 @@ static char *do_read_string(int fd, struct perf_header *ph)
        u32 len;
        char *buf;
 
-       sz = read(fd, &len, sizeof(len));
+       sz = readn(fd, &len, sizeof(len));
        if (sz < (ssize_t)sizeof(len))
                return NULL;
 
@@ -159,7 +159,7 @@ static char *do_read_string(int fd, struct perf_header *ph)
        if (!buf)
                return NULL;
 
-       ret = read(fd, buf, len);
+       ret = readn(fd, buf, len);
        if (ret == (ssize_t)len) {
                /*
                 * strings are padded by zeroes
@@ -287,12 +287,12 @@ static int dsos__write_buildid_table(struct perf_header *header, int fd)
        struct perf_session *session = container_of(header,
                        struct perf_session, header);
        struct rb_node *nd;
-       int err = machine__write_buildid_table(&session->host_machine, fd);
+       int err = machine__write_buildid_table(&session->machines.host, fd);
 
        if (err)
                return err;
 
-       for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
+       for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
                err = machine__write_buildid_table(pos, fd);
                if (err)
@@ -313,7 +313,8 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
        if (is_kallsyms) {
                if (symbol_conf.kptr_restrict) {
                        pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n");
-                       return 0;
+                       err = 0;
+                       goto out_free;
                }
                realname = (char *) name;
        } else
@@ -448,9 +449,9 @@ static int perf_session__cache_build_ids(struct perf_session *session)
        if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
                return -1;
 
-       ret = machine__cache_build_ids(&session->host_machine, debugdir);
+       ret = machine__cache_build_ids(&session->machines.host, debugdir);
 
-       for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
+       for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
                ret |= machine__cache_build_ids(pos, debugdir);
        }
@@ -467,9 +468,9 @@ static bool machine__read_build_ids(struct machine *machine, bool with_hits)
 static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
 {
        struct rb_node *nd;
-       bool ret = machine__read_build_ids(&session->host_machine, with_hits);
+       bool ret = machine__read_build_ids(&session->machines.host, with_hits);
 
-       for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
+       for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
                ret |= machine__read_build_ids(pos, with_hits);
        }
@@ -954,6 +955,7 @@ static int write_topo_node(int fd, int node)
        }
 
        fclose(fp);
+       fp = NULL;
 
        ret = do_write(fd, &mem_total, sizeof(u64));
        if (ret)
@@ -980,7 +982,8 @@ static int write_topo_node(int fd, int node)
        ret = do_write_string(fd, buf);
 done:
        free(buf);
-       fclose(fp);
+       if (fp)
+               fclose(fp);
        return ret;
 }
 
@@ -1051,16 +1054,25 @@ static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
        struct perf_pmu *pmu = NULL;
        off_t offset = lseek(fd, 0, SEEK_CUR);
        __u32 pmu_num = 0;
+       int ret;
 
        /* write real pmu_num later */
-       do_write(fd, &pmu_num, sizeof(pmu_num));
+       ret = do_write(fd, &pmu_num, sizeof(pmu_num));
+       if (ret < 0)
+               return ret;
 
        while ((pmu = perf_pmu__scan(pmu))) {
                if (!pmu->name)
                        continue;
                pmu_num++;
-               do_write(fd, &pmu->type, sizeof(pmu->type));
-               do_write_string(fd, pmu->name);
+
+               ret = do_write(fd, &pmu->type, sizeof(pmu->type));
+               if (ret < 0)
+                       return ret;
+
+               ret = do_write_string(fd, pmu->name);
+               if (ret < 0)
+                       return ret;
        }
 
        if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
@@ -1072,6 +1084,52 @@ static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
        return 0;
 }
 
+/*
+ * File format:
+ *
+ * struct group_descs {
+ *     u32     nr_groups;
+ *     struct group_desc {
+ *             char    name[];
+ *             u32     leader_idx;
+ *             u32     nr_members;
+ *     }[nr_groups];
+ * };
+ */
+static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
+                           struct perf_evlist *evlist)
+{
+       u32 nr_groups = evlist->nr_groups;
+       struct perf_evsel *evsel;
+       int ret;
+
+       ret = do_write(fd, &nr_groups, sizeof(nr_groups));
+       if (ret < 0)
+               return ret;
+
+       list_for_each_entry(evsel, &evlist->entries, node) {
+               if (perf_evsel__is_group_leader(evsel) &&
+                   evsel->nr_members > 1) {
+                       const char *name = evsel->group_name ?: "{anon_group}";
+                       u32 leader_idx = evsel->idx;
+                       u32 nr_members = evsel->nr_members;
+
+                       ret = do_write_string(fd, name);
+                       if (ret < 0)
+                               return ret;
+
+                       ret = do_write(fd, &leader_idx, sizeof(leader_idx));
+                       if (ret < 0)
+                               return ret;
+
+                       ret = do_write(fd, &nr_members, sizeof(nr_members));
+                       if (ret < 0)
+                               return ret;
+               }
+       }
+       return 0;
+}
+
 /*
  * default get_cpuid(): nothing gets recorded
  * actual implementation must be in arch/$(ARCH)/util/header.c
@@ -1209,14 +1267,14 @@ read_event_desc(struct perf_header *ph, int fd)
        size_t msz;
 
        /* number of events */
-       ret = read(fd, &nre, sizeof(nre));
+       ret = readn(fd, &nre, sizeof(nre));
        if (ret != (ssize_t)sizeof(nre))
                goto error;
 
        if (ph->needs_swap)
                nre = bswap_32(nre);
 
-       ret = read(fd, &sz, sizeof(sz));
+       ret = readn(fd, &sz, sizeof(sz));
        if (ret != (ssize_t)sizeof(sz))
                goto error;
 
@@ -1244,7 +1302,7 @@ read_event_desc(struct perf_header *ph, int fd)
                 * must read entire on-file attr struct to
                 * sync up with layout.
                 */
-               ret = read(fd, buf, sz);
+               ret = readn(fd, buf, sz);
                if (ret != (ssize_t)sz)
                        goto error;
 
@@ -1253,7 +1311,7 @@ read_event_desc(struct perf_header *ph, int fd)
 
                memcpy(&evsel->attr, buf, msz);
 
-               ret = read(fd, &nr, sizeof(nr));
+               ret = readn(fd, &nr, sizeof(nr));
                if (ret != (ssize_t)sizeof(nr))
                        goto error;
 
@@ -1274,7 +1332,7 @@ read_event_desc(struct perf_header *ph, int fd)
                evsel->id = id;
 
                for (j = 0 ; j < nr; j++) {
-                       ret = read(fd, id, sizeof(*id));
+                       ret = readn(fd, id, sizeof(*id));
                        if (ret != (ssize_t)sizeof(*id))
                                goto error;
                        if (ph->needs_swap)
@@ -1435,6 +1493,31 @@ error:
        fprintf(fp, "# pmu mappings: unable to read\n");
 }
 
+static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
+                            FILE *fp)
+{
+       struct perf_session *session;
+       struct perf_evsel *evsel;
+       u32 nr = 0;
+
+       session = container_of(ph, struct perf_session, header);
+
+       list_for_each_entry(evsel, &session->evlist->entries, node) {
+               if (perf_evsel__is_group_leader(evsel) &&
+                   evsel->nr_members > 1) {
+                       fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
+                               perf_evsel__name(evsel));
+
+                       nr = evsel->nr_members - 1;
+               } else if (nr) {
+                       fprintf(fp, ",%s", perf_evsel__name(evsel));
+
+                       if (--nr == 0)
+                               fprintf(fp, "}\n");
+               }
+       }
+}
+
 static int __event_process_build_id(struct build_id_event *bev,
                                    char *filename,
                                    struct perf_session *session)
@@ -1506,14 +1589,14 @@ static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
        while (offset < limit) {
                ssize_t len;
 
-               if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
+               if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
                        return -1;
 
                if (header->needs_swap)
                        perf_event_header__bswap(&old_bev.header);
 
                len = old_bev.header.size - sizeof(old_bev);
-               if (read(input, filename, len) != len)
+               if (readn(input, filename, len) != len)
                        return -1;
 
                bev.header = old_bev.header;
@@ -1548,14 +1631,14 @@ static int perf_header__read_build_ids(struct perf_header *header,
        while (offset < limit) {
                ssize_t len;
 
-               if (read(input, &bev, sizeof(bev)) != sizeof(bev))
+               if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
                        goto out;
 
                if (header->needs_swap)
                        perf_event_header__bswap(&bev.header);
 
                len = bev.header.size - sizeof(bev);
-               if (read(input, filename, len) != len)
+               if (readn(input, filename, len) != len)
                        goto out;
                /*
                 * The a1645ce1 changeset:
@@ -1641,7 +1724,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
        size_t ret;
        u32 nr;
 
-       ret = read(fd, &nr, sizeof(nr));
+       ret = readn(fd, &nr, sizeof(nr));
        if (ret != sizeof(nr))
                return -1;
 
@@ -1650,7 +1733,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
 
        ph->env.nr_cpus_online = nr;
 
-       ret = read(fd, &nr, sizeof(nr));
+       ret = readn(fd, &nr, sizeof(nr));
        if (ret != sizeof(nr))
                return -1;
 
@@ -1684,7 +1767,7 @@ static int process_total_mem(struct perf_file_section *section __maybe_unused,
        uint64_t mem;
        size_t ret;
 
-       ret = read(fd, &mem, sizeof(mem));
+       ret = readn(fd, &mem, sizeof(mem));
        if (ret != sizeof(mem))
                return -1;
 
@@ -1756,7 +1839,7 @@ static int process_cmdline(struct perf_file_section *section __maybe_unused,
        u32 nr, i;
        struct strbuf sb;
 
-       ret = read(fd, &nr, sizeof(nr));
+       ret = readn(fd, &nr, sizeof(nr));
        if (ret != sizeof(nr))
                return -1;
 
@@ -1792,7 +1875,7 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused
        char *str;
        struct strbuf sb;
 
-       ret = read(fd, &nr, sizeof(nr));
+       ret = readn(fd, &nr, sizeof(nr));
        if (ret != sizeof(nr))
                return -1;
 
@@ -1813,7 +1896,7 @@ static int process_cpu_topology(struct perf_file_section *section __maybe_unused
        }
        ph->env.sibling_cores = strbuf_detach(&sb, NULL);
 
-       ret = read(fd, &nr, sizeof(nr));
+       ret = readn(fd, &nr, sizeof(nr));
        if (ret != sizeof(nr))
                return -1;
 
@@ -1850,7 +1933,7 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unuse
        struct strbuf sb;
 
        /* nr nodes */
-       ret = read(fd, &nr, sizeof(nr));
+       ret = readn(fd, &nr, sizeof(nr));
        if (ret != sizeof(nr))
                goto error;
 
@@ -1862,15 +1945,15 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unuse
 
        for (i = 0; i < nr; i++) {
                /* node number */
-               ret = read(fd, &node, sizeof(node));
+               ret = readn(fd, &node, sizeof(node));
                if (ret != sizeof(node))
                        goto error;
 
-               ret = read(fd, &mem_total, sizeof(u64));
+               ret = readn(fd, &mem_total, sizeof(u64));
                if (ret != sizeof(u64))
                        goto error;
 
-               ret = read(fd, &mem_free, sizeof(u64));
+               ret = readn(fd, &mem_free, sizeof(u64));
                if (ret != sizeof(u64))
                        goto error;
 
@@ -1909,7 +1992,7 @@ static int process_pmu_mappings(struct perf_file_section *section __maybe_unused
        u32 type;
        struct strbuf sb;
 
-       ret = read(fd, &pmu_num, sizeof(pmu_num));
+       ret = readn(fd, &pmu_num, sizeof(pmu_num));
        if (ret != sizeof(pmu_num))
                return -1;
 
@@ -1925,7 +2008,7 @@ static int process_pmu_mappings(struct perf_file_section *section __maybe_unused
        strbuf_init(&sb, 128);
 
        while (pmu_num) {
-               if (read(fd, &type, sizeof(type)) != sizeof(type))
+               if (readn(fd, &type, sizeof(type)) != sizeof(type))
                        goto error;
                if (ph->needs_swap)
                        type = bswap_32(type);
@@ -1949,6 +2032,98 @@ error:
        return -1;
 }
 
+static int process_group_desc(struct perf_file_section *section __maybe_unused,
+                             struct perf_header *ph, int fd,
+                             void *data __maybe_unused)
+{
+       size_t ret = -1;
+       u32 i, nr, nr_groups;
+       struct perf_session *session;
+       struct perf_evsel *evsel, *leader = NULL;
+       struct group_desc {
+               char *name;
+               u32 leader_idx;
+               u32 nr_members;
+       } *desc;
+
+       if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
+               return -1;
+
+       if (ph->needs_swap)
+               nr_groups = bswap_32(nr_groups);
+
+       ph->env.nr_groups = nr_groups;
+       if (!nr_groups) {
+               pr_debug("group desc not available\n");
+               return 0;
+       }
+
+       desc = calloc(nr_groups, sizeof(*desc));
+       if (!desc)
+               return -1;
+
+       for (i = 0; i < nr_groups; i++) {
+               desc[i].name = do_read_string(fd, ph);
+               if (!desc[i].name)
+                       goto out_free;
+
+               if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
+                       goto out_free;
+
+               if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
+                       goto out_free;
+
+               if (ph->needs_swap) {
+                       desc[i].leader_idx = bswap_32(desc[i].leader_idx);
+                       desc[i].nr_members = bswap_32(desc[i].nr_members);
+               }
+       }
+
+       /*
+        * Rebuild group relationship based on the group_desc
+        */
+       session = container_of(ph, struct perf_session, header);
+       session->evlist->nr_groups = nr_groups;
+
+       i = nr = 0;
+       list_for_each_entry(evsel, &session->evlist->entries, node) {
+               if (evsel->idx == (int) desc[i].leader_idx) {
+                       evsel->leader = evsel;
+                       /* {anon_group} is a dummy name */
+                       if (strcmp(desc[i].name, "{anon_group}"))
+                               evsel->group_name = desc[i].name;
+                       evsel->nr_members = desc[i].nr_members;
+
+                       if (i >= nr_groups || nr > 0) {
+                               pr_debug("invalid group desc\n");
+                               goto out_free;
+                       }
+
+                       leader = evsel;
+                       nr = evsel->nr_members - 1;
+                       i++;
+               } else if (nr) {
+                       /* This is a group member */
+                       evsel->leader = leader;
+
+                       nr--;
+               }
+       }
+
+       if (i != nr_groups || nr != 0) {
+               pr_debug("invalid group desc\n");
+               goto out_free;
+       }
+
+       ret = 0;
+out_free:
+       while ((int) --i >= 0)
+               free(desc[i].name);
+       free(desc);
+
+       return ret;
+}
+
 struct feature_ops {
        int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
        void (*print)(struct perf_header *h, int fd, FILE *fp);
@@ -1988,6 +2163,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
        FEAT_OPF(HEADER_NUMA_TOPOLOGY,  numa_topology),
        FEAT_OPA(HEADER_BRANCH_STACK,   branch_stack),
        FEAT_OPP(HEADER_PMU_MAPPINGS,   pmu_mappings),
+       FEAT_OPP(HEADER_GROUP_DESC,     group_desc),
 };
 
 struct header_print_data {
@@ -2077,7 +2253,7 @@ static int perf_header__adds_write(struct perf_header *header,
        if (!nr_sections)
                return 0;
 
-       feat_sec = p = calloc(sizeof(*feat_sec), nr_sections);
+       feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
        if (feat_sec == NULL)
                return -ENOMEM;
 
@@ -2249,7 +2425,7 @@ int perf_header__process_sections(struct perf_header *header, int fd,
        if (!nr_sections)
                return 0;
 
-       feat_sec = sec = calloc(sizeof(*feat_sec), nr_sections);
+       feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
        if (!feat_sec)
                return -1;
 
@@ -2912,16 +3088,22 @@ int perf_event__process_tracing_data(union perf_event *event,
                                 session->repipe);
        padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
 
-       if (read(session->fd, buf, padding) < 0)
-               die("reading input file");
+       if (readn(session->fd, buf, padding) < 0) {
+               pr_err("%s: reading input file", __func__);
+               return -1;
+       }
        if (session->repipe) {
                int retw = write(STDOUT_FILENO, buf, padding);
-               if (retw <= 0 || retw != padding)
-                       die("repiping tracing data padding");
+               if (retw <= 0 || retw != padding) {
+                       pr_err("%s: repiping tracing data padding", __func__);
+                       return -1;
+               }
        }
 
-       if (size_read + padding != size)
-               die("tracing data size mismatch");
+       if (size_read + padding != size) {
+               pr_err("%s: tracing data size mismatch", __func__);
+               return -1;
+       }
 
        perf_evlist__prepare_tracepoint_events(session->evlist,
                                               session->pevent);
index 20f0344accb1918e88946d70994cfab13a9c371a..c9fc55cada6d1ceaf04c001f40bc9c052ef566ff 100644 (file)
@@ -29,6 +29,7 @@ enum {
        HEADER_NUMA_TOPOLOGY,
        HEADER_BRANCH_STACK,
        HEADER_PMU_MAPPINGS,
+       HEADER_GROUP_DESC,
        HEADER_LAST_FEATURE,
        HEADER_FEAT_BITS        = 256,
 };
@@ -79,6 +80,7 @@ struct perf_session_env {
        char                    *numa_nodes;
        int                     nr_pmu_mappings;
        char                    *pmu_mappings;
+       int                     nr_groups;
 };
 
 struct perf_header {
index cb17e2a8c6ed49d95929984b3147178b3c02da5d..f855941bebea0afa369137d72306c86e5904a7bd 100644 (file)
@@ -4,6 +4,7 @@
 #include "hist.h"
 #include "session.h"
 #include "sort.h"
+#include "evsel.h"
 #include <math.h>
 
 static bool hists__filter_entry_by_dso(struct hists *hists,
@@ -82,6 +83,9 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
                hists__new_col_len(hists, HISTC_DSO, len);
        }
 
+       if (h->parent)
+               hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
+
        if (h->branch_info) {
                int symlen;
                /*
@@ -242,6 +246,14 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template)
 
                if (he->ms.map)
                        he->ms.map->referenced = true;
+
+               if (he->branch_info) {
+                       if (he->branch_info->from.map)
+                               he->branch_info->from.map->referenced = true;
+                       if (he->branch_info->to.map)
+                               he->branch_info->to.map->referenced = true;
+               }
+
                if (symbol_conf.use_callchain)
                        callchain_init(he->callchain);
 
@@ -251,7 +263,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template)
        return he;
 }
 
-static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
+void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
 {
        if (!h->filtered) {
                hists__calc_col_len(hists, h);
@@ -285,7 +297,13 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);
 
-               cmp = hist_entry__cmp(entry, he);
+               /*
+        * Make sure that it receives arguments in the same order as
+        * hist_entry__collapse() so that we can use an appropriate
+        * function when searching an entry regardless of which sort
+        * keys were used.
+                */
+               cmp = hist_entry__cmp(he, entry);
 
                if (!cmp) {
                        he_stat__add_period(&he->stat, period);
@@ -523,6 +541,62 @@ void hists__collapse_resort_threaded(struct hists *hists)
  * reverse the map, sort on period.
  */
 
+static int period_cmp(u64 period_a, u64 period_b)
+{
+       if (period_a > period_b)
+               return 1;
+       if (period_a < period_b)
+               return -1;
+       return 0;
+}
+
+static int hist_entry__sort_on_period(struct hist_entry *a,
+                                     struct hist_entry *b)
+{
+       int ret;
+       int i, nr_members;
+       struct perf_evsel *evsel;
+       struct hist_entry *pair;
+       u64 *periods_a, *periods_b;
+
+       ret = period_cmp(a->stat.period, b->stat.period);
+       if (ret || !symbol_conf.event_group)
+               return ret;
+
+       evsel = hists_to_evsel(a->hists);
+       nr_members = evsel->nr_members;
+       if (nr_members <= 1)
+               return ret;
+
+       periods_a = zalloc(sizeof(periods_a) * nr_members);
+       periods_b = zalloc(sizeof(periods_b) * nr_members);
+
+       if (!periods_a || !periods_b)
+               goto out;
+
+       list_for_each_entry(pair, &a->pairs.head, pairs.node) {
+               evsel = hists_to_evsel(pair->hists);
+               periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
+       }
+
+       list_for_each_entry(pair, &b->pairs.head, pairs.node) {
+               evsel = hists_to_evsel(pair->hists);
+               periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
+       }
+
+       for (i = 1; i < nr_members; i++) {
+               ret = period_cmp(periods_a[i], periods_b[i]);
+               if (ret)
+                       break;
+       }
+
+out:
+       free(periods_a);
+       free(periods_b);
+
+       return ret;
+}
+
 static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits)
@@ -539,7 +613,7 @@ static void __hists__insert_output_entry(struct rb_root *entries,
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);
 
-               if (he->stat.period > iter->stat.period)
+               if (hist_entry__sort_on_period(he, iter) > 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
@@ -711,25 +785,38 @@ int hist_entry__annotate(struct hist_entry *he, size_t privsize)
        return symbol__annotate(he->ms.sym, he->ms.map, privsize);
 }
 
+void events_stats__inc(struct events_stats *stats, u32 type)
+{
+       ++stats->nr_events[0];
+       ++stats->nr_events[type];
+}
+
 void hists__inc_nr_events(struct hists *hists, u32 type)
 {
-       ++hists->stats.nr_events[0];
-       ++hists->stats.nr_events[type];
+       events_stats__inc(&hists->stats, type);
 }
 
 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
                                                 struct hist_entry *pair)
 {
-       struct rb_node **p = &hists->entries.rb_node;
+       struct rb_root *root;
+       struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int cmp;
 
+       if (sort__need_collapse)
+               root = &hists->entries_collapsed;
+       else
+               root = hists->entries_in;
+
+       p = &root->rb_node;
+
        while (*p != NULL) {
                parent = *p;
-               he = rb_entry(parent, struct hist_entry, rb_node);
+               he = rb_entry(parent, struct hist_entry, rb_node_in);
 
-               cmp = hist_entry__cmp(pair, he);
+               cmp = hist_entry__collapse(he, pair);
 
                if (!cmp)
                        goto out;
@@ -744,8 +831,8 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
        if (he) {
                memset(&he->stat, 0, sizeof(he->stat));
                he->hists = hists;
-               rb_link_node(&he->rb_node, parent, p);
-               rb_insert_color(&he->rb_node, &hists->entries);
+               rb_link_node(&he->rb_node_in, parent, p);
+               rb_insert_color(&he->rb_node_in, root);
                hists__inc_nr_entries(hists, he);
        }
 out:
@@ -755,11 +842,16 @@ out:
 static struct hist_entry *hists__find_entry(struct hists *hists,
                                            struct hist_entry *he)
 {
-       struct rb_node *n = hists->entries.rb_node;
+       struct rb_node *n;
+
+       if (sort__need_collapse)
+               n = hists->entries_collapsed.rb_node;
+       else
+               n = hists->entries_in->rb_node;
 
        while (n) {
-               struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node);
-               int64_t cmp = hist_entry__cmp(he, iter);
+               struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
+               int64_t cmp = hist_entry__collapse(iter, he);
 
                if (cmp < 0)
                        n = n->rb_left;
@@ -777,15 +869,21 @@ static struct hist_entry *hists__find_entry(struct hists *hists,
  */
 void hists__match(struct hists *leader, struct hists *other)
 {
+       struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;
 
-       for (nd = rb_first(&leader->entries); nd; nd = rb_next(nd)) {
-               pos  = rb_entry(nd, struct hist_entry, rb_node);
+       if (sort__need_collapse)
+               root = &leader->entries_collapsed;
+       else
+               root = leader->entries_in;
+
+       for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+               pos  = rb_entry(nd, struct hist_entry, rb_node_in);
                pair = hists__find_entry(other, pos);
 
                if (pair)
-                       hist__entry_add_pair(pos, pair);
+                       hist_entry__add_pair(pair, pos);
        }
 }
 
@@ -796,17 +894,23 @@ void hists__match(struct hists *leader, struct hists *other)
  */
 int hists__link(struct hists *leader, struct hists *other)
 {
+       struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;
 
-       for (nd = rb_first(&other->entries); nd; nd = rb_next(nd)) {
-               pos = rb_entry(nd, struct hist_entry, rb_node);
+       if (sort__need_collapse)
+               root = &other->entries_collapsed;
+       else
+               root = other->entries_in;
+
+       for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+               pos = rb_entry(nd, struct hist_entry, rb_node_in);
 
                if (!hist_entry__has_pairs(pos)) {
                        pair = hists__add_dummy_entry(leader, pos);
                        if (pair == NULL)
                                return -1;
-                       hist__entry_add_pair(pair, pos);
+                       hist_entry__add_pair(pos, pair);
                }
        }
 
index 8b091a51e4a265361f6dc0cdab2b93567bf79943..38624686ee9a145ffaacbcfa063e5d985da750f0 100644 (file)
@@ -96,8 +96,10 @@ void hists__decay_entries_threaded(struct hists *hists, bool zap_user,
                                   bool zap_kernel);
 void hists__output_recalc_col_len(struct hists *hists, int max_rows);
 
+void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h);
 void hists__inc_nr_events(struct hists *self, u32 type);
-size_t hists__fprintf_nr_events(struct hists *self, FILE *fp);
+void events_stats__inc(struct events_stats *stats, u32 type);
+size_t events_stats__fprintf(struct events_stats *stats, FILE *fp);
 
 size_t hists__fprintf(struct hists *self, bool show_header, int max_rows,
                      int max_cols, FILE *fp);
@@ -126,13 +128,19 @@ struct perf_hpp {
 };
 
 struct perf_hpp_fmt {
-       bool cond;
        int (*header)(struct perf_hpp *hpp);
        int (*width)(struct perf_hpp *hpp);
        int (*color)(struct perf_hpp *hpp, struct hist_entry *he);
        int (*entry)(struct perf_hpp *hpp, struct hist_entry *he);
+
+       struct list_head list;
 };
 
+extern struct list_head perf_hpp__list;
+
+#define perf_hpp__for_each_format(format) \
+       list_for_each_entry(format, &perf_hpp__list, list)
+
 extern struct perf_hpp_fmt perf_hpp__format[];
 
 enum {
@@ -148,14 +156,14 @@ enum {
        PERF_HPP__DELTA,
        PERF_HPP__RATIO,
        PERF_HPP__WEIGHTED_DIFF,
-       PERF_HPP__DISPL,
        PERF_HPP__FORMULA,
 
        PERF_HPP__MAX_INDEX
 };
 
 void perf_hpp__init(void);
-void perf_hpp__column_enable(unsigned col, bool enable);
+void perf_hpp__column_register(struct perf_hpp_fmt *format);
+void perf_hpp__column_enable(unsigned col);
 int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he,
                                bool color);
 
@@ -219,8 +227,10 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __maybe_unused,
 
 unsigned int hists__sort_list_width(struct hists *self);
 
-double perf_diff__compute_delta(struct hist_entry *he);
-double perf_diff__compute_ratio(struct hist_entry *he);
-s64 perf_diff__compute_wdiff(struct hist_entry *he);
-int perf_diff__formula(char *buf, size_t size, struct hist_entry *he);
+double perf_diff__compute_delta(struct hist_entry *he, struct hist_entry *pair);
+double perf_diff__compute_ratio(struct hist_entry *he, struct hist_entry *pair);
+s64 perf_diff__compute_wdiff(struct hist_entry *he, struct hist_entry *pair);
+int perf_diff__formula(struct hist_entry *he, struct hist_entry *pair,
+                      char *buf, size_t size);
+double perf_diff__period_percent(struct hist_entry *he, u64 period);
 #endif /* __PERF_HIST_H */
index a55d8cf083c98ed4a237a25e1a3844be02673466..45cf10a562bd7958c9edd160ab20a0725ecb0385 100644 (file)
@@ -14,6 +14,7 @@
 #define BITS_TO_LONGS(nr)       DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
 #define BITS_TO_U64(nr)         DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
 #define BITS_TO_U32(nr)         DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
+#define BITS_TO_BYTES(nr)       DIV_ROUND_UP(nr, BITS_PER_BYTE)
 
 #define for_each_set_bit(bit, addr, size) \
        for ((bit) = find_first_bit((addr), (size));            \
index 9d0740024ba88550c5db5329babd7e5238abe3b2..11a8d86f7fea3bf4ec5fb8cb05f27dc4c491d948 100644 (file)
@@ -59,16 +59,40 @@ void intlist__remove(struct intlist *ilist, struct int_node *node)
 
 struct int_node *intlist__find(struct intlist *ilist, int i)
 {
-       struct int_node *node = NULL;
-       struct rb_node *rb_node = rblist__find(&ilist->rblist, (void *)((long)i));
+       struct int_node *node;
+       struct rb_node *rb_node;
 
+       if (ilist == NULL)
+               return NULL;
+
+       node = NULL;
+       rb_node = rblist__find(&ilist->rblist, (void *)((long)i));
        if (rb_node)
                node = container_of(rb_node, struct int_node, rb_node);
 
        return node;
 }
 
-struct intlist *intlist__new(void)
+static int intlist__parse_list(struct intlist *ilist, const char *s)
+{
+       char *sep;
+       int err;
+
+       do {
+               long value = strtol(s, &sep, 10);
+               err = -EINVAL;
+               if (*sep != ',' && *sep != '\0')
+                       break;
+               err = intlist__add(ilist, value);
+               if (err)
+                       break;
+               s = sep + 1;
+       } while (*sep != '\0');
+
+       return err;
+}
+
+struct intlist *intlist__new(const char *slist)
 {
        struct intlist *ilist = malloc(sizeof(*ilist));
 
@@ -77,9 +101,15 @@ struct intlist *intlist__new(void)
                ilist->rblist.node_cmp    = intlist__node_cmp;
                ilist->rblist.node_new    = intlist__node_new;
                ilist->rblist.node_delete = intlist__node_delete;
+
+               if (slist && intlist__parse_list(ilist, slist))
+                       goto out_delete;
        }
 
        return ilist;
+out_delete:
+       intlist__delete(ilist);
+       return NULL;
 }
 
 void intlist__delete(struct intlist *ilist)
index 6d63ab90db508e29fd7b1e8affdaae3bd7d9f401..62351dad848f76c478cced833c315cc71ff1ed63 100644 (file)
@@ -15,7 +15,7 @@ struct intlist {
        struct rblist rblist;
 };
 
-struct intlist *intlist__new(void);
+struct intlist *intlist__new(const char *slist);
 void intlist__delete(struct intlist *ilist);
 
 void intlist__remove(struct intlist *ilist, struct int_node *in);
index 1f09d0581e6b021e37ba5d97f27e5e2b592e6c05..efdb38e65a92f75c0addc2ee16817c28ab07731b 100644 (file)
@@ -1,10 +1,15 @@
+#include "callchain.h"
 #include "debug.h"
 #include "event.h"
+#include "evsel.h"
+#include "hist.h"
 #include "machine.h"
 #include "map.h"
+#include "sort.h"
 #include "strlist.h"
 #include "thread.h"
 #include <stdbool.h>
+#include "unwind.h"
 
 int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
 {
@@ -48,6 +53,29 @@ static void dsos__delete(struct list_head *dsos)
        }
 }
 
+void machine__delete_dead_threads(struct machine *machine)
+{
+       struct thread *n, *t;
+
+       list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
+               list_del(&t->node);
+               thread__delete(t);
+       }
+}
+
+void machine__delete_threads(struct machine *machine)
+{
+       struct rb_node *nd = rb_first(&machine->threads);
+
+       while (nd) {
+               struct thread *t = rb_entry(nd, struct thread, rb_node);
+
+               rb_erase(&t->rb_node, &machine->threads);
+               nd = rb_next(nd);
+               thread__delete(t);
+       }
+}
+
 void machine__exit(struct machine *machine)
 {
        map_groups__exit(&machine->kmaps);
@@ -63,10 +91,22 @@ void machine__delete(struct machine *machine)
        free(machine);
 }
 
-struct machine *machines__add(struct rb_root *machines, pid_t pid,
+void machines__init(struct machines *machines)
+{
+       machine__init(&machines->host, "", HOST_KERNEL_ID);
+       machines->guests = RB_ROOT;
+}
+
+void machines__exit(struct machines *machines)
+{
+       machine__exit(&machines->host);
+       /* XXX exit guest */
+}
+
+struct machine *machines__add(struct machines *machines, pid_t pid,
                              const char *root_dir)
 {
-       struct rb_node **p = &machines->rb_node;
+       struct rb_node **p = &machines->guests.rb_node;
        struct rb_node *parent = NULL;
        struct machine *pos, *machine = malloc(sizeof(*machine));
 
@@ -88,18 +128,21 @@ struct machine *machines__add(struct rb_root *machines, pid_t pid,
        }
 
        rb_link_node(&machine->rb_node, parent, p);
-       rb_insert_color(&machine->rb_node, machines);
+       rb_insert_color(&machine->rb_node, &machines->guests);
 
        return machine;
 }
 
-struct machine *machines__find(struct rb_root *machines, pid_t pid)
+struct machine *machines__find(struct machines *machines, pid_t pid)
 {
-       struct rb_node **p = &machines->rb_node;
+       struct rb_node **p = &machines->guests.rb_node;
        struct rb_node *parent = NULL;
        struct machine *machine;
        struct machine *default_machine = NULL;
 
+       if (pid == HOST_KERNEL_ID)
+               return &machines->host;
+
        while (*p != NULL) {
                parent = *p;
                machine = rb_entry(parent, struct machine, rb_node);
@@ -116,7 +159,7 @@ struct machine *machines__find(struct rb_root *machines, pid_t pid)
        return default_machine;
 }
 
-struct machine *machines__findnew(struct rb_root *machines, pid_t pid)
+struct machine *machines__findnew(struct machines *machines, pid_t pid)
 {
        char path[PATH_MAX];
        const char *root_dir = "";
@@ -150,12 +193,12 @@ out:
        return machine;
 }
 
-void machines__process(struct rb_root *machines,
-                      machine__process_t process, void *data)
+void machines__process_guests(struct machines *machines,
+                             machine__process_t process, void *data)
 {
        struct rb_node *nd;
 
-       for (nd = rb_first(machines); nd; nd = rb_next(nd)) {
+       for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
                process(pos, data);
        }
@@ -175,12 +218,14 @@ char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
        return bf;
 }
 
-void machines__set_id_hdr_size(struct rb_root *machines, u16 id_hdr_size)
+void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
 {
        struct rb_node *node;
        struct machine *machine;
 
-       for (node = rb_first(machines); node; node = rb_next(node)) {
+       machines->host.id_hdr_size = id_hdr_size;
+
+       for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
                machine = rb_entry(node, struct machine, rb_node);
                machine->id_hdr_size = id_hdr_size;
        }
@@ -264,6 +309,537 @@ int machine__process_lost_event(struct machine *machine __maybe_unused,
        return 0;
 }
 
+struct map *machine__new_module(struct machine *machine, u64 start,
+                               const char *filename)
+{
+       struct map *map;
+       struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);
+
+       if (dso == NULL)
+               return NULL;
+
+       map = map__new2(start, dso, MAP__FUNCTION);
+       if (map == NULL)
+               return NULL;
+
+       if (machine__is_host(machine))
+               dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
+       else
+               dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
+       map_groups__insert(&machine->kmaps, map);
+       return map;
+}
+
+size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
+{
+       struct rb_node *nd;
+       size_t ret = __dsos__fprintf(&machines->host.kernel_dsos, fp) +
+                    __dsos__fprintf(&machines->host.user_dsos, fp);
+
+       for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+               struct machine *pos = rb_entry(nd, struct machine, rb_node);
+               ret += __dsos__fprintf(&pos->kernel_dsos, fp);
+               ret += __dsos__fprintf(&pos->user_dsos, fp);
+       }
+
+       return ret;
+}
+
+size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
+                                    bool (skip)(struct dso *dso, int parm), int parm)
+{
+       return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, skip, parm) +
+              __dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm);
+}
+
+size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
+                                    bool (skip)(struct dso *dso, int parm), int parm)
+{
+       struct rb_node *nd;
+       size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
+
+       for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+               struct machine *pos = rb_entry(nd, struct machine, rb_node);
+               ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
+       }
+       return ret;
+}
+
+size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
+{
+       int i;
+       size_t printed = 0;
+       struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;
+
+       if (kdso->has_build_id) {
+               char filename[PATH_MAX];
+               if (dso__build_id_filename(kdso, filename, sizeof(filename)))
+                       printed += fprintf(fp, "[0] %s\n", filename);
+       }
+
+       for (i = 0; i < vmlinux_path__nr_entries; ++i)
+               printed += fprintf(fp, "[%d] %s\n",
+                                  i + kdso->has_build_id, vmlinux_path[i]);
+
+       return printed;
+}
+
+size_t machine__fprintf(struct machine *machine, FILE *fp)
+{
+       size_t ret = 0;
+       struct rb_node *nd;
+
+       for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
+               struct thread *pos = rb_entry(nd, struct thread, rb_node);
+
+               ret += thread__fprintf(pos, fp);
+       }
+
+       return ret;
+}
+
+static struct dso *machine__get_kernel(struct machine *machine)
+{
+       const char *vmlinux_name = NULL;
+       struct dso *kernel;
+
+       if (machine__is_host(machine)) {
+               vmlinux_name = symbol_conf.vmlinux_name;
+               if (!vmlinux_name)
+                       vmlinux_name = "[kernel.kallsyms]";
+
+               kernel = dso__kernel_findnew(machine, vmlinux_name,
+                                            "[kernel]",
+                                            DSO_TYPE_KERNEL);
+       } else {
+               char bf[PATH_MAX];
+
+               if (machine__is_default_guest(machine))
+                       vmlinux_name = symbol_conf.default_guest_vmlinux_name;
+               if (!vmlinux_name)
+                       vmlinux_name = machine__mmap_name(machine, bf,
+                                                         sizeof(bf));
+
+               kernel = dso__kernel_findnew(machine, vmlinux_name,
+                                            "[guest.kernel]",
+                                            DSO_TYPE_GUEST_KERNEL);
+       }
+
+       if (kernel != NULL && (!kernel->has_build_id))
+               dso__read_running_kernel_build_id(kernel, machine);
+
+       return kernel;
+}
+
+struct process_args {
+       u64 start;
+};
+
+static int symbol__in_kernel(void *arg, const char *name,
+                            char type __maybe_unused, u64 start)
+{
+       struct process_args *args = arg;
+
+       if (strchr(name, '['))
+               return 0;
+
+       args->start = start;
+       return 1;
+}
+
+/* Figure out the start address of kernel map from /proc/kallsyms */
+static u64 machine__get_kernel_start_addr(struct machine *machine)
+{
+       const char *filename;
+       char path[PATH_MAX];
+       struct process_args args;
+
+       if (machine__is_host(machine)) {
+               filename = "/proc/kallsyms";
+       } else {
+               if (machine__is_default_guest(machine))
+                       filename = (char *)symbol_conf.default_guest_kallsyms;
+               else {
+                       sprintf(path, "%s/proc/kallsyms", machine->root_dir);
+                       filename = path;
+               }
+       }
+
+       if (symbol__restricted_filename(filename, "/proc/kallsyms"))
+               return 0;
+
+       if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
+               return 0;
+
+       return args.start;
+}
+
+int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
+{
+       enum map_type type;
+       u64 start = machine__get_kernel_start_addr(machine);
+
+       for (type = 0; type < MAP__NR_TYPES; ++type) {
+               struct kmap *kmap;
+
+               machine->vmlinux_maps[type] = map__new2(start, kernel, type);
+               if (machine->vmlinux_maps[type] == NULL)
+                       return -1;
+
+               machine->vmlinux_maps[type]->map_ip =
+                       machine->vmlinux_maps[type]->unmap_ip =
+                               identity__map_ip;
+               kmap = map__kmap(machine->vmlinux_maps[type]);
+               kmap->kmaps = &machine->kmaps;
+               map_groups__insert(&machine->kmaps,
+                                  machine->vmlinux_maps[type]);
+       }
+
+       return 0;
+}
+
+void machine__destroy_kernel_maps(struct machine *machine)
+{
+       enum map_type type;
+
+       for (type = 0; type < MAP__NR_TYPES; ++type) {
+               struct kmap *kmap;
+
+               if (machine->vmlinux_maps[type] == NULL)
+                       continue;
+
+               kmap = map__kmap(machine->vmlinux_maps[type]);
+               map_groups__remove(&machine->kmaps,
+                                  machine->vmlinux_maps[type]);
+               if (kmap->ref_reloc_sym) {
+                       /*
+                        * ref_reloc_sym is shared among all maps, so free just
+                        * on one of them.
+                        */
+                       if (type == MAP__FUNCTION) {
+                               free((char *)kmap->ref_reloc_sym->name);
+                               kmap->ref_reloc_sym->name = NULL;
+                               free(kmap->ref_reloc_sym);
+                       }
+                       kmap->ref_reloc_sym = NULL;
+               }
+
+               map__delete(machine->vmlinux_maps[type]);
+               machine->vmlinux_maps[type] = NULL;
+       }
+}
+
+int machines__create_guest_kernel_maps(struct machines *machines)
+{
+       int ret = 0;
+       struct dirent **namelist = NULL;
+       int i, items = 0;
+       char path[PATH_MAX];
+       pid_t pid;
+       char *endp;
+
+       if (symbol_conf.default_guest_vmlinux_name ||
+           symbol_conf.default_guest_modules ||
+           symbol_conf.default_guest_kallsyms) {
+               machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
+       }
+
+       if (symbol_conf.guestmount) {
+               items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
+               if (items <= 0)
+                       return -ENOENT;
+               for (i = 0; i < items; i++) {
+                       if (!isdigit(namelist[i]->d_name[0])) {
+                               /* Filter out . and .. */
+                               continue;
+                       }
+                       pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
+                       if ((*endp != '\0') ||
+                           (endp == namelist[i]->d_name) ||
+                           (errno == ERANGE)) {
+                               pr_debug("invalid directory (%s). Skipping.\n",
+                                        namelist[i]->d_name);
+                               continue;
+                       }
+                       sprintf(path, "%s/%s/proc/kallsyms",
+                               symbol_conf.guestmount,
+                               namelist[i]->d_name);
+                       ret = access(path, R_OK);
+                       if (ret) {
+                               pr_debug("Can't access file %s\n", path);
+                               goto failure;
+                       }
+                       machines__create_kernel_maps(machines, pid);
+               }
+failure:
+               free(namelist);
+       }
+
+       return ret;
+}
+
+void machines__destroy_kernel_maps(struct machines *machines)
+{
+       struct rb_node *next = rb_first(&machines->guests);
+
+       machine__destroy_kernel_maps(&machines->host);
+
+       while (next) {
+               struct machine *pos = rb_entry(next, struct machine, rb_node);
+
+               next = rb_next(&pos->rb_node);
+               rb_erase(&pos->rb_node, &machines->guests);
+               machine__delete(pos);
+       }
+}
+
+int machines__create_kernel_maps(struct machines *machines, pid_t pid)
+{
+       struct machine *machine = machines__findnew(machines, pid);
+
+       if (machine == NULL)
+               return -1;
+
+       return machine__create_kernel_maps(machine);
+}
+
+int machine__load_kallsyms(struct machine *machine, const char *filename,
+                          enum map_type type, symbol_filter_t filter)
+{
+       struct map *map = machine->vmlinux_maps[type];
+       int ret = dso__load_kallsyms(map->dso, filename, map, filter);
+
+       if (ret > 0) {
+               dso__set_loaded(map->dso, type);
+               /*
+                * Since /proc/kallsyms will have multiple sessions for the
+                * kernel, with modules between them, fixup the end of all
+                * sections.
+                */
+               __map_groups__fixup_end(&machine->kmaps, type);
+       }
+
+       return ret;
+}
+
+int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
+                              symbol_filter_t filter)
+{
+       struct map *map = machine->vmlinux_maps[type];
+       int ret = dso__load_vmlinux_path(map->dso, map, filter);
+
+       if (ret > 0) {
+               dso__set_loaded(map->dso, type);
+               map__reloc_vmlinux(map);
+       }
+
+       return ret;
+}
+
+static void map_groups__fixup_end(struct map_groups *mg)
+{
+       int i;
+       for (i = 0; i < MAP__NR_TYPES; ++i)
+               __map_groups__fixup_end(mg, i);
+}
+
+static char *get_kernel_version(const char *root_dir)
+{
+       char version[PATH_MAX];
+       FILE *file;
+       char *name, *tmp;
+       const char *prefix = "Linux version ";
+
+       sprintf(version, "%s/proc/version", root_dir);
+       file = fopen(version, "r");
+       if (!file)
+               return NULL;
+
+       version[0] = '\0';
+       tmp = fgets(version, sizeof(version), file);
+       fclose(file);
+
+       name = strstr(version, prefix);
+       if (!name)
+               return NULL;
+       name += strlen(prefix);
+       tmp = strchr(name, ' ');
+       if (tmp)
+               *tmp = '\0';
+
+       return strdup(name);
+}
+
+static int map_groups__set_modules_path_dir(struct map_groups *mg,
+                               const char *dir_name)
+{
+       struct dirent *dent;
+       DIR *dir = opendir(dir_name);
+       int ret = 0;
+
+       if (!dir) {
+               pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
+               return -1;
+       }
+
+       while ((dent = readdir(dir)) != NULL) {
+               char path[PATH_MAX];
+               struct stat st;
+
+               /* sshfs might return bad dent->d_type, so we have to stat */
+               snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
+               if (stat(path, &st))
+                       continue;
+
+               if (S_ISDIR(st.st_mode)) {
+                       if (!strcmp(dent->d_name, ".") ||
+                           !strcmp(dent->d_name, ".."))
+                               continue;
+
+                       ret = map_groups__set_modules_path_dir(mg, path);
+                       if (ret < 0)
+                               goto out;
+               } else {
+                       char *dot = strrchr(dent->d_name, '.'),
+                            dso_name[PATH_MAX];
+                       struct map *map;
+                       char *long_name;
+
+                       if (dot == NULL || strcmp(dot, ".ko"))
+                               continue;
+                       snprintf(dso_name, sizeof(dso_name), "[%.*s]",
+                                (int)(dot - dent->d_name), dent->d_name);
+
+                       strxfrchar(dso_name, '-', '_');
+                       map = map_groups__find_by_name(mg, MAP__FUNCTION,
+                                                      dso_name);
+                       if (map == NULL)
+                               continue;
+
+                       long_name = strdup(path);
+                       if (long_name == NULL) {
+                               ret = -1;
+                               goto out;
+                       }
+                       dso__set_long_name(map->dso, long_name);
+                       map->dso->lname_alloc = 1;
+                       dso__kernel_module_get_build_id(map->dso, "");
+               }
+       }
+
+out:
+       closedir(dir);
+       return ret;
+}
+
+static int machine__set_modules_path(struct machine *machine)
+{
+       char *version;
+       char modules_path[PATH_MAX];
+
+       version = get_kernel_version(machine->root_dir);
+       if (!version)
+               return -1;
+
+       snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
+                machine->root_dir, version);
+       free(version);
+
+       return map_groups__set_modules_path_dir(&machine->kmaps, modules_path);
+}
+
+static int machine__create_modules(struct machine *machine)
+{
+       char *line = NULL;
+       size_t n;
+       FILE *file;
+       struct map *map;
+       const char *modules;
+       char path[PATH_MAX];
+
+       if (machine__is_default_guest(machine))
+               modules = symbol_conf.default_guest_modules;
+       else {
+               sprintf(path, "%s/proc/modules", machine->root_dir);
+               modules = path;
+       }
+
+       if (symbol__restricted_filename(path, "/proc/modules"))
+               return -1;
+
+       file = fopen(modules, "r");
+       if (file == NULL)
+               return -1;
+
+       while (!feof(file)) {
+               char name[PATH_MAX];
+               u64 start;
+               char *sep;
+               int line_len;
+
+               line_len = getline(&line, &n, file);
+               if (line_len < 0)
+                       break;
+
+               if (!line)
+                       goto out_failure;
+
+               line[--line_len] = '\0'; /* \n */
+
+               sep = strrchr(line, 'x');
+               if (sep == NULL)
+                       continue;
+
+               hex2u64(sep + 1, &start);
+
+               sep = strchr(line, ' ');
+               if (sep == NULL)
+                       continue;
+
+               *sep = '\0';
+
+               snprintf(name, sizeof(name), "[%s]", line);
+               map = machine__new_module(machine, start, name);
+               if (map == NULL)
+                       goto out_delete_line;
+               dso__kernel_module_get_build_id(map->dso, machine->root_dir);
+       }
+
+       free(line);
+       fclose(file);
+
+       return machine__set_modules_path(machine);
+
+out_delete_line:
+       free(line);
+out_failure:
+       return -1;
+}
+
+int machine__create_kernel_maps(struct machine *machine)
+{
+       struct dso *kernel = machine__get_kernel(machine);
+
+       if (kernel == NULL ||
+           __machine__create_kernel_maps(machine, kernel) < 0)
+               return -1;
+
+       if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
+               if (machine__is_host(machine))
+                       pr_debug("Problems creating module maps, "
+                                "continuing anyway...\n");
+               else
+                       pr_debug("Problems creating module maps for guest %d, "
+                                "continuing anyway...\n", machine->pid);
+       }
+
+       /*
+        * Now that we have all the maps created, just set the ->end of them:
+        */
+       map_groups__fixup_end(&machine->kmaps);
+       return 0;
+}
+
 static void machine__set_kernel_mmap_len(struct machine *machine,
                                         union perf_event *event)
 {
@@ -462,3 +1038,189 @@ int machine__process_event(struct machine *machine, union perf_event *event)
 
        return ret;
 }
+
+void machine__remove_thread(struct machine *machine, struct thread *th)
+{
+       machine->last_match = NULL;
+       rb_erase(&th->rb_node, &machine->threads);
+       /*
+        * We may have references to this thread, for instance in some hist_entry
+        * instances, so just move them to a separate list.
+        */
+       list_add_tail(&th->node, &machine->dead_threads);
+}
+
+static bool symbol__match_parent_regex(struct symbol *sym)
+{
+       if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
+               return 1;
+
+       return 0;
+}
+
+static const u8 cpumodes[] = {
+       PERF_RECORD_MISC_USER,
+       PERF_RECORD_MISC_KERNEL,
+       PERF_RECORD_MISC_GUEST_USER,
+       PERF_RECORD_MISC_GUEST_KERNEL
+};
+#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))
+
+static void ip__resolve_ams(struct machine *machine, struct thread *thread,
+                           struct addr_map_symbol *ams,
+                           u64 ip)
+{
+       struct addr_location al;
+       size_t i;
+       u8 m;
+
+       memset(&al, 0, sizeof(al));
+
+       for (i = 0; i < NCPUMODES; i++) {
+               m = cpumodes[i];
+               /*
+                * We cannot use the header.misc hint to determine whether a
+                * branch stack address is user, kernel, guest, hypervisor.
+                * Branches may straddle the kernel/user/hypervisor boundaries.
+                * Thus, we have to try consecutively until we find a match
+                * or else the symbol is unknown.
+                */
+               thread__find_addr_location(thread, machine, m, MAP__FUNCTION,
+                               ip, &al, NULL);
+               if (al.sym)
+                       goto found;
+       }
+found:
+       ams->addr = ip;
+       ams->al_addr = al.addr;
+       ams->sym = al.sym;
+       ams->map = al.map;
+}
+
+struct branch_info *machine__resolve_bstack(struct machine *machine,
+                                           struct thread *thr,
+                                           struct branch_stack *bs)
+{
+       struct branch_info *bi;
+       unsigned int i;
+
+       bi = calloc(bs->nr, sizeof(struct branch_info));
+       if (!bi)
+               return NULL;
+
+       for (i = 0; i < bs->nr; i++) {
+               ip__resolve_ams(machine, thr, &bi[i].to, bs->entries[i].to);
+               ip__resolve_ams(machine, thr, &bi[i].from, bs->entries[i].from);
+               bi[i].flags = bs->entries[i].flags;
+       }
+       return bi;
+}
+
+static int machine__resolve_callchain_sample(struct machine *machine,
+                                            struct thread *thread,
+                                            struct ip_callchain *chain,
+                                            struct symbol **parent)
+
+{
+       u8 cpumode = PERF_RECORD_MISC_USER;
+       unsigned int i;
+       int err;
+
+       callchain_cursor_reset(&callchain_cursor);
+
+       if (chain->nr > PERF_MAX_STACK_DEPTH) {
+               pr_warning("corrupted callchain. skipping...\n");
+               return 0;
+       }
+
+       for (i = 0; i < chain->nr; i++) {
+               u64 ip;
+               struct addr_location al;
+
+               if (callchain_param.order == ORDER_CALLEE)
+                       ip = chain->ips[i];
+               else
+                       ip = chain->ips[chain->nr - i - 1];
+
+               if (ip >= PERF_CONTEXT_MAX) {
+                       switch (ip) {
+                       case PERF_CONTEXT_HV:
+                               cpumode = PERF_RECORD_MISC_HYPERVISOR;
+                               break;
+                       case PERF_CONTEXT_KERNEL:
+                               cpumode = PERF_RECORD_MISC_KERNEL;
+                               break;
+                       case PERF_CONTEXT_USER:
+                               cpumode = PERF_RECORD_MISC_USER;
+                               break;
+                       default:
+                               pr_debug("invalid callchain context: "
+                                        "%"PRId64"\n", (s64) ip);
+                               /*
+                                * It seems the callchain is corrupted.
+                                * Discard all.
+                                */
+                               callchain_cursor_reset(&callchain_cursor);
+                               return 0;
+                       }
+                       continue;
+               }
+
+               al.filtered = false;
+               thread__find_addr_location(thread, machine, cpumode,
+                                          MAP__FUNCTION, ip, &al, NULL);
+               if (al.sym != NULL) {
+                       if (sort__has_parent && !*parent &&
+                           symbol__match_parent_regex(al.sym))
+                               *parent = al.sym;
+                       if (!symbol_conf.use_callchain)
+                               break;
+               }
+
+               err = callchain_cursor_append(&callchain_cursor,
+                                             ip, al.map, al.sym);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+static int unwind_entry(struct unwind_entry *entry, void *arg)
+{
+       struct callchain_cursor *cursor = arg;
+       return callchain_cursor_append(cursor, entry->ip,
+                                      entry->map, entry->sym);
+}
+
+int machine__resolve_callchain(struct machine *machine,
+                              struct perf_evsel *evsel,
+                              struct thread *thread,
+                              struct perf_sample *sample,
+                              struct symbol **parent)
+
+{
+       int ret;
+
+       callchain_cursor_reset(&callchain_cursor);
+
+       ret = machine__resolve_callchain_sample(machine, thread,
+                                               sample->callchain, parent);
+       if (ret)
+               return ret;
+
+       /* Can we do dwarf post unwind? */
+       if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
+             (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
+               return 0;
+
+       /* Bail out if nothing was captured. */
+       if ((!sample->user_regs.regs) ||
+           (!sample->user_stack.size))
+               return 0;
+
+       return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
+                                  thread, evsel->attr.sample_regs_user,
+                                  sample);
+
+}
index b7cde7467d55018f82c254bf8d6cf49cf70db30e..5ac5892f23264a76c39b092c0b4933fd95a9779c 100644 (file)
@@ -47,23 +47,32 @@ int machine__process_event(struct machine *machine, union perf_event *event);
 
 typedef void (*machine__process_t)(struct machine *machine, void *data);
 
-void machines__process(struct rb_root *machines,
-                      machine__process_t process, void *data);
+struct machines {
+       struct machine host;
+       struct rb_root guests;
+};
+
+void machines__init(struct machines *machines);
+void machines__exit(struct machines *machines);
 
-struct machine *machines__add(struct rb_root *machines, pid_t pid,
+void machines__process_guests(struct machines *machines,
+                             machine__process_t process, void *data);
+
+struct machine *machines__add(struct machines *machines, pid_t pid,
                              const char *root_dir);
-struct machine *machines__find_host(struct rb_root *machines);
-struct machine *machines__find(struct rb_root *machines, pid_t pid);
-struct machine *machines__findnew(struct rb_root *machines, pid_t pid);
+struct machine *machines__find_host(struct machines *machines);
+struct machine *machines__find(struct machines *machines, pid_t pid);
+struct machine *machines__findnew(struct machines *machines, pid_t pid);
 
-void machines__set_id_hdr_size(struct rb_root *machines, u16 id_hdr_size);
+void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size);
 char *machine__mmap_name(struct machine *machine, char *bf, size_t size);
 
 int machine__init(struct machine *machine, const char *root_dir, pid_t pid);
 void machine__exit(struct machine *machine);
+void machine__delete_dead_threads(struct machine *machine);
+void machine__delete_threads(struct machine *machine);
 void machine__delete(struct machine *machine);
 
-
 struct branch_info *machine__resolve_bstack(struct machine *machine,
                                            struct thread *thread,
                                            struct branch_stack *bs);
@@ -129,19 +138,19 @@ int machine__load_kallsyms(struct machine *machine, const char *filename,
 int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
                               symbol_filter_t filter);
 
-size_t machine__fprintf_dsos_buildid(struct machine *machine,
-                                    FILE *fp, bool with_hits);
-size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp);
-size_t machines__fprintf_dsos_buildid(struct rb_root *machines,
-                                     FILE *fp, bool with_hits);
+size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
+                                    bool (skip)(struct dso *dso, int parm), int parm);
+size_t machines__fprintf_dsos(struct machines *machines, FILE *fp);
+size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
+                                    bool (skip)(struct dso *dso, int parm), int parm);
 
 void machine__destroy_kernel_maps(struct machine *machine);
 int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel);
 int machine__create_kernel_maps(struct machine *machine);
 
-int machines__create_kernel_maps(struct rb_root *machines, pid_t pid);
-int machines__create_guest_kernel_maps(struct rb_root *machines);
-void machines__destroy_guest_kernel_maps(struct rb_root *machines);
+int machines__create_kernel_maps(struct machines *machines, pid_t pid);
+int machines__create_guest_kernel_maps(struct machines *machines);
+void machines__destroy_kernel_maps(struct machines *machines);
 
 size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp);
 
index 0328d45c4f2af4571785812b32b94bd692ad36e1..6fcb9de623401b8ac731c3c240eb1cd5863683f4 100644 (file)
@@ -11,6 +11,7 @@
 #include "strlist.h"
 #include "vdso.h"
 #include "build-id.h"
+#include <linux/string.h>
 
 const char *map_type__name[MAP__NR_TYPES] = {
        [MAP__FUNCTION] = "Functions",
@@ -19,7 +20,8 @@ const char *map_type__name[MAP__NR_TYPES] = {
 
 static inline int is_anon_memory(const char *filename)
 {
-       return strcmp(filename, "//anon") == 0;
+       return !strcmp(filename, "//anon") ||
+              !strcmp(filename, "/anon_hugepage (deleted)");
 }
 
 static inline int is_no_dso_memory(const char *filename)
@@ -28,29 +30,29 @@ static inline int is_no_dso_memory(const char *filename)
               !strcmp(filename, "[heap]");
 }
 
-void map__init(struct map *self, enum map_type type,
+void map__init(struct map *map, enum map_type type,
               u64 start, u64 end, u64 pgoff, struct dso *dso)
 {
-       self->type     = type;
-       self->start    = start;
-       self->end      = end;
-       self->pgoff    = pgoff;
-       self->dso      = dso;
-       self->map_ip   = map__map_ip;
-       self->unmap_ip = map__unmap_ip;
-       RB_CLEAR_NODE(&self->rb_node);
-       self->groups   = NULL;
-       self->referenced = false;
-       self->erange_warned = false;
+       map->type     = type;
+       map->start    = start;
+       map->end      = end;
+       map->pgoff    = pgoff;
+       map->dso      = dso;
+       map->map_ip   = map__map_ip;
+       map->unmap_ip = map__unmap_ip;
+       RB_CLEAR_NODE(&map->rb_node);
+       map->groups   = NULL;
+       map->referenced = false;
+       map->erange_warned = false;
 }
 
 struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
                     u64 pgoff, u32 pid, char *filename,
                     enum map_type type)
 {
-       struct map *self = malloc(sizeof(*self));
+       struct map *map = malloc(sizeof(*map));
 
-       if (self != NULL) {
+       if (map != NULL) {
                char newfilename[PATH_MAX];
                struct dso *dso;
                int anon, no_dso, vdso;
@@ -73,10 +75,10 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
                if (dso == NULL)
                        goto out_delete;
 
-               map__init(self, type, start, start + len, pgoff, dso);
+               map__init(map, type, start, start + len, pgoff, dso);
 
                if (anon || no_dso) {
-                       self->map_ip = self->unmap_ip = identity__map_ip;
+                       map->map_ip = map->unmap_ip = identity__map_ip;
 
                        /*
                         * Set memory without DSO as loaded. All map__find_*
@@ -84,12 +86,12 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
                         * unnecessary map__load warning.
                         */
                        if (no_dso)
-                               dso__set_loaded(dso, self->type);
+                               dso__set_loaded(dso, map->type);
                }
        }
-       return self;
+       return map;
 out_delete:
-       free(self);
+       free(map);
        return NULL;
 }
 
@@ -112,48 +114,48 @@ struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
        return map;
 }
 
-void map__delete(struct map *self)
+void map__delete(struct map *map)
 {
-       free(self);
+       free(map);
 }
 
-void map__fixup_start(struct map *self)
+void map__fixup_start(struct map *map)
 {
-       struct rb_root *symbols = &self->dso->symbols[self->type];
+       struct rb_root *symbols = &map->dso->symbols[map->type];
        struct rb_node *nd = rb_first(symbols);
        if (nd != NULL) {
                struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
-               self->start = sym->start;
+               map->start = sym->start;
        }
 }
 
-void map__fixup_end(struct map *self)
+void map__fixup_end(struct map *map)
 {
-       struct rb_root *symbols = &self->dso->symbols[self->type];
+       struct rb_root *symbols = &map->dso->symbols[map->type];
        struct rb_node *nd = rb_last(symbols);
        if (nd != NULL) {
                struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
-               self->end = sym->end;
+               map->end = sym->end;
        }
 }
 
 #define DSO__DELETED "(deleted)"
 
-int map__load(struct map *self, symbol_filter_t filter)
+int map__load(struct map *map, symbol_filter_t filter)
 {
-       const char *name = self->dso->long_name;
+       const char *name = map->dso->long_name;
        int nr;
 
-       if (dso__loaded(self->dso, self->type))
+       if (dso__loaded(map->dso, map->type))
                return 0;
 
-       nr = dso__load(self->dso, self, filter);
+       nr = dso__load(map->dso, map, filter);
        if (nr < 0) {
-               if (self->dso->has_build_id) {
+               if (map->dso->has_build_id) {
                        char sbuild_id[BUILD_ID_SIZE * 2 + 1];
 
-                       build_id__sprintf(self->dso->build_id,
-                                         sizeof(self->dso->build_id),
+                       build_id__sprintf(map->dso->build_id,
+                                         sizeof(map->dso->build_id),
                                          sbuild_id);
                        pr_warning("%s with build id %s not found",
                                   name, sbuild_id);
@@ -183,43 +185,36 @@ int map__load(struct map *self, symbol_filter_t filter)
         * Only applies to the kernel, as its symtabs aren't relative like the
         * module ones.
         */
-       if (self->dso->kernel)
-               map__reloc_vmlinux(self);
+       if (map->dso->kernel)
+               map__reloc_vmlinux(map);
 
        return 0;
 }
 
-struct symbol *map__find_symbol(struct map *self, u64 addr,
+struct symbol *map__find_symbol(struct map *map, u64 addr,
                                symbol_filter_t filter)
 {
-       if (map__load(self, filter) < 0)
+       if (map__load(map, filter) < 0)
                return NULL;
 
-       return dso__find_symbol(self->dso, self->type, addr);
+       return dso__find_symbol(map->dso, map->type, addr);
 }
 
-struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
+struct symbol *map__find_symbol_by_name(struct map *map, const char *name,
                                        symbol_filter_t filter)
 {
-       if (map__load(self, filter) < 0)
+       if (map__load(map, filter) < 0)
                return NULL;
 
-       if (!dso__sorted_by_name(self->dso, self->type))
-               dso__sort_by_name(self->dso, self->type);
+       if (!dso__sorted_by_name(map->dso, map->type))
+               dso__sort_by_name(map->dso, map->type);
 
-       return dso__find_symbol_by_name(self->dso, self->type, name);
+       return dso__find_symbol_by_name(map->dso, map->type, name);
 }
 
-struct map *map__clone(struct map *self)
+struct map *map__clone(struct map *map)
 {
-       struct map *map = malloc(sizeof(*self));
-
-       if (!map)
-               return NULL;
-
-       memcpy(map, self, sizeof(*self));
-
-       return map;
+       return memdup(map, sizeof(*map));
 }
 
 int map__overlap(struct map *l, struct map *r)
@@ -236,10 +231,10 @@ int map__overlap(struct map *l, struct map *r)
        return 0;
 }
 
-size_t map__fprintf(struct map *self, FILE *fp)
+size_t map__fprintf(struct map *map, FILE *fp)
 {
        return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
-                      self->start, self->end, self->pgoff, self->dso->name);
+                      map->start, map->end, map->pgoff, map->dso->name);
 }
 
 size_t map__fprintf_dsoname(struct map *map, FILE *fp)
@@ -527,9 +522,9 @@ static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
        return ip - (s64)map->pgoff;
 }
 
-void map__reloc_vmlinux(struct map *self)
+void map__reloc_vmlinux(struct map *map)
 {
-       struct kmap *kmap = map__kmap(self);
+       struct kmap *kmap = map__kmap(map);
        s64 reloc;
 
        if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
@@ -541,9 +536,9 @@ void map__reloc_vmlinux(struct map *self)
        if (!reloc)
                return;
 
-       self->map_ip   = map__reloc_map_ip;
-       self->unmap_ip = map__reloc_unmap_ip;
-       self->pgoff    = reloc;
+       map->map_ip   = map__reloc_map_ip;
+       map->unmap_ip = map__reloc_unmap_ip;
+       map->pgoff    = reloc;
 }
 
 void maps__insert(struct rb_root *maps, struct map *map)
@@ -566,9 +561,9 @@ void maps__insert(struct rb_root *maps, struct map *map)
        rb_insert_color(&map->rb_node, maps);
 }
 
-void maps__remove(struct rb_root *self, struct map *map)
+void maps__remove(struct rb_root *maps, struct map *map)
 {
-       rb_erase(&map->rb_node, self);
+       rb_erase(&map->rb_node, maps);
 }
 
 struct map *maps__find(struct rb_root *maps, u64 ip)
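
map__clone() above now delegates to memdup() rather than open-coding the copy. As a stand-alone reference, the following sketch is functionally what the removed malloc()+memcpy() pair did (illustrative only; the real helper is provided by the perf util library):

#include <stdlib.h>
#include <string.h>

/* Duplicate a block of memory, returning NULL on allocation failure --
 * the same behaviour as the open-coded version removed above. */
void *memdup_sketch(const void *src, size_t len)
{
        void *p = malloc(len);

        if (p)
                memcpy(p, src, len);
        return p;
}
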
index bcb39e2a69651fed82758f5fea299f4d4dda2f41..a887f2c9dfbb0248d89640ea9609cabdc8907849 100644 (file)
@@ -57,9 +57,9 @@ struct map_groups {
        struct machine   *machine;
 };
 
-static inline struct kmap *map__kmap(struct map *self)
+static inline struct kmap *map__kmap(struct map *map)
 {
-       return (struct kmap *)(self + 1);
+       return (struct kmap *)(map + 1);
 }
 
 static inline u64 map__map_ip(struct map *map, u64 ip)
@@ -85,27 +85,27 @@ struct symbol;
 
 typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
 
-void map__init(struct map *self, enum map_type type,
+void map__init(struct map *map, enum map_type type,
               u64 start, u64 end, u64 pgoff, struct dso *dso);
 struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
                     u64 pgoff, u32 pid, char *filename,
                     enum map_type type);
 struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
-void map__delete(struct map *self);
-struct map *map__clone(struct map *self);
+void map__delete(struct map *map);
+struct map *map__clone(struct map *map);
 int map__overlap(struct map *l, struct map *r);
-size_t map__fprintf(struct map *self, FILE *fp);
+size_t map__fprintf(struct map *map, FILE *fp);
 size_t map__fprintf_dsoname(struct map *map, FILE *fp);
 
-int map__load(struct map *self, symbol_filter_t filter);
-struct symbol *map__find_symbol(struct map *self,
+int map__load(struct map *map, symbol_filter_t filter);
+struct symbol *map__find_symbol(struct map *map,
                                u64 addr, symbol_filter_t filter);
-struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
+struct symbol *map__find_symbol_by_name(struct map *map, const char *name,
                                        symbol_filter_t filter);
-void map__fixup_start(struct map *self);
-void map__fixup_end(struct map *self);
+void map__fixup_start(struct map *map);
+void map__fixup_end(struct map *map);
 
-void map__reloc_vmlinux(struct map *self);
+void map__reloc_vmlinux(struct map *map);
 
 size_t __map_groups__fprintf_maps(struct map_groups *mg,
                                  enum map_type type, int verbose, FILE *fp);
index 2d8d53bec17e35dd58f1eced0a710f48980f2ec2..c84f48cf96782e900064d1ff7d277477bcd5919e 100644 (file)
@@ -380,8 +380,8 @@ static int add_tracepoint(struct list_head **listp, int *idx,
        return 0;
 }
 
-static int add_tracepoint_multi(struct list_head **list, int *idx,
-                               char *sys_name, char *evt_name)
+static int add_tracepoint_multi_event(struct list_head **list, int *idx,
+                                     char *sys_name, char *evt_name)
 {
        char evt_path[MAXPATHLEN];
        struct dirent *evt_ent;
@@ -408,6 +408,47 @@ static int add_tracepoint_multi(struct list_head **list, int *idx,
                ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name);
        }
 
+       closedir(evt_dir);
+       return ret;
+}
+
+static int add_tracepoint_event(struct list_head **list, int *idx,
+                               char *sys_name, char *evt_name)
+{
+       return strpbrk(evt_name, "*?") ?
+              add_tracepoint_multi_event(list, idx, sys_name, evt_name) :
+              add_tracepoint(list, idx, sys_name, evt_name);
+}
+
+static int add_tracepoint_multi_sys(struct list_head **list, int *idx,
+                                   char *sys_name, char *evt_name)
+{
+       struct dirent *events_ent;
+       DIR *events_dir;
+       int ret = 0;
+
+       events_dir = opendir(tracing_events_path);
+       if (!events_dir) {
+               perror("Can't open event dir");
+               return -1;
+       }
+
+       while (!ret && (events_ent = readdir(events_dir))) {
+               if (!strcmp(events_ent->d_name, ".")
+                   || !strcmp(events_ent->d_name, "..")
+                   || !strcmp(events_ent->d_name, "enable")
+                   || !strcmp(events_ent->d_name, "header_event")
+                   || !strcmp(events_ent->d_name, "header_page"))
+                       continue;
+
+               if (!strglobmatch(events_ent->d_name, sys_name))
+                       continue;
+
+               ret = add_tracepoint_event(list, idx, events_ent->d_name,
+                                          evt_name);
+       }
+
+       closedir(events_dir);
        return ret;
 }
 
@@ -420,9 +461,10 @@ int parse_events_add_tracepoint(struct list_head **list, int *idx,
        if (ret)
                return ret;
 
-       return strpbrk(event, "*?") ?
-              add_tracepoint_multi(list, idx, sys, event) :
-              add_tracepoint(list, idx, sys, event);
+       if (strpbrk(sys, "*?"))
+               return add_tracepoint_multi_sys(list, idx, sys, event);
+       else
+               return add_tracepoint_event(list, idx, sys, event);
 }
 
 static int
@@ -492,7 +534,7 @@ int parse_events_add_breakpoint(struct list_head **list, int *idx,
 }
 
 static int config_term(struct perf_event_attr *attr,
-                      struct parse_events__term *term)
+                      struct parse_events_term *term)
 {
 #define CHECK_TYPE_VAL(type)                                   \
 do {                                                           \
@@ -537,7 +579,7 @@ do {                                                                \
 static int config_attr(struct perf_event_attr *attr,
                       struct list_head *head, int fail)
 {
-       struct parse_events__term *term;
+       struct parse_events_term *term;
 
        list_for_each_entry(term, head, list)
                if (config_term(attr, term) && fail)
@@ -563,14 +605,14 @@ int parse_events_add_numeric(struct list_head **list, int *idx,
        return add_event(list, idx, &attr, NULL);
 }
 
-static int parse_events__is_name_term(struct parse_events__term *term)
+static int parse_events__is_name_term(struct parse_events_term *term)
 {
        return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME;
 }
 
 static char *pmu_event_name(struct list_head *head_terms)
 {
-       struct parse_events__term *term;
+       struct parse_events_term *term;
 
        list_for_each_entry(term, head_terms, list)
                if (parse_events__is_name_term(term))
@@ -657,14 +699,6 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
        int exclude = eu | ek | eh;
        int exclude_GH = evsel ? evsel->exclude_GH : 0;
 
-       /*
-        * We are here for group and 'GH' was not set as event
-        * modifier and whatever event/group modifier override
-        * default 'GH' setup.
-        */
-       if (evsel && !exclude_GH)
-               eH = eG = 0;
-
        memset(mod, 0, sizeof(*mod));
 
        while (*str) {
@@ -814,7 +848,7 @@ static int parse_events__scanner(const char *str, void *data, int start_token)
  */
 int parse_events_terms(struct list_head *terms, const char *str)
 {
-       struct parse_events_data__terms data = {
+       struct parse_events_terms data = {
                .terms = NULL,
        };
        int ret;
@@ -830,10 +864,9 @@ int parse_events_terms(struct list_head *terms, const char *str)
        return ret;
 }
 
-int parse_events(struct perf_evlist *evlist, const char *str,
-                int unset __maybe_unused)
+int parse_events(struct perf_evlist *evlist, const char *str)
 {
-       struct parse_events_data__events data = {
+       struct parse_events_evlist data = {
                .list = LIST_HEAD_INIT(data.list),
                .idx  = evlist->nr_entries,
        };
@@ -843,6 +876,7 @@ int parse_events(struct perf_evlist *evlist, const char *str,
        if (!ret) {
                int entries = data.idx - evlist->nr_entries;
                perf_evlist__splice_list_tail(evlist, &data.list, entries);
+               evlist->nr_groups += data.nr_groups;
                return 0;
        }
 
@@ -858,7 +892,7 @@ int parse_events_option(const struct option *opt, const char *str,
                        int unset __maybe_unused)
 {
        struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
-       int ret = parse_events(evlist, str, unset);
+       int ret = parse_events(evlist, str);
 
        if (ret) {
                fprintf(stderr, "invalid or unsupported event: '%s'\n", str);
@@ -1121,16 +1155,16 @@ void print_events(const char *event_glob, bool name_only)
        print_tracepoint_events(NULL, NULL, name_only);
 }
 
-int parse_events__is_hardcoded_term(struct parse_events__term *term)
+int parse_events__is_hardcoded_term(struct parse_events_term *term)
 {
        return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
 }
 
-static int new_term(struct parse_events__term **_term, int type_val,
+static int new_term(struct parse_events_term **_term, int type_val,
                    int type_term, char *config,
                    char *str, u64 num)
 {
-       struct parse_events__term *term;
+       struct parse_events_term *term;
 
        term = zalloc(sizeof(*term));
        if (!term)
@@ -1156,21 +1190,21 @@ static int new_term(struct parse_events__term **_term, int type_val,
        return 0;
 }
 
-int parse_events__term_num(struct parse_events__term **term,
+int parse_events_term__num(struct parse_events_term **term,
                           int type_term, char *config, u64 num)
 {
        return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term,
                        config, NULL, num);
 }
 
-int parse_events__term_str(struct parse_events__term **term,
+int parse_events_term__str(struct parse_events_term **term,
                           int type_term, char *config, char *str)
 {
        return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term,
                        config, str, 0);
 }
 
-int parse_events__term_sym_hw(struct parse_events__term **term,
+int parse_events_term__sym_hw(struct parse_events_term **term,
                              char *config, unsigned idx)
 {
        struct event_symbol *sym;
@@ -1188,8 +1222,8 @@ int parse_events__term_sym_hw(struct parse_events__term **term,
                                (char *) "event", (char *) sym->symbol, 0);
 }
 
-int parse_events__term_clone(struct parse_events__term **new,
-                            struct parse_events__term *term)
+int parse_events_term__clone(struct parse_events_term **new,
+                            struct parse_events_term *term)
 {
        return new_term(new, term->type_val, term->type_term, term->config,
                        term->val.str, term->val.num);
@@ -1197,7 +1231,7 @@ int parse_events__term_clone(struct parse_events__term **new,
 
 void parse_events__free_terms(struct list_head *terms)
 {
-       struct parse_events__term *term, *h;
+       struct parse_events_term *term, *h;
 
        list_for_each_entry_safe(term, h, terms, list)
                free(term);
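
With add_tracepoint_multi_sys() above, a glob in the subsystem part of an event specification (for example '*:sched_switch' or 'sched*:*') walks every directory under the tracing events path and matches each name with strglobmatch(). The following is a rough, simplified stand-in for that kind of '*'/'?' matching, not perf's actual strglobmatch() implementation:

#include <stdbool.h>

/* Minimal recursive matcher: '*' matches any run of characters,
 * '?' matches any single character.  Illustrative only. */
bool glob_match_sketch(const char *str, const char *pat)
{
        if (*pat == '\0')
                return *str == '\0';
        if (*pat == '*')
                return glob_match_sketch(str, pat + 1) ||
                       (*str != '\0' && glob_match_sketch(str + 1, pat));
        if (*str == '\0')
                return false;
        if (*pat == '?' || *pat == *str)
                return glob_match_sketch(str + 1, pat + 1);
        return false;
}

Each subsystem directory whose name matches is then handed to add_tracepoint_event(), which in turn expands any wildcard in the event name.
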
index b7af80b8bdda188842a35d81e0a66e4d01a18c4c..8a4859315fd97266fa592ea775d993b9c003a48d 100644 (file)
@@ -29,8 +29,7 @@ const char *event_type(int type);
 
 extern int parse_events_option(const struct option *opt, const char *str,
                               int unset);
-extern int parse_events(struct perf_evlist *evlist, const char *str,
-                       int unset);
+extern int parse_events(struct perf_evlist *evlist, const char *str);
 extern int parse_events_terms(struct list_head *terms, const char *str);
 extern int parse_filter(const struct option *opt, const char *str, int unset);
 
@@ -51,7 +50,7 @@ enum {
        PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE,
 };
 
-struct parse_events__term {
+struct parse_events_term {
        char *config;
        union {
                char *str;
@@ -62,24 +61,25 @@ struct parse_events__term {
        struct list_head list;
 };
 
-struct parse_events_data__events {
+struct parse_events_evlist {
        struct list_head list;
        int idx;
+       int nr_groups;
 };
 
-struct parse_events_data__terms {
+struct parse_events_terms {
        struct list_head *terms;
 };
 
-int parse_events__is_hardcoded_term(struct parse_events__term *term);
-int parse_events__term_num(struct parse_events__term **_term,
+int parse_events__is_hardcoded_term(struct parse_events_term *term);
+int parse_events_term__num(struct parse_events_term **_term,
                           int type_term, char *config, u64 num);
-int parse_events__term_str(struct parse_events__term **_term,
+int parse_events_term__str(struct parse_events_term **_term,
                           int type_term, char *config, char *str);
-int parse_events__term_sym_hw(struct parse_events__term **term,
+int parse_events_term__sym_hw(struct parse_events_term **term,
                              char *config, unsigned idx);
-int parse_events__term_clone(struct parse_events__term **new,
-                            struct parse_events__term *term);
+int parse_events_term__clone(struct parse_events_term **new,
+                            struct parse_events_term *term);
 void parse_events__free_terms(struct list_head *terms);
 int parse_events__modifier_event(struct list_head *list, char *str, bool add);
 int parse_events__modifier_group(struct list_head *list, char *event_mod);
index 0f9914ae6bacb0b801650cb1c9449852b8a0ff1f..afc44c18dfe17b054fe01e10c779dbe747373558 100644 (file)
@@ -1,5 +1,4 @@
 %pure-parser
-%name-prefix "parse_events_"
 %parse-param {void *_data}
 %parse-param {void *scanner}
 %lex-param {void* scanner}
@@ -23,6 +22,14 @@ do { \
                YYABORT; \
 } while (0)
 
+static void inc_group_count(struct list_head *list,
+                           struct parse_events_evlist *data)
+{
+       /* Only count groups that have more than one member. */
+       if (!list_is_last(list->next, list))
+               data->nr_groups++;
+}
+
 %}
 
 %token PE_START_EVENTS PE_START_TERMS
@@ -68,7 +75,7 @@ do { \
        char *str;
        u64 num;
        struct list_head *head;
-       struct parse_events__term *term;
+       struct parse_events_term *term;
 }
 %%
 
@@ -79,7 +86,7 @@ PE_START_TERMS  start_terms
 
 start_events: groups
 {
-       struct parse_events_data__events *data = _data;
+       struct parse_events_evlist *data = _data;
 
        parse_events_update_lists($1, &data->list);
 }
@@ -123,6 +130,7 @@ PE_NAME '{' events '}'
 {
        struct list_head *list = $3;
 
+       inc_group_count(list, _data);
        parse_events__set_leader($1, list);
        $$ = list;
 }
@@ -131,6 +139,7 @@ PE_NAME '{' events '}'
 {
        struct list_head *list = $2;
 
+       inc_group_count(list, _data);
        parse_events__set_leader(NULL, list);
        $$ = list;
 }
@@ -186,7 +195,7 @@ event_def: event_pmu |
 event_pmu:
 PE_NAME '/' event_config '/'
 {
-       struct parse_events_data__events *data = _data;
+       struct parse_events_evlist *data = _data;
        struct list_head *list = NULL;
 
        ABORT_ON(parse_events_add_pmu(&list, &data->idx, $1, $3));
@@ -202,7 +211,7 @@ PE_VALUE_SYM_SW
 event_legacy_symbol:
 value_sym '/' event_config '/'
 {
-       struct parse_events_data__events *data = _data;
+       struct parse_events_evlist *data = _data;
        struct list_head *list = NULL;
        int type = $1 >> 16;
        int config = $1 & 255;
@@ -215,7 +224,7 @@ value_sym '/' event_config '/'
 |
 value_sym sep_slash_dc
 {
-       struct parse_events_data__events *data = _data;
+       struct parse_events_evlist *data = _data;
        struct list_head *list = NULL;
        int type = $1 >> 16;
        int config = $1 & 255;
@@ -228,7 +237,7 @@ value_sym sep_slash_dc
 event_legacy_cache:
 PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT
 {
-       struct parse_events_data__events *data = _data;
+       struct parse_events_evlist *data = _data;
        struct list_head *list = NULL;
 
        ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, $5));
@@ -237,7 +246,7 @@ PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT
 |
 PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT
 {
-       struct parse_events_data__events *data = _data;
+       struct parse_events_evlist *data = _data;
        struct list_head *list = NULL;
 
        ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, NULL));
@@ -246,7 +255,7 @@ PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT
 |
 PE_NAME_CACHE_TYPE
 {
-       struct parse_events_data__events *data = _data;
+       struct parse_events_evlist *data = _data;
        struct list_head *list = NULL;
 
        ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, NULL, NULL));
@@ -256,7 +265,7 @@ PE_NAME_CACHE_TYPE
 event_legacy_mem:
 PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
 {
-       struct parse_events_data__events *data = _data;
+       struct parse_events_evlist *data = _data;
        struct list_head *list = NULL;
 
        ABORT_ON(parse_events_add_breakpoint(&list, &data->idx,
@@ -266,7 +275,7 @@ PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
 |
 PE_PREFIX_MEM PE_VALUE sep_dc
 {
-       struct parse_events_data__events *data = _data;
+       struct parse_events_evlist *data = _data;
        struct list_head *list = NULL;
 
        ABORT_ON(parse_events_add_breakpoint(&list, &data->idx,
@@ -277,7 +286,7 @@ PE_PREFIX_MEM PE_VALUE sep_dc
 event_legacy_tracepoint:
 PE_NAME ':' PE_NAME
 {
-       struct parse_events_data__events *data = _data;
+       struct parse_events_evlist *data = _data;
        struct list_head *list = NULL;
 
        ABORT_ON(parse_events_add_tracepoint(&list, &data->idx, $1, $3));
@@ -287,7 +296,7 @@ PE_NAME ':' PE_NAME
 event_legacy_numeric:
 PE_VALUE ':' PE_VALUE
 {
-       struct parse_events_data__events *data = _data;
+       struct parse_events_evlist *data = _data;
        struct list_head *list = NULL;
 
        ABORT_ON(parse_events_add_numeric(&list, &data->idx, (u32)$1, $3, NULL));
@@ -297,7 +306,7 @@ PE_VALUE ':' PE_VALUE
 event_legacy_raw:
 PE_RAW
 {
-       struct parse_events_data__events *data = _data;
+       struct parse_events_evlist *data = _data;
        struct list_head *list = NULL;
 
        ABORT_ON(parse_events_add_numeric(&list, &data->idx,
@@ -307,7 +316,7 @@ PE_RAW
 
 start_terms: event_config
 {
-       struct parse_events_data__terms *data = _data;
+       struct parse_events_terms *data = _data;
        data->terms = $1;
 }
 
@@ -315,7 +324,7 @@ event_config:
 event_config ',' event_term
 {
        struct list_head *head = $1;
-       struct parse_events__term *term = $3;
+       struct parse_events_term *term = $3;
 
        ABORT_ON(!head);
        list_add_tail(&term->list, head);
@@ -325,7 +334,7 @@ event_config ',' event_term
 event_term
 {
        struct list_head *head = malloc(sizeof(*head));
-       struct parse_events__term *term = $1;
+       struct parse_events_term *term = $1;
 
        ABORT_ON(!head);
        INIT_LIST_HEAD(head);
@@ -336,70 +345,70 @@ event_term
 event_term:
 PE_NAME '=' PE_NAME
 {
-       struct parse_events__term *term;
+       struct parse_events_term *term;
 
-       ABORT_ON(parse_events__term_str(&term, PARSE_EVENTS__TERM_TYPE_USER,
+       ABORT_ON(parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
                                        $1, $3));
        $$ = term;
 }
 |
 PE_NAME '=' PE_VALUE
 {
-       struct parse_events__term *term;
+       struct parse_events_term *term;
 
-       ABORT_ON(parse_events__term_num(&term, PARSE_EVENTS__TERM_TYPE_USER,
+       ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
                                        $1, $3));
        $$ = term;
 }
 |
 PE_NAME '=' PE_VALUE_SYM_HW
 {
-       struct parse_events__term *term;
+       struct parse_events_term *term;
        int config = $3 & 255;
 
-       ABORT_ON(parse_events__term_sym_hw(&term, $1, config));
+       ABORT_ON(parse_events_term__sym_hw(&term, $1, config));
        $$ = term;
 }
 |
 PE_NAME
 {
-       struct parse_events__term *term;
+       struct parse_events_term *term;
 
-       ABORT_ON(parse_events__term_num(&term, PARSE_EVENTS__TERM_TYPE_USER,
+       ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
                                        $1, 1));
        $$ = term;
 }
 |
 PE_VALUE_SYM_HW
 {
-       struct parse_events__term *term;
+       struct parse_events_term *term;
        int config = $1 & 255;
 
-       ABORT_ON(parse_events__term_sym_hw(&term, NULL, config));
+       ABORT_ON(parse_events_term__sym_hw(&term, NULL, config));
        $$ = term;
 }
 |
 PE_TERM '=' PE_NAME
 {
-       struct parse_events__term *term;
+       struct parse_events_term *term;
 
-       ABORT_ON(parse_events__term_str(&term, (int)$1, NULL, $3));
+       ABORT_ON(parse_events_term__str(&term, (int)$1, NULL, $3));
        $$ = term;
 }
 |
 PE_TERM '=' PE_VALUE
 {
-       struct parse_events__term *term;
+       struct parse_events_term *term;
 
-       ABORT_ON(parse_events__term_num(&term, (int)$1, NULL, $3));
+       ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, $3));
        $$ = term;
 }
 |
 PE_TERM
 {
-       struct parse_events__term *term;
+       struct parse_events_term *term;
 
-       ABORT_ON(parse_events__term_num(&term, (int)$1, NULL, 1));
+       ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, 1));
        $$ = term;
 }
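
inc_group_count() defined earlier in this file only bumps nr_groups for groups with more than one member: on a list_head used as the head of the group's event list, list_is_last(list->next, list) is true exactly when the list holds at most one entry. A self-contained illustration (struct list_head and the helpers are re-declared here only so the snippet stands alone):

#include <stdbool.h>
#include <stdio.h>

struct list_head {
        struct list_head *next, *prev;
};

static void list_init(struct list_head *head)
{
        head->next = head->prev = head;
}

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

static bool list_is_last(const struct list_head *node,
                         const struct list_head *head)
{
        return node->next == head;
}

int main(void)
{
        struct list_head head, a, b;

        list_init(&head);
        list_add_tail(&a, &head);
        printf("one entry   -> counted as group: %d\n",
               !list_is_last(head.next, &head));       /* 0 */

        list_add_tail(&b, &head);
        printf("two entries -> counted as group: %d\n",
               !list_is_last(head.next, &head));       /* 1 */
        return 0;
}
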
 
index 9bdc60c6f138240d309289c69f294d04bd60dd2a..4c6f9c490a8d79d0cc7f2367efd05440de735a7c 100644 (file)
@@ -1,4 +1,3 @@
-
 #include <linux/list.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include "parse-events.h"
 #include "cpumap.h"
 
+struct perf_pmu_alias {
+       char *name;
+       struct list_head terms;
+       struct list_head list;
+};
+
+struct perf_pmu_format {
+       char *name;
+       int value;
+       DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
+       struct list_head list;
+};
+
 #define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/"
 
 int perf_pmu_parse(struct list_head *list, char *name);
@@ -85,7 +97,7 @@ static int pmu_format(char *name, struct list_head *format)
 
 static int perf_pmu__new_alias(struct list_head *list, char *name, FILE *file)
 {
-       struct perf_pmu__alias *alias;
+       struct perf_pmu_alias *alias;
        char buf[256];
        int ret;
 
@@ -172,15 +184,15 @@ static int pmu_aliases(char *name, struct list_head *head)
        return 0;
 }
 
-static int pmu_alias_terms(struct perf_pmu__alias *alias,
+static int pmu_alias_terms(struct perf_pmu_alias *alias,
                           struct list_head *terms)
 {
-       struct parse_events__term *term, *clone;
+       struct parse_events_term *term, *clone;
        LIST_HEAD(list);
        int ret;
 
        list_for_each_entry(term, &alias->terms, list) {
-               ret = parse_events__term_clone(&clone, term);
+               ret = parse_events_term__clone(&clone, term);
                if (ret) {
                        parse_events__free_terms(&list);
                        return ret;
@@ -360,10 +372,10 @@ struct perf_pmu *perf_pmu__find(char *name)
        return pmu_lookup(name);
 }
 
-static struct perf_pmu__format*
+static struct perf_pmu_format *
 pmu_find_format(struct list_head *formats, char *name)
 {
-       struct perf_pmu__format *format;
+       struct perf_pmu_format *format;
 
        list_for_each_entry(format, formats, list)
                if (!strcmp(format->name, name))
@@ -403,9 +415,9 @@ static __u64 pmu_format_value(unsigned long *format, __u64 value)
  */
 static int pmu_config_term(struct list_head *formats,
                           struct perf_event_attr *attr,
-                          struct parse_events__term *term)
+                          struct parse_events_term *term)
 {
-       struct perf_pmu__format *format;
+       struct perf_pmu_format *format;
        __u64 *vp;
 
        /*
@@ -450,7 +462,7 @@ int perf_pmu__config_terms(struct list_head *formats,
                           struct perf_event_attr *attr,
                           struct list_head *head_terms)
 {
-       struct parse_events__term *term;
+       struct parse_events_term *term;
 
        list_for_each_entry(term, head_terms, list)
                if (pmu_config_term(formats, attr, term))
@@ -471,10 +483,10 @@ int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
        return perf_pmu__config_terms(&pmu->format, attr, head_terms);
 }
 
-static struct perf_pmu__alias *pmu_find_alias(struct perf_pmu *pmu,
-                                             struct parse_events__term *term)
+static struct perf_pmu_alias *pmu_find_alias(struct perf_pmu *pmu,
+                                            struct parse_events_term *term)
 {
-       struct perf_pmu__alias *alias;
+       struct perf_pmu_alias *alias;
        char *name;
 
        if (parse_events__is_hardcoded_term(term))
@@ -507,8 +519,8 @@ static struct perf_pmu__alias *pmu_find_alias(struct perf_pmu *pmu,
  */
 int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms)
 {
-       struct parse_events__term *term, *h;
-       struct perf_pmu__alias *alias;
+       struct parse_events_term *term, *h;
+       struct perf_pmu_alias *alias;
        int ret;
 
        list_for_each_entry_safe(term, h, head_terms, list) {
@@ -527,7 +539,7 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms)
 int perf_pmu__new_format(struct list_head *list, char *name,
                         int config, unsigned long *bits)
 {
-       struct perf_pmu__format *format;
+       struct perf_pmu_format *format;
 
        format = zalloc(sizeof(*format));
        if (!format)
@@ -548,7 +560,7 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to)
        if (!to)
                to = from;
 
-       memset(bits, 0, BITS_TO_LONGS(PERF_PMU_FORMAT_BITS));
+       memset(bits, 0, BITS_TO_BYTES(PERF_PMU_FORMAT_BITS));
        for (b = from; b <= to; b++)
                set_bit(b, bits);
 }
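
The memset() change above is the functional fix in this hunk: memset() expects a byte count, but BITS_TO_LONGS() yields a number of longs, so only the first byte (on 64-bit builds) of the 64-bit format bitmap was actually being cleared. A small stand-alone check of the two quantities (the macros are re-derived here; the perf tree carries its own definitions):

#include <stdio.h>

#define BITS_PER_LONG           (8 * sizeof(long))
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define BITS_TO_LONGS(nr)       DIV_ROUND_UP(nr, BITS_PER_LONG)
#define BITS_TO_BYTES(nr)       DIV_ROUND_UP(nr, 8)

int main(void)
{
        /* 64-bit format bitmap: 1 long vs. 8 bytes -- memset() wants bytes. */
        printf("BITS_TO_LONGS(64) = %lu\n", (unsigned long)BITS_TO_LONGS(64));
        printf("BITS_TO_BYTES(64) = %lu\n", (unsigned long)BITS_TO_BYTES(64));
        return 0;
}
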
index a313ed76a49a23dd7bb6d02e87104053629f561b..32fe55b659fa93ed17e921c0bed99a07b99f09f6 100644 (file)
@@ -12,19 +12,6 @@ enum {
 
 #define PERF_PMU_FORMAT_BITS 64
 
-struct perf_pmu__format {
-       char *name;
-       int value;
-       DECLARE_BITMAP(bits, PERF_PMU_FORMAT_BITS);
-       struct list_head list;
-};
-
-struct perf_pmu__alias {
-       char *name;
-       struct list_head terms;
-       struct list_head list;
-};
-
 struct perf_pmu {
        char *name;
        __u32 type;
@@ -42,7 +29,7 @@ int perf_pmu__config_terms(struct list_head *formats,
                           struct list_head *head_terms);
 int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms);
 struct list_head *perf_pmu__alias(struct perf_pmu *pmu,
-                               struct list_head *head_terms);
+                                 struct list_head *head_terms);
 int perf_pmu_wrap(void);
 void perf_pmu_error(struct list_head *list, char *name, char const *msg);
 
index ec898047ebb92869a6487405ec0de525b0d4acde..bfd7e8509869b64b53fbcbcddafe89a4a73b93b3 100644 (file)
@@ -1,5 +1,4 @@
 
-%name-prefix "perf_pmu_"
 %parse-param {struct list_head *format}
 %parse-param {char *name}
 
index 1daf5c14e751bf954e5665e1caa179b191cbd361..be0329394d5639f77644d8a9079dd6ceb4f27a73 100644 (file)
@@ -413,12 +413,12 @@ static int convert_variable_type(Dwarf_Die *vr_die,
                                   dwarf_diename(vr_die), dwarf_diename(&type));
                        return -EINVAL;
                }
+               if (die_get_real_type(&type, &type) == NULL) {
+                       pr_warning("Failed to get a type"
+                                  " information.\n");
+                       return -ENOENT;
+               }
                if (ret == DW_TAG_pointer_type) {
-                       if (die_get_real_type(&type, &type) == NULL) {
-                               pr_warning("Failed to get a type"
-                                          " information.\n");
-                               return -ENOENT;
-                       }
                        while (*ref_ptr)
                                ref_ptr = &(*ref_ptr)->next;
                        /* Add new reference with offset +0 */
index c40c2d33199ee266d3a2092c30aa8818e7e01fd0..64536a993f4a181b68072dc2ebe54f7e6f58da88 100644 (file)
@@ -18,4 +18,5 @@ util/cgroup.c
 util/debugfs.c
 util/rblist.c
 util/strlist.c
+util/sysfs.c
 ../../lib/rbtree.c
index a2657fd96837837566af611c421c1fdd4eccb6ca..925e0c3e6d910a3d867ce726797a0fb1dea932dc 100644 (file)
@@ -1045,3 +1045,12 @@ error:
        if (PyErr_Occurred())
                PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
 }
+
+/*
+ * Dummy, to avoid dragging all the test_attr infrastructure in the python
+ * binding.
+ */
+void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
+                     int fd, int group_fd, unsigned long flags)
+{
+}
index f80605eb1855774a7bc0c0d43690eee016ba62e5..eacec859f2996143a7fda07e4c96544db1057e1d 100644 (file)
@@ -292,6 +292,7 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused,
        ns = nsecs - s * NSECS_PER_SEC;
 
        scripting_context->event_data = data;
+       scripting_context->pevent = evsel->tp_format->pevent;
 
        ENTER;
        SAVETMPS;
index 14683dfca2eeb9cfaeac762dc5964d48c093328c..e87aa5d9696b41cca90c23bc9ba81529e449eab8 100644 (file)
@@ -265,6 +265,7 @@ static void python_process_tracepoint(union perf_event *perf_event
        ns = nsecs - s * NSECS_PER_SEC;
 
        scripting_context->event_data = data;
+       scripting_context->pevent = evsel->tp_format->pevent;
 
        context = PyCObject_FromVoidPtr(scripting_context, NULL);
 
index ce6f5116238670c0b5f737533cc65ef3f924d220..bd85280bb6e872b4dcd1d041a33fa20e683f5b9c 100644 (file)
@@ -16,7 +16,6 @@
 #include "cpumap.h"
 #include "event-parse.h"
 #include "perf_regs.h"
-#include "unwind.h"
 #include "vdso.h"
 
 static int perf_session__open(struct perf_session *self, bool force)
@@ -87,13 +86,12 @@ void perf_session__set_id_hdr_size(struct perf_session *session)
 {
        u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);
 
-       session->host_machine.id_hdr_size = id_hdr_size;
        machines__set_id_hdr_size(&session->machines, id_hdr_size);
 }
 
 int perf_session__create_kernel_maps(struct perf_session *self)
 {
-       int ret = machine__create_kernel_maps(&self->host_machine);
+       int ret = machine__create_kernel_maps(&self->machines.host);
 
        if (ret >= 0)
                ret = machines__create_guest_kernel_maps(&self->machines);
@@ -102,8 +100,7 @@ int perf_session__create_kernel_maps(struct perf_session *self)
 
 static void perf_session__destroy_kernel_maps(struct perf_session *self)
 {
-       machine__destroy_kernel_maps(&self->host_machine);
-       machines__destroy_guest_kernel_maps(&self->machines);
+       machines__destroy_kernel_maps(&self->machines);
 }
 
 struct perf_session *perf_session__new(const char *filename, int mode,
@@ -128,22 +125,11 @@ struct perf_session *perf_session__new(const char *filename, int mode,
                goto out;
 
        memcpy(self->filename, filename, len);
-       /*
-        * On 64bit we can mmap the data file in one go. No need for tiny mmap
-        * slices. On 32bit we use 32MB.
-        */
-#if BITS_PER_LONG == 64
-       self->mmap_window = ULLONG_MAX;
-#else
-       self->mmap_window = 32 * 1024 * 1024ULL;
-#endif
-       self->machines = RB_ROOT;
        self->repipe = repipe;
        INIT_LIST_HEAD(&self->ordered_samples.samples);
        INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
        INIT_LIST_HEAD(&self->ordered_samples.to_free);
-       machine__init(&self->host_machine, "", HOST_KERNEL_ID);
-       hists__init(&self->hists);
+       machines__init(&self->machines);
 
        if (mode == O_RDONLY) {
                if (perf_session__open(self, force) < 0)
@@ -171,37 +157,30 @@ out_delete:
        return NULL;
 }
 
-static void machine__delete_dead_threads(struct machine *machine)
-{
-       struct thread *n, *t;
-
-       list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
-               list_del(&t->node);
-               thread__delete(t);
-       }
-}
-
 static void perf_session__delete_dead_threads(struct perf_session *session)
 {
-       machine__delete_dead_threads(&session->host_machine);
+       machine__delete_dead_threads(&session->machines.host);
 }
 
-static void machine__delete_threads(struct machine *self)
+static void perf_session__delete_threads(struct perf_session *session)
 {
-       struct rb_node *nd = rb_first(&self->threads);
-
-       while (nd) {
-               struct thread *t = rb_entry(nd, struct thread, rb_node);
-
-               rb_erase(&t->rb_node, &self->threads);
-               nd = rb_next(nd);
-               thread__delete(t);
-       }
+       machine__delete_threads(&session->machines.host);
 }
 
-static void perf_session__delete_threads(struct perf_session *session)
+static void perf_session_env__delete(struct perf_session_env *env)
 {
-       machine__delete_threads(&session->host_machine);
+       free(env->hostname);
+       free(env->os_release);
+       free(env->version);
+       free(env->arch);
+       free(env->cpu_desc);
+       free(env->cpuid);
+
+       free(env->cmdline);
+       free(env->sibling_cores);
+       free(env->sibling_threads);
+       free(env->numa_nodes);
+       free(env->pmu_mappings);
 }
 
 void perf_session__delete(struct perf_session *self)
@@ -209,198 +188,13 @@ void perf_session__delete(struct perf_session *self)
        perf_session__destroy_kernel_maps(self);
        perf_session__delete_dead_threads(self);
        perf_session__delete_threads(self);
-       machine__exit(&self->host_machine);
+       perf_session_env__delete(&self->header.env);
+       machines__exit(&self->machines);
        close(self->fd);
        free(self);
        vdso__exit();
 }
 
-void machine__remove_thread(struct machine *self, struct thread *th)
-{
-       self->last_match = NULL;
-       rb_erase(&th->rb_node, &self->threads);
-       /*
-        * We may have references to this thread, for instance in some hist_entry
-        * instances, so just move them to a separate list.
-        */
-       list_add_tail(&th->node, &self->dead_threads);
-}
-
-static bool symbol__match_parent_regex(struct symbol *sym)
-{
-       if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
-               return 1;
-
-       return 0;
-}
-
-static const u8 cpumodes[] = {
-       PERF_RECORD_MISC_USER,
-       PERF_RECORD_MISC_KERNEL,
-       PERF_RECORD_MISC_GUEST_USER,
-       PERF_RECORD_MISC_GUEST_KERNEL
-};
-#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))
-
-static void ip__resolve_ams(struct machine *self, struct thread *thread,
-                           struct addr_map_symbol *ams,
-                           u64 ip)
-{
-       struct addr_location al;
-       size_t i;
-       u8 m;
-
-       memset(&al, 0, sizeof(al));
-
-       for (i = 0; i < NCPUMODES; i++) {
-               m = cpumodes[i];
-               /*
-                * We cannot use the header.misc hint to determine whether a
-                * branch stack address is user, kernel, guest, hypervisor.
-                * Branches may straddle the kernel/user/hypervisor boundaries.
-                * Thus, we have to try consecutively until we find a match
-                * or else, the symbol is unknown
-                */
-               thread__find_addr_location(thread, self, m, MAP__FUNCTION,
-                               ip, &al, NULL);
-               if (al.sym)
-                       goto found;
-       }
-found:
-       ams->addr = ip;
-       ams->al_addr = al.addr;
-       ams->sym = al.sym;
-       ams->map = al.map;
-}
-
-struct branch_info *machine__resolve_bstack(struct machine *self,
-                                           struct thread *thr,
-                                           struct branch_stack *bs)
-{
-       struct branch_info *bi;
-       unsigned int i;
-
-       bi = calloc(bs->nr, sizeof(struct branch_info));
-       if (!bi)
-               return NULL;
-
-       for (i = 0; i < bs->nr; i++) {
-               ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to);
-               ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from);
-               bi[i].flags = bs->entries[i].flags;
-       }
-       return bi;
-}
-
-static int machine__resolve_callchain_sample(struct machine *machine,
-                                            struct thread *thread,
-                                            struct ip_callchain *chain,
-                                            struct symbol **parent)
-
-{
-       u8 cpumode = PERF_RECORD_MISC_USER;
-       unsigned int i;
-       int err;
-
-       callchain_cursor_reset(&callchain_cursor);
-
-       if (chain->nr > PERF_MAX_STACK_DEPTH) {
-               pr_warning("corrupted callchain. skipping...\n");
-               return 0;
-       }
-
-       for (i = 0; i < chain->nr; i++) {
-               u64 ip;
-               struct addr_location al;
-
-               if (callchain_param.order == ORDER_CALLEE)
-                       ip = chain->ips[i];
-               else
-                       ip = chain->ips[chain->nr - i - 1];
-
-               if (ip >= PERF_CONTEXT_MAX) {
-                       switch (ip) {
-                       case PERF_CONTEXT_HV:
-                               cpumode = PERF_RECORD_MISC_HYPERVISOR;
-                               break;
-                       case PERF_CONTEXT_KERNEL:
-                               cpumode = PERF_RECORD_MISC_KERNEL;
-                               break;
-                       case PERF_CONTEXT_USER:
-                               cpumode = PERF_RECORD_MISC_USER;
-                               break;
-                       default:
-                               pr_debug("invalid callchain context: "
-                                        "%"PRId64"\n", (s64) ip);
-                               /*
-                                * It seems the callchain is corrupted.
-                                * Discard all.
-                                */
-                               callchain_cursor_reset(&callchain_cursor);
-                               return 0;
-                       }
-                       continue;
-               }
-
-               al.filtered = false;
-               thread__find_addr_location(thread, machine, cpumode,
-                                          MAP__FUNCTION, ip, &al, NULL);
-               if (al.sym != NULL) {
-                       if (sort__has_parent && !*parent &&
-                           symbol__match_parent_regex(al.sym))
-                               *parent = al.sym;
-                       if (!symbol_conf.use_callchain)
-                               break;
-               }
-
-               err = callchain_cursor_append(&callchain_cursor,
-                                             ip, al.map, al.sym);
-               if (err)
-                       return err;
-       }
-
-       return 0;
-}
-
-static int unwind_entry(struct unwind_entry *entry, void *arg)
-{
-       struct callchain_cursor *cursor = arg;
-       return callchain_cursor_append(cursor, entry->ip,
-                                      entry->map, entry->sym);
-}
-
-int machine__resolve_callchain(struct machine *machine,
-                              struct perf_evsel *evsel,
-                              struct thread *thread,
-                              struct perf_sample *sample,
-                              struct symbol **parent)
-
-{
-       int ret;
-
-       callchain_cursor_reset(&callchain_cursor);
-
-       ret = machine__resolve_callchain_sample(machine, thread,
-                                               sample->callchain, parent);
-       if (ret)
-               return ret;
-
-       /* Can we do dwarf post unwind? */
-       if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
-             (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
-               return 0;
-
-       /* Bail out if nothing was captured. */
-       if ((!sample->user_regs.regs) ||
-           (!sample->user_stack.size))
-               return 0;
-
-       return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
-                                  thread, evsel->attr.sample_regs_user,
-                                  sample);
-
-}
-
 static int process_event_synth_tracing_data_stub(union perf_event *event
                                                 __maybe_unused,
                                                 struct perf_session *session
@@ -1027,7 +821,7 @@ static struct machine *
                return perf_session__findnew_machine(session, pid);
        }
 
-       return perf_session__find_host_machine(session);
+       return &session->machines.host;
 }
 
 static int perf_session_deliver_event(struct perf_session *session,
@@ -1065,11 +859,11 @@ static int perf_session_deliver_event(struct perf_session *session,
        case PERF_RECORD_SAMPLE:
                dump_sample(evsel, event, sample);
                if (evsel == NULL) {
-                       ++session->hists.stats.nr_unknown_id;
+                       ++session->stats.nr_unknown_id;
                        return 0;
                }
                if (machine == NULL) {
-                       ++session->hists.stats.nr_unprocessable_samples;
+                       ++session->stats.nr_unprocessable_samples;
                        return 0;
                }
                return tool->sample(tool, event, sample, evsel, machine);
@@ -1083,7 +877,7 @@ static int perf_session_deliver_event(struct perf_session *session,
                return tool->exit(tool, event, sample, machine);
        case PERF_RECORD_LOST:
                if (tool->lost == perf_event__process_lost)
-                       session->hists.stats.total_lost += event->lost.lost;
+                       session->stats.total_lost += event->lost.lost;
                return tool->lost(tool, event, sample, machine);
        case PERF_RECORD_READ:
                return tool->read(tool, event, sample, evsel, machine);
@@ -1092,7 +886,7 @@ static int perf_session_deliver_event(struct perf_session *session,
        case PERF_RECORD_UNTHROTTLE:
                return tool->unthrottle(tool, event, sample, machine);
        default:
-               ++session->hists.stats.nr_unknown_events;
+               ++session->stats.nr_unknown_events;
                return -1;
        }
 }
@@ -1106,8 +900,8 @@ static int perf_session__preprocess_sample(struct perf_session *session,
 
        if (!ip_callchain__valid(sample->callchain, event)) {
                pr_debug("call-chain problem with event, skipping it.\n");
-               ++session->hists.stats.nr_invalid_chains;
-               session->hists.stats.total_invalid_chains += sample->period;
+               ++session->stats.nr_invalid_chains;
+               session->stats.total_invalid_chains += sample->period;
                return -EINVAL;
        }
        return 0;
@@ -1165,7 +959,7 @@ static int perf_session__process_event(struct perf_session *session,
        if (event->header.type >= PERF_RECORD_HEADER_MAX)
                return -EINVAL;
 
-       hists__inc_nr_events(&session->hists, event->header.type);
+       events_stats__inc(&session->stats, event->header.type);
 
        if (event->header.type >= PERF_RECORD_USER_TYPE_START)
                return perf_session__process_user_event(session, event, tool, file_offset);
@@ -1201,7 +995,7 @@ void perf_event_header__bswap(struct perf_event_header *self)
 
 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
 {
-       return machine__findnew_thread(&session->host_machine, pid);
+       return machine__findnew_thread(&session->machines.host, pid);
 }
 
 static struct thread *perf_session__register_idle_thread(struct perf_session *self)
@@ -1220,39 +1014,39 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
                                            const struct perf_tool *tool)
 {
        if (tool->lost == perf_event__process_lost &&
-           session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
+           session->stats.nr_events[PERF_RECORD_LOST] != 0) {
                ui__warning("Processed %d events and lost %d chunks!\n\n"
                            "Check IO/CPU overload!\n\n",
-                           session->hists.stats.nr_events[0],
-                           session->hists.stats.nr_events[PERF_RECORD_LOST]);
+                           session->stats.nr_events[0],
+                           session->stats.nr_events[PERF_RECORD_LOST]);
        }
 
-       if (session->hists.stats.nr_unknown_events != 0) {
+       if (session->stats.nr_unknown_events != 0) {
                ui__warning("Found %u unknown events!\n\n"
                            "Is this an older tool processing a perf.data "
                            "file generated by a more recent tool?\n\n"
                            "If that is not the case, consider "
                            "reporting to linux-kernel@vger.kernel.org.\n\n",
-                           session->hists.stats.nr_unknown_events);
+                           session->stats.nr_unknown_events);
        }
 
-       if (session->hists.stats.nr_unknown_id != 0) {
+       if (session->stats.nr_unknown_id != 0) {
                ui__warning("%u samples with id not present in the header\n",
-                           session->hists.stats.nr_unknown_id);
+                           session->stats.nr_unknown_id);
        }
 
-       if (session->hists.stats.nr_invalid_chains != 0) {
+       if (session->stats.nr_invalid_chains != 0) {
                ui__warning("Found invalid callchains!\n\n"
                            "%u out of %u events were discarded for this reason.\n\n"
                            "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
-                           session->hists.stats.nr_invalid_chains,
-                           session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
+                           session->stats.nr_invalid_chains,
+                           session->stats.nr_events[PERF_RECORD_SAMPLE]);
        }
 
-       if (session->hists.stats.nr_unprocessable_samples != 0) {
+       if (session->stats.nr_unprocessable_samples != 0) {
                ui__warning("%u unprocessable samples recorded.\n"
                            "Do you have a KVM guest running and not using 'perf kvm'?\n",
-                           session->hists.stats.nr_unprocessable_samples);
+                           session->stats.nr_unprocessable_samples);
        }
 }
 
@@ -1369,6 +1163,18 @@ fetch_mmaped_event(struct perf_session *session,
        return event;
 }
 
+/*
+ * On 64bit we can mmap the data file in one go. No need for tiny mmap
+ * slices. On 32bit we use 32MB.
+ */
+#if BITS_PER_LONG == 64
+#define MMAP_SIZE ULLONG_MAX
+#define NUM_MMAPS 1
+#else
+#define MMAP_SIZE (32 * 1024 * 1024ULL)
+#define NUM_MMAPS 128
+#endif
+
 int __perf_session__process_events(struct perf_session *session,
                                   u64 data_offset, u64 data_size,
                                   u64 file_size, struct perf_tool *tool)
@@ -1376,7 +1182,7 @@ int __perf_session__process_events(struct perf_session *session,
        u64 head, page_offset, file_offset, file_pos, progress_next;
        int err, mmap_prot, mmap_flags, map_idx = 0;
        size_t  mmap_size;
-       char *buf, *mmaps[8];
+       char *buf, *mmaps[NUM_MMAPS];
        union perf_event *event;
        uint32_t size;
 
@@ -1391,7 +1197,7 @@ int __perf_session__process_events(struct perf_session *session,
 
        progress_next = file_size / 16;
 
-       mmap_size = session->mmap_window;
+       mmap_size = MMAP_SIZE;
        if (mmap_size > file_size)
                mmap_size = file_size;
 
@@ -1526,16 +1332,13 @@ int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
 
 size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
 {
-       return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
-              __dsos__fprintf(&self->host_machine.user_dsos, fp) +
-              machines__fprintf_dsos(&self->machines, fp);
+       return machines__fprintf_dsos(&self->machines, fp);
 }
 
 size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
-                                         bool with_hits)
+                                         bool (skip)(struct dso *dso, int parm), int parm)
 {
-       size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
-       return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
+       return machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm);
 }
 
 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
@@ -1543,11 +1346,11 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
        struct perf_evsel *pos;
        size_t ret = fprintf(fp, "Aggregated stats:\n");
 
-       ret += hists__fprintf_nr_events(&session->hists, fp);
+       ret += events_stats__fprintf(&session->stats, fp);
 
        list_for_each_entry(pos, &session->evlist->entries, node) {
                ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
-               ret += hists__fprintf_nr_events(&pos->hists, fp);
+               ret += events_stats__fprintf(&pos->hists.stats, fp);
        }
 
        return ret;
@@ -1559,7 +1362,7 @@ size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
         * FIXME: Here we have to actually print all the machines in this
         * session, not just the host...
         */
-       return machine__fprintf(&session->host_machine, fp);
+       return machine__fprintf(&session->machines.host, fp);
 }
 
 void perf_session__remove_thread(struct perf_session *session,
@@ -1568,10 +1371,10 @@ void perf_session__remove_thread(struct perf_session *session,
        /*
         * FIXME: This one makes no sense, we need to remove the thread from
         * the machine it belongs to, perf_session can have many machines, so
-        * doing it always on ->host_machine is wrong.  Fix when auditing all
+        * doing it always on ->machines.host is wrong.  Fix when auditing all
         * the 'perf kvm' code.
         */
-       machine__remove_thread(&session->host_machine, th);
+       machine__remove_thread(&session->machines.host, th);
 }
 
 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
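
The per-session host_machine and the machines rb_root are folded into one container here. Judging from the accessors used in this patch (machines__init/exit, machines__find/findnew, session->machines.host), its layout is presumably along these lines; stub types are included only so the sketch stands alone, and the real definition lives in the perf util headers:

/* Stub stand-ins for the real perf types. */
struct rb_root { void *rb_node; };
struct machine { int pid; /* kernel maps, threads, dsos, ... */ };

/* Presumed layout: the always-present host plus an rb-tree of guest
 * machines keyed by pid. */
struct machines {
        struct machine  host;
        struct rb_root  guests;
};
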
index cea133a6bdf1fd54061a3be27927fc32d3996184..b5c0847edfa92d3a826bb4b73c6cb59d7a19f2ea 100644 (file)
@@ -30,16 +30,10 @@ struct ordered_samples {
 struct perf_session {
        struct perf_header      header;
        unsigned long           size;
-       unsigned long           mmap_window;
-       struct machine          host_machine;
-       struct rb_root          machines;
+       struct machines         machines;
        struct perf_evlist      *evlist;
        struct pevent           *pevent;
-       /*
-        * FIXME: Need to split this up further, we need global
-        *        stats + per event stats.
-        */
-       struct hists            hists;
+       struct events_stats     stats;
        int                     fd;
        bool                    fd_pipe;
        bool                    repipe;
@@ -54,7 +48,7 @@ struct perf_tool;
 struct perf_session *perf_session__new(const char *filename, int mode,
                                       bool force, bool repipe,
                                       struct perf_tool *tool);
-void perf_session__delete(struct perf_session *self);
+void perf_session__delete(struct perf_session *session);
 
 void perf_event_header__bswap(struct perf_event_header *self);
 
@@ -80,44 +74,25 @@ int perf_session__create_kernel_maps(struct perf_session *self);
 void perf_session__set_id_hdr_size(struct perf_session *session);
 void perf_session__remove_thread(struct perf_session *self, struct thread *th);
 
-static inline
-struct machine *perf_session__find_host_machine(struct perf_session *self)
-{
-       return &self->host_machine;
-}
-
 static inline
 struct machine *perf_session__find_machine(struct perf_session *self, pid_t pid)
 {
-       if (pid == HOST_KERNEL_ID)
-               return &self->host_machine;
        return machines__find(&self->machines, pid);
 }
 
 static inline
 struct machine *perf_session__findnew_machine(struct perf_session *self, pid_t pid)
 {
-       if (pid == HOST_KERNEL_ID)
-               return &self->host_machine;
        return machines__findnew(&self->machines, pid);
 }
 
-static inline
-void perf_session__process_machines(struct perf_session *self,
-                                   struct perf_tool *tool,
-                                   machine__process_t process)
-{
-       process(&self->host_machine, tool);
-       return machines__process(&self->machines, process, tool);
-}
-
 struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
 size_t perf_session__fprintf(struct perf_session *self, FILE *fp);
 
 size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp);
 
-size_t perf_session__fprintf_dsos_buildid(struct perf_session *self,
-                                         FILE *fp, bool with_hits);
+size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
+                                         bool (fn)(struct dso *dso, int parm), int parm);
 
 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp);
 
index cfd1c0feb32d7da84d0b8b73b81883729fa623df..d41926cb9e3f23e9f1f296c795b39d04212d178f 100644 (file)
@@ -60,7 +60,7 @@ sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
 static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf,
                                       size_t size, unsigned int width)
 {
-       return repsep_snprintf(bf, size, "%*s:%5d", width,
+       return repsep_snprintf(bf, size, "%*s:%5d", width - 6,
                              self->thread->comm ?: "", self->thread->pid);
 }
 
@@ -97,6 +97,16 @@ static int hist_entry__comm_snprintf(struct hist_entry *self, char *bf,
        return repsep_snprintf(bf, size, "%*s", width, self->thread->comm);
 }
 
+struct sort_entry sort_comm = {
+       .se_header      = "Command",
+       .se_cmp         = sort__comm_cmp,
+       .se_collapse    = sort__comm_collapse,
+       .se_snprintf    = hist_entry__comm_snprintf,
+       .se_width_idx   = HISTC_COMM,
+};
+
+/* --sort dso */
+
 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
 {
        struct dso *dso_l = map_l ? map_l->dso : NULL;
@@ -117,40 +127,12 @@ static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
        return strcmp(dso_name_l, dso_name_r);
 }
 
-struct sort_entry sort_comm = {
-       .se_header      = "Command",
-       .se_cmp         = sort__comm_cmp,
-       .se_collapse    = sort__comm_collapse,
-       .se_snprintf    = hist_entry__comm_snprintf,
-       .se_width_idx   = HISTC_COMM,
-};
-
-/* --sort dso */
-
 static int64_t
 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
 {
        return _sort__dso_cmp(left->ms.map, right->ms.map);
 }
 
-
-static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r,
-                             u64 ip_l, u64 ip_r)
-{
-       if (!sym_l || !sym_r)
-               return cmp_null(sym_l, sym_r);
-
-       if (sym_l == sym_r)
-               return 0;
-
-       if (sym_l)
-               ip_l = sym_l->start;
-       if (sym_r)
-               ip_r = sym_r->start;
-
-       return (int64_t)(ip_r - ip_l);
-}
-
 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
                                     size_t size, unsigned int width)
 {
@@ -169,9 +151,43 @@ static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf,
        return _hist_entry__dso_snprintf(self->ms.map, bf, size, width);
 }
 
+struct sort_entry sort_dso = {
+       .se_header      = "Shared Object",
+       .se_cmp         = sort__dso_cmp,
+       .se_snprintf    = hist_entry__dso_snprintf,
+       .se_width_idx   = HISTC_DSO,
+};
+
+/* --sort symbol */
+
+static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
+{
+       u64 ip_l, ip_r;
+
+       if (!sym_l || !sym_r)
+               return cmp_null(sym_l, sym_r);
+
+       if (sym_l == sym_r)
+               return 0;
+
+       ip_l = sym_l->start;
+       ip_r = sym_r->start;
+
+       return (int64_t)(ip_r - ip_l);
+}
+
+static int64_t
+sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+       if (!left->ms.sym && !right->ms.sym)
+               return right->level - left->level;
+
+       return _sort__sym_cmp(left->ms.sym, right->ms.sym);
+}
+
 static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
                                     u64 ip, char level, char *bf, size_t size,
-                                    unsigned int width __maybe_unused)
+                                    unsigned int width)
 {
        size_t ret = 0;
 
@@ -197,43 +213,13 @@ static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
        return ret;
 }
 
-
-struct sort_entry sort_dso = {
-       .se_header      = "Shared Object",
-       .se_cmp         = sort__dso_cmp,
-       .se_snprintf    = hist_entry__dso_snprintf,
-       .se_width_idx   = HISTC_DSO,
-};
-
 static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf,
-                                   size_t size,
-                                   unsigned int width __maybe_unused)
+                                   size_t size, unsigned int width)
 {
        return _hist_entry__sym_snprintf(self->ms.map, self->ms.sym, self->ip,
                                         self->level, bf, size, width);
 }
 
-/* --sort symbol */
-static int64_t
-sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
-{
-       u64 ip_l, ip_r;
-
-       if (!left->ms.sym && !right->ms.sym)
-               return right->level - left->level;
-
-       if (!left->ms.sym || !right->ms.sym)
-               return cmp_null(left->ms.sym, right->ms.sym);
-
-       if (left->ms.sym == right->ms.sym)
-               return 0;
-
-       ip_l = left->ms.sym->start;
-       ip_r = right->ms.sym->start;
-
-       return _sort__sym_cmp(left->ms.sym, right->ms.sym, ip_l, ip_r);
-}
-
 struct sort_entry sort_sym = {
        .se_header      = "Symbol",
        .se_cmp         = sort__sym_cmp,
@@ -253,7 +239,7 @@ static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf,
                                        size_t size,
                                        unsigned int width __maybe_unused)
 {
-       FILE *fp;
+       FILE *fp = NULL;
        char cmd[PATH_MAX + 2], *path = self->srcline, *nl;
        size_t line_len;
 
@@ -274,7 +260,6 @@ static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf,
 
        if (getline(&path, &line_len, fp) < 0 || !line_len)
                goto out_ip;
-       fclose(fp);
        self->srcline = strdup(path);
        if (self->srcline == NULL)
                goto out_ip;
@@ -284,8 +269,12 @@ static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf,
                *nl = '\0';
        path = self->srcline;
 out_path:
+       if (fp)
+               pclose(fp);
        return repsep_snprintf(bf, size, "%s", path);
 out_ip:
+       if (fp)
+               pclose(fp);
        return repsep_snprintf(bf, size, "%-#*llx", BITS_PER_LONG / 4, self->ip);
 }
 
@@ -335,7 +324,7 @@ sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
 static int hist_entry__cpu_snprintf(struct hist_entry *self, char *bf,
                                       size_t size, unsigned int width)
 {
-       return repsep_snprintf(bf, size, "%-*d", width, self->cpu);
+       return repsep_snprintf(bf, size, "%*d", width, self->cpu);
 }
 
 struct sort_entry sort_cpu = {
@@ -345,6 +334,8 @@ struct sort_entry sort_cpu = {
        .se_width_idx   = HISTC_CPU,
 };
 
+/* sort keys for branch stacks */
+
 static int64_t
 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
 {
@@ -359,13 +350,6 @@ static int hist_entry__dso_from_snprintf(struct hist_entry *self, char *bf,
                                         bf, size, width);
 }
 
-struct sort_entry sort_dso_from = {
-       .se_header      = "Source Shared Object",
-       .se_cmp         = sort__dso_from_cmp,
-       .se_snprintf    = hist_entry__dso_from_snprintf,
-       .se_width_idx   = HISTC_DSO_FROM,
-};
-
 static int64_t
 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
 {
@@ -389,8 +373,7 @@ sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
        if (!from_l->sym && !from_r->sym)
                return right->level - left->level;
 
-       return _sort__sym_cmp(from_l->sym, from_r->sym, from_l->addr,
-                            from_r->addr);
+       return _sort__sym_cmp(from_l->sym, from_r->sym);
 }
 
 static int64_t
@@ -402,12 +385,11 @@ sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
        if (!to_l->sym && !to_r->sym)
                return right->level - left->level;
 
-       return _sort__sym_cmp(to_l->sym, to_r->sym, to_l->addr, to_r->addr);
+       return _sort__sym_cmp(to_l->sym, to_r->sym);
 }
 
 static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf,
-                                       size_t size,
-                                       unsigned int width __maybe_unused)
+                                        size_t size, unsigned int width)
 {
        struct addr_map_symbol *from = &self->branch_info->from;
        return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
@@ -416,8 +398,7 @@ static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf,
 }
 
 static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf,
-                                      size_t size,
-                                      unsigned int width __maybe_unused)
+                                      size_t size, unsigned int width)
 {
        struct addr_map_symbol *to = &self->branch_info->to;
        return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
@@ -425,6 +406,13 @@ static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf,
 
 }
 
+struct sort_entry sort_dso_from = {
+       .se_header      = "Source Shared Object",
+       .se_cmp         = sort__dso_from_cmp,
+       .se_snprintf    = hist_entry__dso_from_snprintf,
+       .se_width_idx   = HISTC_DSO_FROM,
+};
+
 struct sort_entry sort_dso_to = {
        .se_header      = "Target Shared Object",
        .se_cmp         = sort__dso_to_cmp,
@@ -484,30 +472,40 @@ struct sort_dimension {
 
 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
 
-static struct sort_dimension sort_dimensions[] = {
+static struct sort_dimension common_sort_dimensions[] = {
        DIM(SORT_PID, "pid", sort_thread),
        DIM(SORT_COMM, "comm", sort_comm),
        DIM(SORT_DSO, "dso", sort_dso),
-       DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
-       DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
        DIM(SORT_SYM, "symbol", sort_sym),
-       DIM(SORT_SYM_TO, "symbol_from", sort_sym_from),
-       DIM(SORT_SYM_FROM, "symbol_to", sort_sym_to),
        DIM(SORT_PARENT, "parent", sort_parent),
        DIM(SORT_CPU, "cpu", sort_cpu),
-       DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
        DIM(SORT_SRCLINE, "srcline", sort_srcline),
 };
 
+#undef DIM
+
+#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
+
+static struct sort_dimension bstack_sort_dimensions[] = {
+       DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
+       DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
+       DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
+       DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
+       DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
+};
+
+#undef DIM
+
 int sort_dimension__add(const char *tok)
 {
        unsigned int i;
 
-       for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
-               struct sort_dimension *sd = &sort_dimensions[i];
+       for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
+               struct sort_dimension *sd = &common_sort_dimensions[i];
 
                if (strncasecmp(tok, sd->name, strlen(tok)))
                        continue;
+
                if (sd->entry == &sort_parent) {
                        int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
                        if (ret) {
@@ -518,9 +516,7 @@ int sort_dimension__add(const char *tok)
                                return -EINVAL;
                        }
                        sort__has_parent = 1;
-               } else if (sd->entry == &sort_sym ||
-                          sd->entry == &sort_sym_from ||
-                          sd->entry == &sort_sym_to) {
+               } else if (sd->entry == &sort_sym) {
                        sort__has_sym = 1;
                }
 
@@ -530,52 +526,69 @@ int sort_dimension__add(const char *tok)
                if (sd->entry->se_collapse)
                        sort__need_collapse = 1;
 
-               if (list_empty(&hist_entry__sort_list)) {
-                       if (!strcmp(sd->name, "pid"))
-                               sort__first_dimension = SORT_PID;
-                       else if (!strcmp(sd->name, "comm"))
-                               sort__first_dimension = SORT_COMM;
-                       else if (!strcmp(sd->name, "dso"))
-                               sort__first_dimension = SORT_DSO;
-                       else if (!strcmp(sd->name, "symbol"))
-                               sort__first_dimension = SORT_SYM;
-                       else if (!strcmp(sd->name, "parent"))
-                               sort__first_dimension = SORT_PARENT;
-                       else if (!strcmp(sd->name, "cpu"))
-                               sort__first_dimension = SORT_CPU;
-                       else if (!strcmp(sd->name, "symbol_from"))
-                               sort__first_dimension = SORT_SYM_FROM;
-                       else if (!strcmp(sd->name, "symbol_to"))
-                               sort__first_dimension = SORT_SYM_TO;
-                       else if (!strcmp(sd->name, "dso_from"))
-                               sort__first_dimension = SORT_DSO_FROM;
-                       else if (!strcmp(sd->name, "dso_to"))
-                               sort__first_dimension = SORT_DSO_TO;
-                       else if (!strcmp(sd->name, "mispredict"))
-                               sort__first_dimension = SORT_MISPREDICT;
-               }
+               if (list_empty(&hist_entry__sort_list))
+                       sort__first_dimension = i;
 
                list_add_tail(&sd->entry->list, &hist_entry__sort_list);
                sd->taken = 1;
 
                return 0;
        }
+
+       for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
+               struct sort_dimension *sd = &bstack_sort_dimensions[i];
+
+               if (strncasecmp(tok, sd->name, strlen(tok)))
+                       continue;
+
+               if (sort__branch_mode != 1)
+                       return -EINVAL;
+
+               if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
+                       sort__has_sym = 1;
+
+               if (sd->taken)
+                       return 0;
+
+               if (sd->entry->se_collapse)
+                       sort__need_collapse = 1;
+
+               if (list_empty(&hist_entry__sort_list))
+                       sort__first_dimension = i + __SORT_BRANCH_STACK;
+
+               list_add_tail(&sd->entry->list, &hist_entry__sort_list);
+               sd->taken = 1;
+
+               return 0;
+       }
+
        return -ESRCH;
 }
 
-void setup_sorting(const char * const usagestr[], const struct option *opts)
+int setup_sorting(void)
 {
        char *tmp, *tok, *str = strdup(sort_order);
+       int ret = 0;
+
+       if (str == NULL) {
+               error("Not enough memory to setup sort keys");
+               return -ENOMEM;
+       }
 
        for (tok = strtok_r(str, ", ", &tmp);
                        tok; tok = strtok_r(NULL, ", ", &tmp)) {
-               if (sort_dimension__add(tok) < 0) {
+               ret = sort_dimension__add(tok);
+               if (ret == -EINVAL) {
+                       error("Invalid --sort key: `%s'", tok);
+                       break;
+               } else if (ret == -ESRCH) {
                        error("Unknown --sort key: `%s'", tok);
-                       usage_with_options(usagestr, opts);
+                       break;
                }
        }
 
        free(str);
+       return ret;
 }
 
 void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list,
index b4e8c3ba559de22357cc0cb7a3100e5ee081b327..b13e56f6ccbebf7aa03b25c6ffdf98357b78716b 100644 (file)
@@ -55,9 +55,6 @@ struct he_stat {
 struct hist_entry_diff {
        bool    computed;
 
-       /* PERF_HPP__DISPL */
-       int     displacement;
-
        /* PERF_HPP__DELTA */
        double  period_ratio_delta;
 
@@ -118,25 +115,29 @@ static inline struct hist_entry *hist_entry__next_pair(struct hist_entry *he)
        return NULL;
 }
 
-static inline void hist__entry_add_pair(struct hist_entry *he,
+static inline void hist_entry__add_pair(struct hist_entry *he,
                                        struct hist_entry *pair)
 {
        list_add_tail(&he->pairs.head, &pair->pairs.node);
 }
 
 enum sort_type {
+       /* common sort keys */
        SORT_PID,
        SORT_COMM,
        SORT_DSO,
        SORT_SYM,
        SORT_PARENT,
        SORT_CPU,
-       SORT_DSO_FROM,
+       SORT_SRCLINE,
+
+       /* branch stack specific sort keys */
+       __SORT_BRANCH_STACK,
+       SORT_DSO_FROM = __SORT_BRANCH_STACK,
        SORT_DSO_TO,
        SORT_SYM_FROM,
        SORT_SYM_TO,
        SORT_MISPREDICT,
-       SORT_SRCLINE,
 };
 
 /*
@@ -159,7 +160,7 @@ struct sort_entry {
 extern struct sort_entry sort_thread;
 extern struct list_head hist_entry__sort_list;
 
-void setup_sorting(const char * const usagestr[], const struct option *opts);
+int setup_sorting(void);
 extern int sort_dimension__add(const char *);
 void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list,
                             const char *list_name, FILE *fp);
index 346707df04b9f0b6e2176fa592c5baea91daa238..29c7b2cb252192b8cb9d184ef320c32265de832e 100644 (file)
@@ -331,6 +331,24 @@ char *strxfrchar(char *s, char from, char to)
        return s;
 }
 
+/**
+ * ltrim - Removes leading whitespace from @s.
+ * @s: The string to be stripped.
+ *
+ * Return pointer to the first non-whitespace character in @s.
+ */
+char *ltrim(char *s)
+{
+       int len = strlen(s);
+
+       while (len && isspace(*s)) {
+               len--;
+               s++;
+       }
+
+       return s;
+}
+
 /**
  * rtrim - Removes trailing whitespace from @s.
  * @s: The string to be stripped.
index 155d8b7078a723ddfec5030048a72b6500840019..55433aa42c8f30cb8dbeeae537ce8ea0ad47965b 100644 (file)
@@ -35,11 +35,11 @@ out_delete:
        return NULL;
 }
 
-static void str_node__delete(struct str_node *self, bool dupstr)
+static void str_node__delete(struct str_node *snode, bool dupstr)
 {
        if (dupstr)
-               free((void *)self->s);
-       free(self);
+               free((void *)snode->s);
+       free(snode);
 }
 
 static
@@ -59,12 +59,12 @@ static int strlist__node_cmp(struct rb_node *rb_node, const void *entry)
        return strcmp(snode->s, str);
 }
 
-int strlist__add(struct strlist *self, const char *new_entry)
+int strlist__add(struct strlist *slist, const char *new_entry)
 {
-       return rblist__add_node(&self->rblist, new_entry);
+       return rblist__add_node(&slist->rblist, new_entry);
 }
 
-int strlist__load(struct strlist *self, const char *filename)
+int strlist__load(struct strlist *slist, const char *filename)
 {
        char entry[1024];
        int err;
@@ -80,7 +80,7 @@ int strlist__load(struct strlist *self, const char *filename)
                        continue;
                entry[len - 1] = '\0';
 
-               err = strlist__add(self, entry);
+               err = strlist__add(slist, entry);
                if (err != 0)
                        goto out;
        }
@@ -107,56 +107,56 @@ struct str_node *strlist__find(struct strlist *slist, const char *entry)
        return snode;
 }
 
-static int strlist__parse_list_entry(struct strlist *self, const char *s)
+static int strlist__parse_list_entry(struct strlist *slist, const char *s)
 {
        if (strncmp(s, "file://", 7) == 0)
-               return strlist__load(self, s + 7);
+               return strlist__load(slist, s + 7);
 
-       return strlist__add(self, s);
+       return strlist__add(slist, s);
 }
 
-int strlist__parse_list(struct strlist *self, const char *s)
+int strlist__parse_list(struct strlist *slist, const char *s)
 {
        char *sep;
        int err;
 
        while ((sep = strchr(s, ',')) != NULL) {
                *sep = '\0';
-               err = strlist__parse_list_entry(self, s);
+               err = strlist__parse_list_entry(slist, s);
                *sep = ',';
                if (err != 0)
                        return err;
                s = sep + 1;
        }
 
-       return *s ? strlist__parse_list_entry(self, s) : 0;
+       return *s ? strlist__parse_list_entry(slist, s) : 0;
 }
 
-struct strlist *strlist__new(bool dupstr, const char *slist)
+struct strlist *strlist__new(bool dupstr, const char *list)
 {
-       struct strlist *self = malloc(sizeof(*self));
+       struct strlist *slist = malloc(sizeof(*slist));
 
-       if (self != NULL) {
-               rblist__init(&self->rblist);
-               self->rblist.node_cmp    = strlist__node_cmp;
-               self->rblist.node_new    = strlist__node_new;
-               self->rblist.node_delete = strlist__node_delete;
+       if (slist != NULL) {
+               rblist__init(&slist->rblist);
+               slist->rblist.node_cmp    = strlist__node_cmp;
+               slist->rblist.node_new    = strlist__node_new;
+               slist->rblist.node_delete = strlist__node_delete;
 
-               self->dupstr     = dupstr;
-               if (slist && strlist__parse_list(self, slist) != 0)
+               slist->dupstr    = dupstr;
+               if (slist && strlist__parse_list(slist, list) != 0)
                        goto out_error;
        }
 
-       return self;
+       return slist;
 out_error:
-       free(self);
+       free(slist);
        return NULL;
 }
 
-void strlist__delete(struct strlist *self)
+void strlist__delete(struct strlist *slist)
 {
-       if (self != NULL)
-               rblist__delete(&self->rblist);
+       if (slist != NULL)
+               rblist__delete(&slist->rblist);
 }
 
 struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx)
index dd9f922ec67c58845ff22b6d3fabe3eb888c21e6..5c7f87069d9cd7bfa5412c186068840e76822b2e 100644 (file)
@@ -17,34 +17,34 @@ struct strlist {
 };
 
 struct strlist *strlist__new(bool dupstr, const char *slist);
-void strlist__delete(struct strlist *self);
+void strlist__delete(struct strlist *slist);
 
-void strlist__remove(struct strlist *self, struct str_node *sn);
-int strlist__load(struct strlist *self, const char *filename);
-int strlist__add(struct strlist *self, const char *str);
+void strlist__remove(struct strlist *slist, struct str_node *sn);
+int strlist__load(struct strlist *slist, const char *filename);
+int strlist__add(struct strlist *slist, const char *str);
 
-struct str_node *strlist__entry(const struct strlist *self, unsigned int idx);
-struct str_node *strlist__find(struct strlist *self, const char *entry);
+struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx);
+struct str_node *strlist__find(struct strlist *slist, const char *entry);
 
-static inline bool strlist__has_entry(struct strlist *self, const char *entry)
+static inline bool strlist__has_entry(struct strlist *slist, const char *entry)
 {
-       return strlist__find(self, entry) != NULL;
+       return strlist__find(slist, entry) != NULL;
 }
 
-static inline bool strlist__empty(const struct strlist *self)
+static inline bool strlist__empty(const struct strlist *slist)
 {
-       return rblist__empty(&self->rblist);
+       return rblist__empty(&slist->rblist);
 }
 
-static inline unsigned int strlist__nr_entries(const struct strlist *self)
+static inline unsigned int strlist__nr_entries(const struct strlist *slist)
 {
-       return rblist__nr_entries(&self->rblist);
+       return rblist__nr_entries(&slist->rblist);
 }
 
 /* For strlist iteration */
-static inline struct str_node *strlist__first(struct strlist *self)
+static inline struct str_node *strlist__first(struct strlist *slist)
 {
-       struct rb_node *rn = rb_first(&self->rblist.entries);
+       struct rb_node *rn = rb_first(&slist->rblist.entries);
        return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
 }
 static inline struct str_node *strlist__next(struct str_node *sn)
@@ -59,21 +59,21 @@ static inline struct str_node *strlist__next(struct str_node *sn)
 /**
  * strlist_for_each      - iterate over a strlist
  * @pos:       the &struct str_node to use as a loop cursor.
- * @self:      the &struct strlist for loop.
+ * @slist:     the &struct strlist for loop.
  */
-#define strlist__for_each(pos, self)   \
-       for (pos = strlist__first(self); pos; pos = strlist__next(pos))
+#define strlist__for_each(pos, slist)  \
+       for (pos = strlist__first(slist); pos; pos = strlist__next(pos))
 
 /**
  * strlist_for_each_safe - iterate over a strlist safe against removal of
  *                         str_node
  * @pos:       the &struct str_node to use as a loop cursor.
  * @n:         another &struct str_node to use as temporary storage.
- * @self:      the &struct strlist for loop.
+ * @slist:     the &struct strlist for loop.
  */
-#define strlist__for_each_safe(pos, n, self)   \
-       for (pos = strlist__first(self), n = strlist__next(pos); pos;\
+#define strlist__for_each_safe(pos, n, slist)  \
+       for (pos = strlist__first(slist), n = strlist__next(pos); pos;\
             pos = n, n = strlist__next(n))
 
-int strlist__parse_list(struct strlist *self, const char *s);
+int strlist__parse_list(struct strlist *slist, const char *s);
 #endif /* __PERF_STRLIST_H */
index db0cc92cf2eaf25a3528429154b1f9713f1d91d8..54efcb5659ac2bf2f6061a5c64e496f1b408d54c 100644 (file)
@@ -1,6 +1,3 @@
-#include <libelf.h>
-#include <gelf.h>
-#include <elf.h>
 #include <fcntl.h>
 #include <stdio.h>
 #include <errno.h>
@@ -718,6 +715,17 @@ int dso__load_sym(struct dso *dso, struct map *map,
                                        sym.st_value);
                        used_opd = true;
                }
+               /*
+                * When loading symbols in a data mapping, ABS symbols (which
+                * have a value of SHN_ABS in their st_shndx) fail at
+                * elf_getscn(), and that marks the whole load as a failure,
+                * so already-loaded symbols cannot be fixed up.
+                *
+                * I'm not sure what should be done. Just ignore them for now.
+                * - Namhyung Kim
+                */
+               if (sym.st_shndx == SHN_ABS)
+                       continue;
 
                sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
                if (!sec)
index 259f8f2ea9c96468b84100c511ea0878f9d53f0c..a7390cde63bc74f8dae01422dd92bbc2e1a09d3c 100644 (file)
@@ -1,6 +1,5 @@
 #include "symbol.h"
 
-#include <elf.h>
 #include <stdio.h>
 #include <fcntl.h>
 #include <string.h>
index 295f8d4feedfeb00afd11787c3e7538e5fb19c74..e6432d85b43dd9de0fc6470b271de72a3f6028b8 100644 (file)
@@ -28,8 +28,8 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map,
                                symbol_filter_t filter);
 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
                        symbol_filter_t filter);
-static int vmlinux_path__nr_entries;
-static char **vmlinux_path;
+int vmlinux_path__nr_entries;
+char **vmlinux_path;
 
 struct symbol_conf symbol_conf = {
        .exclude_other    = true,
@@ -202,13 +202,6 @@ void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
        curr->end = ~0ULL;
 }
 
-static void map_groups__fixup_end(struct map_groups *mg)
-{
-       int i;
-       for (i = 0; i < MAP__NR_TYPES; ++i)
-               __map_groups__fixup_end(mg, i);
-}
-
 struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
 {
        size_t namelen = strlen(name) + 1;
@@ -652,8 +645,8 @@ discard_symbol:             rb_erase(&pos->rb_node, root);
        return count + moved;
 }
 
-static bool symbol__restricted_filename(const char *filename,
-                                       const char *restricted_filename)
+bool symbol__restricted_filename(const char *filename,
+                                const char *restricted_filename)
 {
        bool restricted = false;
 
@@ -775,10 +768,6 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
        else
                machine = NULL;
 
-       name = malloc(PATH_MAX);
-       if (!name)
-               return -1;
-
        dso->adjust_symbols = 0;
 
        if (strncmp(dso->name, "/tmp/perf-", 10) == 0) {
@@ -802,6 +791,10 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
        if (machine)
                root_dir = machine->root_dir;
 
+       name = malloc(PATH_MAX);
+       if (!name)
+               return -1;
+
        /* Iterate over candidate debug images.
         * Keep track of "interesting" ones (those which have a symtab, dynsym,
         * and/or opd section) for processing.
@@ -887,200 +880,6 @@ struct map *map_groups__find_by_name(struct map_groups *mg,
        return NULL;
 }
 
-static int map_groups__set_modules_path_dir(struct map_groups *mg,
-                               const char *dir_name)
-{
-       struct dirent *dent;
-       DIR *dir = opendir(dir_name);
-       int ret = 0;
-
-       if (!dir) {
-               pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
-               return -1;
-       }
-
-       while ((dent = readdir(dir)) != NULL) {
-               char path[PATH_MAX];
-               struct stat st;
-
-               /*sshfs might return bad dent->d_type, so we have to stat*/
-               snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
-               if (stat(path, &st))
-                       continue;
-
-               if (S_ISDIR(st.st_mode)) {
-                       if (!strcmp(dent->d_name, ".") ||
-                           !strcmp(dent->d_name, ".."))
-                               continue;
-
-                       ret = map_groups__set_modules_path_dir(mg, path);
-                       if (ret < 0)
-                               goto out;
-               } else {
-                       char *dot = strrchr(dent->d_name, '.'),
-                            dso_name[PATH_MAX];
-                       struct map *map;
-                       char *long_name;
-
-                       if (dot == NULL || strcmp(dot, ".ko"))
-                               continue;
-                       snprintf(dso_name, sizeof(dso_name), "[%.*s]",
-                                (int)(dot - dent->d_name), dent->d_name);
-
-                       strxfrchar(dso_name, '-', '_');
-                       map = map_groups__find_by_name(mg, MAP__FUNCTION,
-                                                      dso_name);
-                       if (map == NULL)
-                               continue;
-
-                       long_name = strdup(path);
-                       if (long_name == NULL) {
-                               ret = -1;
-                               goto out;
-                       }
-                       dso__set_long_name(map->dso, long_name);
-                       map->dso->lname_alloc = 1;
-                       dso__kernel_module_get_build_id(map->dso, "");
-               }
-       }
-
-out:
-       closedir(dir);
-       return ret;
-}
-
-static char *get_kernel_version(const char *root_dir)
-{
-       char version[PATH_MAX];
-       FILE *file;
-       char *name, *tmp;
-       const char *prefix = "Linux version ";
-
-       sprintf(version, "%s/proc/version", root_dir);
-       file = fopen(version, "r");
-       if (!file)
-               return NULL;
-
-       version[0] = '\0';
-       tmp = fgets(version, sizeof(version), file);
-       fclose(file);
-
-       name = strstr(version, prefix);
-       if (!name)
-               return NULL;
-       name += strlen(prefix);
-       tmp = strchr(name, ' ');
-       if (tmp)
-               *tmp = '\0';
-
-       return strdup(name);
-}
-
-static int machine__set_modules_path(struct machine *machine)
-{
-       char *version;
-       char modules_path[PATH_MAX];
-
-       version = get_kernel_version(machine->root_dir);
-       if (!version)
-               return -1;
-
-       snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
-                machine->root_dir, version);
-       free(version);
-
-       return map_groups__set_modules_path_dir(&machine->kmaps, modules_path);
-}
-
-struct map *machine__new_module(struct machine *machine, u64 start,
-                               const char *filename)
-{
-       struct map *map;
-       struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);
-
-       if (dso == NULL)
-               return NULL;
-
-       map = map__new2(start, dso, MAP__FUNCTION);
-       if (map == NULL)
-               return NULL;
-
-       if (machine__is_host(machine))
-               dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
-       else
-               dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
-       map_groups__insert(&machine->kmaps, map);
-       return map;
-}
-
-static int machine__create_modules(struct machine *machine)
-{
-       char *line = NULL;
-       size_t n;
-       FILE *file;
-       struct map *map;
-       const char *modules;
-       char path[PATH_MAX];
-
-       if (machine__is_default_guest(machine))
-               modules = symbol_conf.default_guest_modules;
-       else {
-               sprintf(path, "%s/proc/modules", machine->root_dir);
-               modules = path;
-       }
-
-       if (symbol__restricted_filename(path, "/proc/modules"))
-               return -1;
-
-       file = fopen(modules, "r");
-       if (file == NULL)
-               return -1;
-
-       while (!feof(file)) {
-               char name[PATH_MAX];
-               u64 start;
-               char *sep;
-               int line_len;
-
-               line_len = getline(&line, &n, file);
-               if (line_len < 0)
-                       break;
-
-               if (!line)
-                       goto out_failure;
-
-               line[--line_len] = '\0'; /* \n */
-
-               sep = strrchr(line, 'x');
-               if (sep == NULL)
-                       continue;
-
-               hex2u64(sep + 1, &start);
-
-               sep = strchr(line, ' ');
-               if (sep == NULL)
-                       continue;
-
-               *sep = '\0';
-
-               snprintf(name, sizeof(name), "[%s]", line);
-               map = machine__new_module(machine, start, name);
-               if (map == NULL)
-                       goto out_delete_line;
-               dso__kernel_module_get_build_id(map->dso, machine->root_dir);
-       }
-
-       free(line);
-       fclose(file);
-
-       return machine__set_modules_path(machine);
-
-out_delete_line:
-       free(line);
-out_failure:
-       return -1;
-}
-
 int dso__load_vmlinux(struct dso *dso, struct map *map,
                      const char *vmlinux, symbol_filter_t filter)
 {
@@ -1124,8 +923,10 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map,
        filename = dso__build_id_filename(dso, NULL, 0);
        if (filename != NULL) {
                err = dso__load_vmlinux(dso, map, filename, filter);
-               if (err > 0)
+               if (err > 0) {
+                       dso->lname_alloc = 1;
                        goto out;
+               }
                free(filename);
        }
 
@@ -1133,6 +934,7 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map,
                err = dso__load_vmlinux(dso, map, vmlinux_path[i], filter);
                if (err > 0) {
                        dso__set_long_name(dso, strdup(vmlinux_path[i]));
+                       dso->lname_alloc = 1;
                        break;
                }
        }
@@ -1172,6 +974,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map,
                if (err > 0) {
                        dso__set_long_name(dso,
                                           strdup(symbol_conf.vmlinux_name));
+                       dso->lname_alloc = 1;
                        goto out_fixup;
                }
                return err;
@@ -1300,195 +1103,6 @@ out_try_fixup:
        return err;
 }
 
-size_t machines__fprintf_dsos(struct rb_root *machines, FILE *fp)
-{
-       struct rb_node *nd;
-       size_t ret = 0;
-
-       for (nd = rb_first(machines); nd; nd = rb_next(nd)) {
-               struct machine *pos = rb_entry(nd, struct machine, rb_node);
-               ret += __dsos__fprintf(&pos->kernel_dsos, fp);
-               ret += __dsos__fprintf(&pos->user_dsos, fp);
-       }
-
-       return ret;
-}
-
-size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
-                                    bool with_hits)
-{
-       return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, with_hits) +
-              __dsos__fprintf_buildid(&machine->user_dsos, fp, with_hits);
-}
-
-size_t machines__fprintf_dsos_buildid(struct rb_root *machines,
-                                     FILE *fp, bool with_hits)
-{
-       struct rb_node *nd;
-       size_t ret = 0;
-
-       for (nd = rb_first(machines); nd; nd = rb_next(nd)) {
-               struct machine *pos = rb_entry(nd, struct machine, rb_node);
-               ret += machine__fprintf_dsos_buildid(pos, fp, with_hits);
-       }
-       return ret;
-}
-
-static struct dso *machine__get_kernel(struct machine *machine)
-{
-       const char *vmlinux_name = NULL;
-       struct dso *kernel;
-
-       if (machine__is_host(machine)) {
-               vmlinux_name = symbol_conf.vmlinux_name;
-               if (!vmlinux_name)
-                       vmlinux_name = "[kernel.kallsyms]";
-
-               kernel = dso__kernel_findnew(machine, vmlinux_name,
-                                            "[kernel]",
-                                            DSO_TYPE_KERNEL);
-       } else {
-               char bf[PATH_MAX];
-
-               if (machine__is_default_guest(machine))
-                       vmlinux_name = symbol_conf.default_guest_vmlinux_name;
-               if (!vmlinux_name)
-                       vmlinux_name = machine__mmap_name(machine, bf,
-                                                         sizeof(bf));
-
-               kernel = dso__kernel_findnew(machine, vmlinux_name,
-                                            "[guest.kernel]",
-                                            DSO_TYPE_GUEST_KERNEL);
-       }
-
-       if (kernel != NULL && (!kernel->has_build_id))
-               dso__read_running_kernel_build_id(kernel, machine);
-
-       return kernel;
-}
-
-struct process_args {
-       u64 start;
-};
-
-static int symbol__in_kernel(void *arg, const char *name,
-                            char type __maybe_unused, u64 start)
-{
-       struct process_args *args = arg;
-
-       if (strchr(name, '['))
-               return 0;
-
-       args->start = start;
-       return 1;
-}
-
-/* Figure out the start address of kernel map from /proc/kallsyms */
-static u64 machine__get_kernel_start_addr(struct machine *machine)
-{
-       const char *filename;
-       char path[PATH_MAX];
-       struct process_args args;
-
-       if (machine__is_host(machine)) {
-               filename = "/proc/kallsyms";
-       } else {
-               if (machine__is_default_guest(machine))
-                       filename = (char *)symbol_conf.default_guest_kallsyms;
-               else {
-                       sprintf(path, "%s/proc/kallsyms", machine->root_dir);
-                       filename = path;
-               }
-       }
-
-       if (symbol__restricted_filename(filename, "/proc/kallsyms"))
-               return 0;
-
-       if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
-               return 0;
-
-       return args.start;
-}
-
-int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
-{
-       enum map_type type;
-       u64 start = machine__get_kernel_start_addr(machine);
-
-       for (type = 0; type < MAP__NR_TYPES; ++type) {
-               struct kmap *kmap;
-
-               machine->vmlinux_maps[type] = map__new2(start, kernel, type);
-               if (machine->vmlinux_maps[type] == NULL)
-                       return -1;
-
-               machine->vmlinux_maps[type]->map_ip =
-                       machine->vmlinux_maps[type]->unmap_ip =
-                               identity__map_ip;
-               kmap = map__kmap(machine->vmlinux_maps[type]);
-               kmap->kmaps = &machine->kmaps;
-               map_groups__insert(&machine->kmaps,
-                                  machine->vmlinux_maps[type]);
-       }
-
-       return 0;
-}
-
-void machine__destroy_kernel_maps(struct machine *machine)
-{
-       enum map_type type;
-
-       for (type = 0; type < MAP__NR_TYPES; ++type) {
-               struct kmap *kmap;
-
-               if (machine->vmlinux_maps[type] == NULL)
-                       continue;
-
-               kmap = map__kmap(machine->vmlinux_maps[type]);
-               map_groups__remove(&machine->kmaps,
-                                  machine->vmlinux_maps[type]);
-               if (kmap->ref_reloc_sym) {
-                       /*
-                        * ref_reloc_sym is shared among all maps, so free just
-                        * on one of them.
-                        */
-                       if (type == MAP__FUNCTION) {
-                               free((char *)kmap->ref_reloc_sym->name);
-                               kmap->ref_reloc_sym->name = NULL;
-                               free(kmap->ref_reloc_sym);
-                       }
-                       kmap->ref_reloc_sym = NULL;
-               }
-
-               map__delete(machine->vmlinux_maps[type]);
-               machine->vmlinux_maps[type] = NULL;
-       }
-}
-
-int machine__create_kernel_maps(struct machine *machine)
-{
-       struct dso *kernel = machine__get_kernel(machine);
-
-       if (kernel == NULL ||
-           __machine__create_kernel_maps(machine, kernel) < 0)
-               return -1;
-
-       if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
-               if (machine__is_host(machine))
-                       pr_debug("Problems creating module maps, "
-                                "continuing anyway...\n");
-               else
-                       pr_debug("Problems creating module maps for guest %d, "
-                                "continuing anyway...\n", machine->pid);
-       }
-
-       /*
-        * Now that we have all the maps created, just set the ->end of them:
-        */
-       map_groups__fixup_end(&machine->kmaps);
-       return 0;
-}
-
 static void vmlinux_path__exit(void)
 {
        while (--vmlinux_path__nr_entries >= 0) {
@@ -1549,25 +1163,6 @@ out_fail:
        return -1;
 }
 
-size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
-{
-       int i;
-       size_t printed = 0;
-       struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;
-
-       if (kdso->has_build_id) {
-               char filename[PATH_MAX];
-               if (dso__build_id_filename(kdso, filename, sizeof(filename)))
-                       printed += fprintf(fp, "[0] %s\n", filename);
-       }
-
-       for (i = 0; i < vmlinux_path__nr_entries; ++i)
-               printed += fprintf(fp, "[%d] %s\n",
-                                  i + kdso->has_build_id, vmlinux_path[i]);
-
-       return printed;
-}
-
 static int setup_list(struct strlist **list, const char *list_str,
                      const char *list_name)
 {
@@ -1671,108 +1266,3 @@ void symbol__exit(void)
        symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
        symbol_conf.initialized = false;
 }
-
-int machines__create_kernel_maps(struct rb_root *machines, pid_t pid)
-{
-       struct machine *machine = machines__findnew(machines, pid);
-
-       if (machine == NULL)
-               return -1;
-
-       return machine__create_kernel_maps(machine);
-}
-
-int machines__create_guest_kernel_maps(struct rb_root *machines)
-{
-       int ret = 0;
-       struct dirent **namelist = NULL;
-       int i, items = 0;
-       char path[PATH_MAX];
-       pid_t pid;
-       char *endp;
-
-       if (symbol_conf.default_guest_vmlinux_name ||
-           symbol_conf.default_guest_modules ||
-           symbol_conf.default_guest_kallsyms) {
-               machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
-       }
-
-       if (symbol_conf.guestmount) {
-               items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
-               if (items <= 0)
-                       return -ENOENT;
-               for (i = 0; i < items; i++) {
-                       if (!isdigit(namelist[i]->d_name[0])) {
-                               /* Filter out . and .. */
-                               continue;
-                       }
-                       pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
-                       if ((*endp != '\0') ||
-                           (endp == namelist[i]->d_name) ||
-                           (errno == ERANGE)) {
-                               pr_debug("invalid directory (%s). Skipping.\n",
-                                        namelist[i]->d_name);
-                               continue;
-                       }
-                       sprintf(path, "%s/%s/proc/kallsyms",
-                               symbol_conf.guestmount,
-                               namelist[i]->d_name);
-                       ret = access(path, R_OK);
-                       if (ret) {
-                               pr_debug("Can't access file %s\n", path);
-                               goto failure;
-                       }
-                       machines__create_kernel_maps(machines, pid);
-               }
-failure:
-               free(namelist);
-       }
-
-       return ret;
-}
-
-void machines__destroy_guest_kernel_maps(struct rb_root *machines)
-{
-       struct rb_node *next = rb_first(machines);
-
-       while (next) {
-               struct machine *pos = rb_entry(next, struct machine, rb_node);
-
-               next = rb_next(&pos->rb_node);
-               rb_erase(&pos->rb_node, machines);
-               machine__delete(pos);
-       }
-}
-
-int machine__load_kallsyms(struct machine *machine, const char *filename,
-                          enum map_type type, symbol_filter_t filter)
-{
-       struct map *map = machine->vmlinux_maps[type];
-       int ret = dso__load_kallsyms(map->dso, filename, map, filter);
-
-       if (ret > 0) {
-               dso__set_loaded(map->dso, type);
-               /*
-                * Since /proc/kallsyms will have multiple sessions for the
-                * kernel, with modules between them, fixup the end of all
-                * sections.
-                */
-               __map_groups__fixup_end(&machine->kmaps, type);
-       }
-
-       return ret;
-}
-
-int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
-                              symbol_filter_t filter)
-{
-       struct map *map = machine->vmlinux_maps[type];
-       int ret = dso__load_vmlinux_path(map->dso, map, filter);
-
-       if (ret > 0) {
-               dso__set_loaded(map->dso, type);
-               map__reloc_vmlinux(map);
-       }
-
-       return ret;
-}
index de68f98b236d42231ce5e1307367adeb45d9351e..b62ca37c4b77aff3e2f6e96387675842365f1bf9 100644 (file)
@@ -16,8 +16,8 @@
 #ifdef LIBELF_SUPPORT
 #include <libelf.h>
 #include <gelf.h>
-#include <elf.h>
 #endif
+#include <elf.h>
 
 #include "dso.h"
 
@@ -96,7 +96,8 @@ struct symbol_conf {
                        initialized,
                        kptr_restrict,
                        annotate_asm_raw,
-                       annotate_src;
+                       annotate_src,
+                       event_group;
        const char      *vmlinux_name,
                        *kallsyms_name,
                        *source_prefix,
@@ -120,6 +121,8 @@ struct symbol_conf {
 };
 
 extern struct symbol_conf symbol_conf;
+extern int vmlinux_path__nr_entries;
+extern char **vmlinux_path;
 
 static inline void *symbol__priv(struct symbol *sym)
 {
@@ -223,6 +226,8 @@ size_t symbol__fprintf_symname_offs(const struct symbol *sym,
 size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp);
 size_t symbol__fprintf(struct symbol *sym, FILE *fp);
 bool symbol_type__is_a(char symbol_type, enum map_type map_type);
+bool symbol__restricted_filename(const char *filename,
+                                const char *restricted_filename);
 
 int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
                  struct symsrc *runtime_ss, symbol_filter_t filter,
index 48c6902e749f7d78bf1d8a8b0a85fa4711565221..f71e9eafe15a7323e127145c13cd0240daae6136 100644 (file)
@@ -8,7 +8,7 @@ static const char * const sysfs_known_mountpoints[] = {
 };
 
 static int sysfs_found;
-char sysfs_mountpoint[PATH_MAX];
+char sysfs_mountpoint[PATH_MAX + 1];
 
 static int sysfs_valid_mountpoint(const char *sysfs)
 {
index df59623ac7630ccc67ee6f5cbaf5fae1c2847782..632e40e5ceca6d301a764caab574628a6ca89c9a 100644 (file)
@@ -54,10 +54,10 @@ int thread__comm_len(struct thread *self)
        return self->comm_len;
 }
 
-static size_t thread__fprintf(struct thread *self, FILE *fp)
+size_t thread__fprintf(struct thread *thread, FILE *fp)
 {
-       return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
-              map_groups__fprintf(&self->mg, verbose, fp);
+       return fprintf(fp, "Thread %d %s\n", thread->pid, thread->comm) +
+              map_groups__fprintf(&thread->mg, verbose, fp);
 }
 
 void thread__insert_map(struct thread *self, struct map *map)
@@ -84,17 +84,3 @@ int thread__fork(struct thread *self, struct thread *parent)
                        return -ENOMEM;
        return 0;
 }
-
-size_t machine__fprintf(struct machine *machine, FILE *fp)
-{
-       size_t ret = 0;
-       struct rb_node *nd;
-
-       for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
-               struct thread *pos = rb_entry(nd, struct thread, rb_node);
-
-               ret += thread__fprintf(pos, fp);
-       }
-
-       return ret;
-}
index f2fa17caa7d599e129a6901e0772e9a3916fd2d8..5ad266403098d18ab53e907b08a2ae8e3768930e 100644 (file)
@@ -30,6 +30,7 @@ int thread__set_comm(struct thread *self, const char *comm);
 int thread__comm_len(struct thread *self);
 void thread__insert_map(struct thread *self, struct map *map);
 int thread__fork(struct thread *self, struct thread *parent);
+size_t thread__fprintf(struct thread *thread, FILE *fp);
 
 static inline struct map *thread__find_map(struct thread *self,
                                           enum map_type type, u64 addr)
index 884dde9b9bc1334294bc42d7d55bea65ba23f364..54d37a4753c5a5618b67509fa27156c439d6702a 100644 (file)
@@ -26,6 +26,8 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
        float samples_per_sec = top->samples / top->delay_secs;
        float ksamples_per_sec = top->kernel_samples / top->delay_secs;
        float esamples_percent = (100.0 * top->exact_samples) / top->samples;
+       struct perf_record_opts *opts = &top->record_opts;
+       struct perf_target *target = &opts->target;
        size_t ret = 0;
 
        if (!perf_guest) {
@@ -61,31 +63,31 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
                struct perf_evsel *first = perf_evlist__first(top->evlist);
                ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ",
                                (uint64_t)first->attr.sample_period,
-                               top->freq ? "Hz" : "");
+                               opts->freq ? "Hz" : "");
        }
 
        ret += SNPRINTF(bf + ret, size - ret, "%s", perf_evsel__name(top->sym_evsel));
 
        ret += SNPRINTF(bf + ret, size - ret, "], ");
 
-       if (top->target.pid)
+       if (target->pid)
                ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %s",
-                               top->target.pid);
-       else if (top->target.tid)
+                               target->pid);
+       else if (target->tid)
                ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %s",
-                               top->target.tid);
-       else if (top->target.uid_str != NULL)
+                               target->tid);
+       else if (target->uid_str != NULL)
                ret += SNPRINTF(bf + ret, size - ret, " (uid: %s",
-                               top->target.uid_str);
+                               target->uid_str);
        else
                ret += SNPRINTF(bf + ret, size - ret, " (all");
 
-       if (top->target.cpu_list)
+       if (target->cpu_list)
                ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
                                top->evlist->cpus->nr > 1 ? "s" : "",
-                               top->target.cpu_list);
+                               target->cpu_list);
        else {
-               if (top->target.tid)
+               if (target->tid)
                        ret += SNPRINTF(bf + ret, size - ret, ")");
                else
                        ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
index 86ff1b15059b6a1d7be4ace532a29df04b244683..7ebf357dc9e12870a8878fd0b76c6ef295bfa892 100644 (file)
@@ -14,7 +14,7 @@ struct perf_session;
 struct perf_top {
        struct perf_tool   tool;
        struct perf_evlist *evlist;
-       struct perf_target target;
+       struct perf_record_opts record_opts;
        /*
         * Symbols will be added here in perf_event__process_sample and will
         * get out after decayed.
@@ -24,24 +24,16 @@ struct perf_top {
        u64                exact_samples;
        u64                guest_us_samples, guest_kernel_samples;
        int                print_entries, count_filter, delay_secs;
-       int                freq;
        bool               hide_kernel_symbols, hide_user_symbols, zero;
        bool               use_tui, use_stdio;
        bool               sort_has_symbols;
-       bool               dont_use_callchains;
        bool               kptr_restrict_warned;
        bool               vmlinux_warned;
-       bool               inherit;
-       bool               group;
-       bool               sample_id_all_missing;
-       bool               exclude_guest_missing;
        bool               dump_symtab;
        struct hist_entry  *sym_filter_entry;
        struct perf_evsel  *sym_evsel;
        struct perf_session *session;
        struct winsize     winsize;
-       unsigned int       mmap_pages;
-       int                default_interval;
        int                realtime_prio;
        int                sym_pcnt_filter;
        const char         *sym_filter;
index 5906e8426cc7698161ed09ac30e52291107d9b40..805d1f52c5b48034772a6f1c44cb29776e82e886 100644 (file)
@@ -12,6 +12,8 @@
  */
 unsigned int page_size;
 
+bool test_attr__enabled;
+
 bool perf_host  = true;
 bool perf_guest = false;
 
@@ -218,3 +220,25 @@ void dump_stack(void)
 #else
 void dump_stack(void) {}
 #endif
+
+void get_term_dimensions(struct winsize *ws)
+{
+       char *s = getenv("LINES");
+
+       if (s != NULL) {
+               ws->ws_row = atoi(s);
+               s = getenv("COLUMNS");
+               if (s != NULL) {
+                       ws->ws_col = atoi(s);
+                       if (ws->ws_row && ws->ws_col)
+                               return;
+               }
+       }
+#ifdef TIOCGWINSZ
+       if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
+           ws->ws_row && ws->ws_col)
+               return;
+#endif
+       ws->ws_row = 25;
+       ws->ws_col = 80;
+}
index c2330918110c13a6b227697f0425abfd526076cc..09b4c26b71aabbfbfcadc6ae16bb7a651a0856c7 100644 (file)
@@ -265,10 +265,14 @@ bool is_power_of_2(unsigned long n)
 size_t hex_width(u64 v);
 int hex2u64(const char *ptr, u64 *val);
 
+char *ltrim(char *s);
 char *rtrim(char *s);
 
 void dump_stack(void);
 
 extern unsigned int page_size;
 
+struct winsize;
+void get_term_dimensions(struct winsize *ws);
+
 #endif
diff --git a/tools/vm/.gitignore b/tools/vm/.gitignore
new file mode 100644 (file)
index 0000000..44f095f
--- /dev/null
@@ -0,0 +1,2 @@
+slabinfo
+page-types